Commit dd8856bd authored by Linus Torvalds

Merge git://git.infradead.org/users/dhowells/workq-2.6

* git://git.infradead.org/users/dhowells/workq-2.6:
  Actually update the fixed up compile failures.
  WorkQueue: Fix up arch-specific work items where possible
  WorkStruct: make allyesconfig
  WorkStruct: Pass the work_struct pointer instead of context data
  WorkStruct: Merge the pending bit into the wq_data pointer
  WorkStruct: Typedef the work function prototype
  WorkStruct: Separate delayable and non-delayable events.
parents f81cff0d 06328b4f
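
For context on the API change the branch above converts to, here is a minimal before/after sketch. It is illustrative only, assuming the 2.6.19/2.6.20-era `<linux/workqueue.h>` interface; the `my_dev`, `my_work_fn` and `my_dev_setup` names are hypothetical and do not appear in this commit.

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	int			value;
	struct work_struct	work;
};

/* Old convention:  static void my_work_fn(void *data)
 * New convention:  the work item itself is passed, and the handler
 * recovers its enclosing object with container_of().
 */
static void my_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	printk(KERN_INFO "my_dev value: %d\n", dev->value);
}

static void my_dev_setup(struct my_dev *dev)
{
	/* Old convention:  INIT_WORK(&dev->work, my_work_fn, dev); */
	INIT_WORK(&dev->work, my_work_fn);
	schedule_work(&dev->work);
}
```

Statically declared items change the same way: `DECLARE_WORK(name, fn, data)` becomes `DECLARE_WORK(name, fn)`. Work items that are ever queued with a timeout move to the separate `delayed_work` type instead; a sketch of that half of the conversion follows the diff below.
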
......@@ -60,16 +60,16 @@ static int sharpsl_ac_check(void);
static int sharpsl_fatal_check(void);
static int sharpsl_average_value(int ad);
static void sharpsl_average_clear(void);
static void sharpsl_charge_toggle(void *private_);
static void sharpsl_battery_thread(void *private_);
static void sharpsl_charge_toggle(struct work_struct *private_);
static void sharpsl_battery_thread(struct work_struct *private_);
/*
* Variables
*/
struct sharpsl_pm_status sharpsl_pm;
DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
......@@ -116,7 +116,7 @@ void sharpsl_battery_kick(void)
EXPORT_SYMBOL(sharpsl_battery_kick);
static void sharpsl_battery_thread(void *private_)
static void sharpsl_battery_thread(struct work_struct *private_)
{
int voltage, percent, apm_status, i = 0;
......@@ -128,7 +128,7 @@ static void sharpsl_battery_thread(void *private_)
/* Corgi cannot confirm when battery fully charged so periodically kick! */
if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
&& time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
schedule_work(&toggle_charger);
schedule_delayed_work(&toggle_charger, 0);
while(1) {
voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
......@@ -212,7 +212,7 @@ static void sharpsl_charge_off(void)
sharpsl_pm_led(SHARPSL_LED_OFF);
sharpsl_pm.charge_mode = CHRG_OFF;
schedule_work(&sharpsl_bat);
schedule_delayed_work(&sharpsl_bat, 0);
}
static void sharpsl_charge_error(void)
......@@ -222,7 +222,7 @@ static void sharpsl_charge_error(void)
sharpsl_pm.charge_mode = CHRG_ERROR;
}
static void sharpsl_charge_toggle(void *private_)
static void sharpsl_charge_toggle(struct work_struct *private_)
{
dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
......@@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data)
else if (sharpsl_pm.charge_mode == CHRG_ON)
sharpsl_charge_off();
schedule_work(&sharpsl_bat);
schedule_delayed_work(&sharpsl_bat, 0);
}
......@@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data)
sharpsl_charge_off();
} else if (sharpsl_pm.full_count < 2) {
dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
schedule_work(&toggle_charger);
schedule_delayed_work(&toggle_charger, 0);
} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
schedule_work(&toggle_charger);
schedule_delayed_work(&toggle_charger, 0);
} else {
sharpsl_charge_off();
sharpsl_pm.charge_mode = CHRG_DONE;
......
......@@ -323,7 +323,8 @@ static int h3_transceiver_mode(struct device *dev, int mode)
cancel_delayed_work(&irda_config->gpio_expa);
PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
schedule_work(&irda_config->gpio_expa);
#error this is not permitted - mode is an argument variable
schedule_delayed_work(&irda_config->gpio_expa, 0);
return 0;
}
......
......@@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = {
.rows = 8,
.cols = 8,
.keymap = nokia770_keymap,
.keymapsize = ARRAY_SIZE(nokia770_keymap)
.keymapsize = ARRAY_SIZE(nokia770_keymap),
.delay = 4,
};
......@@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void)
printk("HP connected\n");
}
static void codec_delayed_power_down(void *arg)
static void codec_delayed_power_down(struct work_struct *work)
{
down(&audio_pwr_sem);
if (audio_pwr_state == -1)
......@@ -200,7 +200,7 @@ static void codec_delayed_power_down(void *arg)
up(&audio_pwr_sem);
}
static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
static void nokia770_audio_pwr_down(void)
{
......
......@@ -35,7 +35,7 @@ static u8 hw_led_state;
static u8 tps_leds_change;
static void tps_work(void *unused)
static void tps_work(struct work_struct *unused)
{
for (;;) {
u8 leds;
......@@ -61,7 +61,7 @@ static void tps_work(void *unused)
}
}
static DECLARE_WORK(work, tps_work, NULL);
static DECLARE_WORK(work, tps_work);
#ifdef CONFIG_OMAP_OSK_MISTRAL
......
......@@ -206,7 +206,8 @@ static int h4_transceiver_mode(struct device *dev, int mode)
cancel_delayed_work(&irda_config->gpio_expa);
PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
schedule_work(&irda_config->gpio_expa);
#error this is not permitted - mode is an argument variable
schedule_delayed_work(&irda_config->gpio_expa, 0);
return 0;
}
......
......@@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD;
static int max7310_write(struct i2c_client *client, int address, int data);
static struct i2c_client max7310_template;
static void akita_ioexp_work(void *private_);
static void akita_ioexp_work(struct work_struct *private_);
static struct device *akita_ioexp_device;
static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
DECLARE_WORK(akita_ioexp, akita_ioexp_work);
/*
......@@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit)
EXPORT_SYMBOL(akita_set_ioexp);
EXPORT_SYMBOL(akita_reset_ioexp);
static void akita_ioexp_work(void *private_)
static void akita_ioexp_work(struct work_struct *private_)
{
if (akita_ioexp_device)
max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
......
......@@ -51,10 +51,10 @@ static void mce_checkregs (void *info)
}
}
static void mce_work_fn(void *data);
static DECLARE_WORK(mce_work, mce_work_fn, NULL);
static void mce_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
static void mce_work_fn(void *data)
static void mce_work_fn(struct work_struct *work)
{
on_each_cpu(mce_checkregs, NULL, 1, 1);
schedule_delayed_work(&mce_work, MCE_RATE);
......
......@@ -1049,13 +1049,15 @@ void cpu_exit_clear(void)
struct warm_boot_cpu_info {
struct completion *complete;
struct work_struct task;
int apicid;
int cpu;
};
static void __cpuinit do_warm_boot_cpu(void *p)
static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
{
struct warm_boot_cpu_info *info = p;
struct warm_boot_cpu_info *info =
container_of(work, struct warm_boot_cpu_info, task);
do_boot_cpu(info->apicid, info->cpu);
complete(info->complete);
}
......@@ -1064,7 +1066,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
{
DECLARE_COMPLETION_ONSTACK(done);
struct warm_boot_cpu_info info;
struct work_struct task;
int apicid, ret;
struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
......@@ -1089,7 +1090,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
info.complete = &done;
info.apicid = apicid;
info.cpu = cpu;
INIT_WORK(&task, do_warm_boot_cpu, &info);
INIT_WORK(&info.task, do_warm_boot_cpu);
tsc_sync_disabled = 1;
......@@ -1097,7 +1098,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
KERNEL_PGD_PTRS);
flush_tlb_all();
schedule_work(&task);
schedule_work(&info.task);
wait_for_completion(&done);
tsc_sync_disabled = 0;
......
......@@ -217,7 +217,7 @@ static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
static void handle_cpufreq_delayed_get(void *v)
static void handle_cpufreq_delayed_get(struct work_struct *work)
{
unsigned int cpu;
......@@ -306,7 +306,7 @@ static int __init cpufreq_tsc(void)
{
int ret;
INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (!ret)
......
......@@ -209,7 +209,7 @@ static void do_serial_bh(void)
}
#endif
static void do_softint(void *private_)
static void do_softint(struct work_struct *private_)
{
printk(KERN_ERR "simserial: do_softint called\n");
}
......@@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
info->flags = sstate->flags;
info->xmit_fifo_size = sstate->xmit_fifo_size;
info->line = line;
INIT_WORK(&info->work, do_softint, info);
INIT_WORK(&info->work, do_softint);
info->state = sstate;
if (sstate->info) {
kfree(info);
......
......@@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
* disable the cmc interrupt vector.
*/
static void
ia64_mca_cmc_vector_disable_keventd(void *unused)
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}
......@@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
* enable the cmc interrupt vector.
*/
static void
ia64_mca_cmc_vector_enable_keventd(void *unused)
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}
......@@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
monarch_cpu = -1;
}
static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
/*
* ia64_mca_cmc_int_handler
......
......@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
}
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
void
do_fork_idle(void *_c_idle)
do_fork_idle(struct work_struct *work)
{
struct create_idle *c_idle = _c_idle;
struct create_idle *c_idle =
container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
......@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
{
int timeout;
struct create_idle c_idle = {
.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
.cpu = cpu,
.done = COMPLETION_INITIALIZER(c_idle.done),
};
DECLARE_WORK(work, do_fork_idle, &c_idle);
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
......@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
* We can't use kernel_thread since we must avoid to reschedule the child.
*/
if (!keventd_up() || current_is_keventd())
work.func(work.data);
c_idle.work.func(&c_idle.work);
else {
schedule_work(&work);
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
}
......
......@@ -319,7 +319,7 @@ static void sp_cleanup(void)
static int channel_open = 0;
/* the work handler */
static void sp_work(void *data)
static void sp_work(struct work_struct *unused)
{
if (!channel_open) {
if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
......@@ -354,7 +354,7 @@ static void startwork(int vpe)
return;
}
INIT_WORK(&work, sp_work, NULL);
INIT_WORK(&work, sp_work);
queue_work(workqueue, &work);
} else
queue_work(workqueue, &work);
......
......@@ -14,7 +14,7 @@ static unsigned long avr_clock;
static struct work_struct wd_work;
static void wd_stop(void *unused)
static void wd_stop(struct work_struct *unused)
{
const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
int i = 0, rescue = 8;
......@@ -122,7 +122,7 @@ static int __init ls_uarts_init(void)
ls_uart_init();
INIT_WORK(&wd_work, wd_stop, NULL);
INIT_WORK(&wd_work, wd_stop);
schedule_work(&wd_work);
return 0;
......
......@@ -18,11 +18,11 @@
#define OLD_BACKLIGHT_MAX 15
static void pmac_backlight_key_worker(void *data);
static void pmac_backlight_set_legacy_worker(void *data);
static void pmac_backlight_key_worker(struct work_struct *work);
static void pmac_backlight_set_legacy_worker(struct work_struct *work);
static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
/* Although these variables are used in interrupt context, it makes no sense to
* protect them. No user is able to produce enough key events per second and
......@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value)
return level;
}
static void pmac_backlight_key_worker(void *data)
static void pmac_backlight_key_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
return;
......@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness)
return error;
}
static void pmac_backlight_set_legacy_worker(void *data)
static void pmac_backlight_set_legacy_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
return;
......
......@@ -37,8 +37,8 @@
/* EEH event workqueue setup. */
static DEFINE_SPINLOCK(eeh_eventlist_lock);
LIST_HEAD(eeh_eventlist);
static void eeh_thread_launcher(void *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
static void eeh_thread_launcher(struct work_struct *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
/* Serialize reset sequences for a given pci device */
DEFINE_MUTEX(eeh_event_mutex);
......@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy)
* eeh_thread_launcher
* @dummy - unused
*/
static void eeh_thread_launcher(void *dummy)
static void eeh_thread_launcher(struct work_struct *dummy)
{
if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
printk(KERN_ERR "Failed to start EEH daemon\n");
......
......@@ -385,6 +385,7 @@ struct fcc_enet_private {
phy_info_t *phy;
struct work_struct phy_relink;
struct work_struct phy_display_config;
struct net_device *dev;
uint sequence_done;
......@@ -1391,10 +1392,11 @@ static phy_info_t *phy_info[] = {
NULL
};
static void mii_display_status(void *data)
static void mii_display_status(struct work_struct *work)
{
struct net_device *dev = data;
volatile struct fcc_enet_private *fep = dev->priv;
volatile struct fcc_enet_private *fep =
container_of(work, struct fcc_enet_private, phy_relink);
struct net_device *dev = fep->dev;
uint s = fep->phy_status;
if (!fep->link && !fep->old_link) {
......@@ -1428,10 +1430,12 @@ static void mii_display_status(void *data)
printk(".\n");
}
static void mii_display_config(void *data)
static void mii_display_config(struct work_struct *work)
{
struct net_device *dev = data;
volatile struct fcc_enet_private *fep = dev->priv;
volatile struct fcc_enet_private *fep =
container_of(work, struct fcc_enet_private,
phy_display_config);
struct net_device *dev = fep->dev;
uint s = fep->phy_status;
printk("%s: config: auto-negotiation ", dev->name);
......@@ -1758,8 +1762,9 @@ static int __init fec_enet_init(void)
cep->phy_id_done = 0;
cep->phy_addr = fip->fc_phyaddr;
mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
INIT_WORK(&cep->phy_relink, mii_display_status, dev);
INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
INIT_WORK(&cep->phy_relink, mii_display_status);
INIT_WORK(&cep->phy_display_config, mii_display_config);
cep->dev = dev;
#endif /* CONFIG_USE_MDIO */
fip++;
......
......@@ -173,6 +173,7 @@ struct fec_enet_private {
uint phy_speed;
phy_info_t *phy;
struct work_struct phy_task;
struct net_device *dev;
uint sequence_done;
......@@ -1263,10 +1264,11 @@ static void mii_display_status(struct net_device *dev)
printk(".\n");
}
static void mii_display_config(void *priv)
static void mii_display_config(struct work_struct *work)
{
struct net_device *dev = (struct net_device *)priv;
struct fec_enet_private *fep = dev->priv;
struct fec_enet_private *fep =
container_of(work, struct fec_enet_private, phy_task);
struct net_device *dev = fep->dev;
volatile uint *s = &(fep->phy_status);
printk("%s: config: auto-negotiation ", dev->name);
......@@ -1295,10 +1297,11 @@ static void mii_display_config(void *priv)
fep->sequence_done = 1;
}
static void mii_relink(void *priv)
static void mii_relink(struct work_struct *work)
{
struct net_device *dev = (struct net_device *)priv;
struct fec_enet_private *fep = dev->priv;
struct fec_enet_private *fep =
container_of(work, struct fec_enet_private, phy_task);
struct net_device *dev = fep->dev;
int duplex;
fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
......@@ -1325,7 +1328,8 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
struct fec_enet_private *fep = dev->priv;
INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
fep->dev = dev;
INIT_WORK(&fep->phy_task, mii_relink);
schedule_work(&fep->phy_task);
}
......@@ -1333,7 +1337,8 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
struct fec_enet_private *fep = dev->priv;
INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
fep->dev = dev;
INIT_WORK(&fep->phy_task, mii_display_config);
schedule_work(&fep->phy_task);
}
......
......@@ -92,8 +92,8 @@ static int appldata_timer_active;
* Work queue
*/
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(void *data);
static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
static void appldata_work_fn(struct work_struct *work);
static DECLARE_WORK(appldata_work, appldata_work_fn);
/*
......@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data)
*
* call data gathering function for each (active) module
*/
static void appldata_work_fn(void *data)
static void appldata_work_fn(struct work_struct *work)
{
struct list_head *lh;
struct appldata_ops *ops;
......
......@@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans)
return -1;
}
void chan_interrupt(struct list_head *chans, struct work_struct *task,
void chan_interrupt(struct list_head *chans, struct delayed_work *task,
struct tty_struct *tty, int irq)
{
struct list_head *ele, *next;
......
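
To round off the hunks above, a hedged sketch of the other recurring conversion: items that used to be plain `work_struct`s but are scheduled with a delay become `struct delayed_work`, and call sites that used `schedule_work()` on them become `schedule_delayed_work(..., 0)`. Again the names are hypothetical and assume the 2.6.20-era API; this is not code from the commit.

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	int			value;
	struct delayed_work	dwork;	/* was: struct work_struct */
};

static void my_delayed_fn(struct work_struct *work)
{
	/* delayed_work embeds a work_struct member named .work */
	struct my_dev *dev = container_of(work, struct my_dev, dwork.work);

	printk(KERN_INFO "delayed run, value: %d\n", dev->value);
}

static void my_dev_init(struct my_dev *dev)
{
	/* was: INIT_WORK(&dev->dwork, my_delayed_fn, dev); */
	INIT_DELAYED_WORK(&dev->dwork, my_delayed_fn);
}

static void my_dev_kick(struct my_dev *dev)
{
	/* was: schedule_work(&dev->dwork);  a zero delay queues it immediately */
	schedule_delayed_work(&dev->dwork, 0);
}
```

Static delayed items use `DECLARE_DELAYED_WORK(name, fn)`, as in the sharpsl_pm and MCE hunks above.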