Diffstat (limited to 'kernel')
-rw-r--r--   kernel/kmod.c            |  16
-rw-r--r--   kernel/kthread.c         |  13
-rw-r--r--   kernel/power/poweroff.c  |   4
-rw-r--r--   kernel/relay.c           |  10
-rw-r--r--   kernel/sys.c             |   4
-rw-r--r--   kernel/workqueue.c       | 109
6 files changed, 95 insertions, 61 deletions
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2b76dee28496..8d2bea09a4ec 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -114,6 +114,7 @@ EXPORT_SYMBOL(request_module);
 #endif /* CONFIG_KMOD */
 
 struct subprocess_info {
+	struct work_struct work;
 	struct completion *complete;
 	char *path;
 	char **argv;
@@ -221,9 +222,10 @@ static int wait_for_helper(void *data)
 }
 
 /* This is run by khelper thread  */
-static void __call_usermodehelper(void *data)
+static void __call_usermodehelper(struct work_struct *work)
 {
-	struct subprocess_info *sub_info = data;
+	struct subprocess_info *sub_info =
+		container_of(work, struct subprocess_info, work);
 	pid_t pid;
 	int wait = sub_info->wait;
 
@@ -264,6 +266,8 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct subprocess_info sub_info = {
+		.work		= __WORK_INITIALIZER(sub_info.work,
+						     __call_usermodehelper),
 		.complete	= &done,
 		.path		= path,
 		.argv		= argv,
@@ -272,7 +276,6 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
 		.wait		= wait,
 		.retval		= 0,
 	};
-	DECLARE_WORK(work, __call_usermodehelper, &sub_info);
 
 	if (!khelper_wq)
 		return -EBUSY;
@@ -280,7 +283,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
 	if (path[0] == '\0')
 		return 0;
 
-	queue_work(khelper_wq, &work);
+	queue_work(khelper_wq, &sub_info.work);
 	wait_for_completion(&done);
 	return sub_info.retval;
 }
@@ -291,6 +294,8 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
 {
 	DECLARE_COMPLETION(done);
 	struct subprocess_info sub_info = {
+		.work		= __WORK_INITIALIZER(sub_info.work,
+						     __call_usermodehelper),
 		.complete	= &done,
 		.path		= path,
 		.argv		= argv,
@@ -298,7 +303,6 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
 		.retval		= 0,
 	};
 	struct file *f;
-	DECLARE_WORK(work, __call_usermodehelper, &sub_info);
 
 	if (!khelper_wq)
 		return -EBUSY;
@@ -318,7 +322,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
 	}
 	sub_info.stdin = f;
 
-	queue_work(khelper_wq, &work);
+	queue_work(khelper_wq, &sub_info.work);
 	wait_for_completion(&done);
 	return sub_info.retval;
 }
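The kmod.c hunks above show the pattern the whole patch applies: instead of a void * context passed at DECLARE_WORK/INIT_WORK time, the work_struct is embedded in the object it belongs to, and the handler recovers that object with container_of(). A minimal sketch of the same pattern; my_ctx, my_handler and my_submit are hypothetical names, not from the patch:

/* Embed the work item in its owning object so the handler can find
 * the object again without a separate context pointer. */
struct my_ctx {
	struct work_struct work;	/* embedded, so container_of() works */
	int payload;
};

static void my_handler(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	/* ctx->payload is reachable without a void * argument */
}

static void my_submit(struct workqueue_struct *wq, struct my_ctx *ctx)
{
	INIT_WORK(&ctx->work, my_handler);	/* two arguments now, no data */
	queue_work(wq, &ctx->work);
}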
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4f9c60ef95e8..1db8c72d0d38 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -31,6 +31,8 @@ struct kthread_create_info
 	/* Result passed back to kthread_create() from keventd. */
 	struct task_struct *result;
 	struct completion done;
+
+	struct work_struct work;
 };
 
 struct kthread_stop_info
@@ -111,9 +113,10 @@ static int kthread(void *_create)
 }
 
 /* We are keventd: create a thread.  */
-static void keventd_create_kthread(void *_create)
+static void keventd_create_kthread(struct work_struct *work)
 {
-	struct kthread_create_info *create = _create;
+	struct kthread_create_info *create =
+		container_of(work, struct kthread_create_info, work);
 	int pid;
 
 	/* We want our own signal handler (we take no signals by default). */
@@ -154,20 +157,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 {
 	struct kthread_create_info create;
-	DECLARE_WORK(work, keventd_create_kthread, &create);
 
 	create.threadfn = threadfn;
 	create.data = data;
 	init_completion(&create.started);
 	init_completion(&create.done);
+	INIT_WORK(&create.work, keventd_create_kthread);
 
 	/*
	 * The workqueue needs to start up first:
	 */
 	if (!helper_wq)
-		work.func(work.data);
+		create.work.func(&create.work);
 	else {
-		queue_work(helper_wq, &work);
+		queue_work(helper_wq, &create.work);
 		wait_for_completion(&create.done);
 	}
 	if (!IS_ERR(create.result)) {
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index f1f900ac3164..678ec736076b 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -16,12 +16,12 @@
  * callback we use.
  */
 
-static void do_poweroff(void *dummy)
+static void do_poweroff(struct work_struct *dummy)
 {
 	kernel_power_off();
 }
 
-static DECLARE_WORK(poweroff_work, do_poweroff, NULL);
+static DECLARE_WORK(poweroff_work, do_poweroff);
 
 static void handle_poweroff(int key, struct tty_struct *tty)
 {
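DECLARE_WORK likewise drops its data argument; a handler that never used its context, like do_poweroff() above, simply ignores the work_struct it is handed. A sketch with hypothetical names:

static void my_static_handler(struct work_struct *unused)
{
	/* no per-call context: state must be global, static, or derivable */
}

/* the old third (data) argument is gone: was (name, fn, NULL) */
static DECLARE_WORK(my_static_work, my_static_handler);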
diff --git a/kernel/relay.c b/kernel/relay.c
index f04bbdb56ac2..2b92e8ece85b 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -308,9 +308,10 @@ static struct rchan_callbacks default_channel_callbacks = {
  * reason waking is deferred is that calling directly from write
  * causes problems if you're writing from say the scheduler.
  */
-static void wakeup_readers(void *private)
+static void wakeup_readers(struct work_struct *work)
 {
-	struct rchan_buf *buf = private;
+	struct rchan_buf *buf =
+		container_of(work, struct rchan_buf, wake_readers.work);
 	wake_up_interruptible(&buf->read_wait);
 }
 
@@ -328,7 +329,7 @@ static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
 	if (init) {
 		init_waitqueue_head(&buf->read_wait);
 		kref_init(&buf->kref);
-		INIT_WORK(&buf->wake_readers, NULL, NULL);
+		INIT_DELAYED_WORK(&buf->wake_readers, NULL);
 	} else {
 		cancel_delayed_work(&buf->wake_readers);
 		flush_scheduled_work();
@@ -549,7 +550,8 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
 			buf->padding[old_subbuf];
 		smp_mb();
 		if (waitqueue_active(&buf->read_wait)) {
-			PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf);
+			PREPARE_DELAYED_WORK(&buf->wake_readers,
+					     wakeup_readers);
 			schedule_delayed_work(&buf->wake_readers, 1);
 		}
 	}
diff --git a/kernel/sys.c b/kernel/sys.c
index 98489d82801b..c87b461de38d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -880,7 +880,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
 	return 0;
 }
 
-static void deferred_cad(void *dummy)
+static void deferred_cad(struct work_struct *dummy)
 {
 	kernel_restart(NULL);
 }
@@ -892,7 +892,7 @@ static void deferred_cad(void *dummy)
  */
 void ctrl_alt_del(void)
 {
-	static DECLARE_WORK(cad_work, deferred_cad, NULL);
+	static DECLARE_WORK(cad_work, deferred_cad);
 
 	if (C_A_D)
 		schedule_work(&cad_work);
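The relay.c hunks also show the new split between immediate and delayed work: buf->wake_readers becomes a struct delayed_work (a work_struct bundled with its timer), initialized with INIT_DELAYED_WORK(), and wakeup_readers() reaches the container through the embedded member, hence the two-level wake_readers.work reference. A sketch of that shape, with hypothetical names (my_obj, my_delayed_handler, my_arm):

struct my_obj {
	struct delayed_work dwork;	/* bundles work_struct and timer_list */
};

static void my_delayed_handler(struct work_struct *work)
{
	/* the work_struct lives at dwork.work inside the container */
	struct my_obj *obj = container_of(work, struct my_obj, dwork.work);

	/* ... act on obj ... */
}

static void my_arm(struct my_obj *obj)
{
	INIT_DELAYED_WORK(&obj->dwork, my_delayed_handler);
	schedule_delayed_work(&obj->dwork, 1);	/* one jiffy, as relay does */
}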
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03d2c27..8d1e7cb8a51a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -80,6 +80,29 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
 	return list_empty(&wq->list);
 }
 
+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+	unsigned long new, old, res;
+
+	/* assume the pending flag is already set and that the task has already
+	 * been queued on this workqueue */
+	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+	res = work->management;
+	if (res != new) {
+		do {
+			old = res;
+			new = (unsigned long) wq;
+			new |= (old & WORK_STRUCT_FLAG_MASK);
+			res = cmpxchg(&work->management, old, new);
+		} while (res != old);
+	}
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+}
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
 			 struct work_struct *work)
@@ -87,7 +110,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 	unsigned long flags;
 
 	spin_lock_irqsave(&cwq->lock, flags);
-	work->wq_data = cwq;
+	set_wq_data(work, cwq);
 	list_add_tail(&work->entry, &cwq->worklist);
 	cwq->insert_sequence++;
 	wake_up(&cwq->more_work);
@@ -108,7 +131,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0, cpu = get_cpu();
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		if (unlikely(is_single_threaded(wq)))
 			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
@@ -122,38 +145,42 @@ EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-	struct work_struct *work = (struct work_struct *)__data;
-	struct workqueue_struct *wq = work->wq_data;
+	struct delayed_work *dwork = (struct delayed_work *)__data;
+	struct workqueue_struct *wq = get_wq_data(&dwork->work);
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
 		cpu = singlethread_cpu;
 
-	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
+
+	if (delay == 0)
+		return queue_work(wq, work);
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer(timer);
 		ret = 1;
@@ -172,19 +199,20 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
 	int ret = 0;
-	struct timer_list *timer = &work->timer;
+	struct timer_list *timer = &dwork->timer;
+	struct work_struct *work = &dwork->work;
 
-	if (!test_and_set_bit(0, &work->pending)) {
+	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		/* This stores wq for the moment, for the timer_fn */
-		work->wq_data = wq;
+		set_wq_data(work, wq);
 		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
+		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
 		add_timer_on(timer, cpu);
 		ret = 1;
@@ -212,15 +240,15 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
-		void (*f) (void *) = work->func;
-		void *data = work->data;
+		work_func_t f = work->func;
 
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irqrestore(&cwq->lock, flags);
 
-		BUG_ON(work->wq_data != cwq);
-		clear_bit(0, &work->pending);
-		f(data);
+		BUG_ON(get_wq_data(work) != cwq);
+		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+			work_release(work);
+		f(work);
 
 		spin_lock_irqsave(&cwq->lock, flags);
 		cwq->remove_sequence++;
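The set_wq_data()/get_wq_data() helpers introduced above work because struct cpu_workqueue_struct is aligned to more than a byte, so the low bits of its address are always zero and can carry the WORK_STRUCT_PENDING and WORK_STRUCT_NOAUTOREL flags in the same management word. A round-trip sketch of that packing, illustrative only, using the masks the patch itself relies on:

static void packing_demo(struct cpu_workqueue_struct *cwq)
{
	unsigned long word;

	/* pointer and flag bit share one word ... */
	word = (unsigned long)cwq | (1UL << WORK_STRUCT_PENDING);

	/* ... and each part comes back intact */
	BUG_ON((void *)(word & WORK_STRUCT_WQ_DATA_MASK) != (void *)cwq);
	BUG_ON(!test_bit(WORK_STRUCT_PENDING, &word));
}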
@@ -468,38 +496,37 @@ EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work(keventd_wq, work, delay);
+	return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
 int schedule_delayed_work_on(int cpu,
-			struct work_struct *work, unsigned long delay)
+			struct delayed_work *dwork, unsigned long delay)
 {
-	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
- * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
@@ -508,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 *
 * schedule_on_each_cpu() is very slow.
 */
-int schedule_on_each_cpu(void (*func)(void *info), void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
 	struct work_struct *works;
@@ -519,7 +546,7 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
 
 	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
-		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+		INIT_WORK(per_cpu_ptr(works, cpu), func);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
 				per_cpu_ptr(works, cpu));
 	}
@@ -539,12 +566,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
 */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-				       struct work_struct *work)
+				       struct delayed_work *dwork)
 {
-	while (!cancel_delayed_work(work))
+	while (!cancel_delayed_work(dwork))
 		flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,18 +579,17 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 /**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
 */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-	cancel_rearming_delayed_workqueue(keventd_wq, work);
+	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
 /**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
- * @data:	data to pass to the function
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
@@ -573,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
-int execute_in_process_context(void (*fn)(void *data), void *data,
-			       struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
 	if (!in_interrupt()) {
-		fn(data);
+		fn(&ew->work);
 		return 0;
 	}
 
-	INIT_WORK(&ew->work, fn, data);
+	INIT_WORK(&ew->work, fn);
 	schedule_work(&ew->work);
 	return 1;
 }
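With the data argument gone from execute_in_process_context(), callers must make their state reachable from the work_struct itself, typically by embedding the execute_work in a larger object, in the same container_of() style as the rest of this patch. A hypothetical caller (my_release, my_release_fn, my_teardown are illustrative names):

struct my_release {
	struct execute_work ew;	/* must stay valid until the work has run */
	void *resource;
};

static void my_release_fn(struct work_struct *work)
{
	struct my_release *r =
		container_of(work, struct my_release, ew.work);

	/* ... free r->resource ... */
}

static void my_teardown(struct my_release *r)
{
	/* runs my_release_fn() inline unless in interrupt context,
	 * in which case it is deferred to keventd */
	execute_in_process_context(my_release_fn, &r->ew);
}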