author     Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 18:54:49 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 18:54:49 +0200
commit     033d9959ed2dc1029217d4165f80a71702dc578e
tree       3d306316e44bdabce2e0bf2ef7e466e525f90b4c /block
parent     drivers: bus: omap_l3: fixup merge conflict resolution
parent     workqueue: remove spurious WARN_ON_ONCE(in_irq()) from try_to_grab_pending()
Merge branch 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue changes from Tejun Heo:
"This is workqueue updates for v3.7-rc1. A lot of activities this
round including considerable API and behavior cleanups.
* delayed_work combines a timer and a work item. The handling of the
timer part has always been a bit clunky, leading to a confusing
cancellation API with weird corner-case behaviors. delayed_work is
updated to use the new IRQ-safe timer, and cancellation now works as
expected.
* Another deficiency of delayed_work was the lack of a counterpart to
mod_timer(), which led to cancel+queue combinations or open-coded
timer+work usages. mod_delayed_work[_on]() have been added.
Together, these two changes make delayed_work provide the interface
of, and behave like, a timer that executes in process context (a
sketch follows the quoted message).
* A work item could be executed concurrently on multiple CPUs, which
is rather unintuitive and made flush_work() behavior confusing and
half-broken under certain circumstances. This problem doesn't
exist for non-reentrant workqueues. While the non-reentrancy check
isn't free, the overhead is incurred only when a work item bounces
across different CPUs, and even in a simulated pathological scenario
the overhead isn't too high.
All workqueues are made non-reentrant. This removes the
distinction between flush_[delayed_]work() and
flush_[delayed_]work_sync(). The former is now as strong as the
latter, and the specified work item is guaranteed to have finished
execution of any previous queueing on return.
* In addition to the various bug fixes, Lai redid and simplified CPU
hotplug handling significantly.
* Joonsoo introduced system_highpri_wq and used it during CPU
hotplug.
There are two merge commits - one to pull in IRQ safe timer from
tip/timers/core and the other to pull in CPU hotplug fixes from
wq/for-3.6-fixes as Lai's hotplug restructuring depended on them."
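To illustrate the mod_delayed_work() point above, here is a minimal
sketch; struct my_dev, my_timeout_fn(), my_dev_init() and my_kick() are
made-up names, while the workqueue calls themselves are the interfaces
the message describes:

#include <linux/printk.h>
#include <linux/workqueue.h>

struct my_dev {
        struct delayed_work timeout_work;
};

static void my_timeout_fn(struct work_struct *work)
{
        struct my_dev *dev = container_of(to_delayed_work(work),
                                          struct my_dev, timeout_work);

        pr_info("timeout for %p\n", dev);       /* runs in process context */
}

static void my_dev_init(struct my_dev *dev)
{
        INIT_DELAYED_WORK(&dev->timeout_work, my_timeout_fn);
}

/* pre-3.7 idiom: cancel and requeue as two separate steps */
static void my_kick_old(struct my_dev *dev)
{
        cancel_delayed_work(&dev->timeout_work);
        queue_delayed_work(system_wq, &dev->timeout_work, HZ);
}

/* 3.7 idiom: one call that behaves like mod_timer() */
static void my_kick(struct my_dev *dev)
{
        mod_delayed_work(system_wq, &dev->timeout_work, HZ);
}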
Fixed a number of trivial conflicts, but the more interesting conflicts
were silent ones where the deprecated interfaces had been used by new
code in the merge window, and thus didn't cause any real data conflicts.
Tejun pointed out a few of them, and I fixed a couple more.
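For reference, a sketch of what such a silent conflict looks like in
code, using a made-up work item; both versions still compile against
v3.7 because the deprecated names survive as wrappers for now:

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work) { }
static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

/* merge-window code written against the deprecated interfaces */
static void my_sync_old(void)
{
        queue_delayed_work(system_nrt_wq, &my_dwork, 0);
        flush_delayed_work_sync(&my_dwork);
}

/* the same operation after this merge: every workqueue is
 * non-reentrant, so the plain queue and flush are just as strong */
static void my_sync_new(void)
{
        queue_delayed_work(system_wq, &my_dwork, 0);
        flush_delayed_work(&my_dwork);
}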
* 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (46 commits)
workqueue: remove spurious WARN_ON_ONCE(in_irq()) from try_to_grab_pending()
workqueue: use cwq_set_max_active() helper for workqueue_set_max_active()
workqueue: introduce cwq_set_max_active() helper for thaw_workqueues()
workqueue: remove @delayed from cwq_dec_nr_in_flight()
workqueue: fix possible stall on try_to_grab_pending() of a delayed work item
workqueue: use hotcpu_notifier() for workqueue_cpu_down_callback()
workqueue: use __cpuinit instead of __devinit for cpu callbacks
workqueue: rename manager_mutex to assoc_mutex
workqueue: WORKER_REBIND is no longer necessary for idle rebinding
workqueue: WORKER_REBIND is no longer necessary for busy rebinding
workqueue: reimplement idle worker rebinding
workqueue: deprecate __cancel_delayed_work()
workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
workqueue: use mod_delayed_work() instead of __cancel + queue
workqueue: use irqsafe timer for delayed_work
workqueue: clean up delayed_work initializers and add missing one
workqueue: make deferrable delayed_work initializer names consistent
workqueue: cosmetic whitespace updates for macro definitions
workqueue: deprecate system_nrt[_freezable]_wq
workqueue: deprecate flush[_delayed]_work_sync()
...
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c     |  8
-rw-r--r--  block/blk-throttle.c | 14
-rw-r--r--  block/genhd.c        | 14
3 files changed, 13 insertions, 23 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index ee3cb3a5e278..d2da64170513 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -262,7 +262,7 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-        __cancel_delayed_work(&q->delay_work);
+        cancel_delayed_work(&q->delay_work);
         queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-        if (likely(!blk_queue_stopped(q))) {
-                __cancel_delayed_work(&q->delay_work);
-                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-        }
+        if (likely(!blk_queue_stopped(q)))
+                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e287c19908c8..a9664fa0b609 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ alloc_stats:
                 stats_cpu = alloc_percpu(struct tg_stats_cpu);
                 if (!stats_cpu) {
                         /* allocation failed, try again after some time */
-                        queue_delayed_work(system_nrt_wq, dwork,
-                                           msecs_to_jiffies(10));
+                        schedule_delayed_work(dwork, msecs_to_jiffies(10));
                         return;
                 }
         }
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
          */
         spin_lock_irqsave(&tg_stats_alloc_lock, flags);
         list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-        queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+        schedule_delayed_work(&tg_stats_alloc_work, 0);
         spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
@@ -930,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
         /* schedule work if limits changed even if no bio is queued */
         if (total_nr_queued(td) || td->limits_changed) {
-                /*
-                 * We might have a work scheduled to be executed in future.
-                 * Cancel that and schedule a new one.
-                 */
-                __cancel_delayed_work(dwork);
-                queue_delayed_work(kthrotld_workqueue, dwork, delay);
+                mod_delayed_work(kthrotld_workqueue, dwork, delay);
                 throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                            delay, jiffies);
         }
diff --git a/block/genhd.c b/block/genhd.c
index d839723303c8..6cace663a80e 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
         intv = disk_events_poll_jiffies(disk);
         set_timer_slack(&ev->dwork.timer, intv / 4);
         if (check_now)
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+                queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
         else if (intv)
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+                queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
         spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1534,10 +1534,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
         spin_lock_irq(&ev->lock);
         ev->clearing |= mask;
-        if (!ev->block) {
-                cancel_delayed_work(&ev->dwork);
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
-        }
+        if (!ev->block)
+                mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
         spin_unlock_irq(&ev->lock);
 }
@@ -1573,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
         /* uncondtionally schedule event check and wait for it to finish */
         disk_block_events(disk);
-        queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+        queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
         flush_delayed_work(&ev->dwork);
         __disk_unblock_events(disk, false);
@@ -1610,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
         intv = disk_events_poll_jiffies(disk);
         if (!ev->block && intv)
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+                queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
         spin_unlock_irq(&ev->lock);
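The genhd hunks above all convert the same self-rearming poll pattern;
as a standalone sketch, where poll_fn(), poll_kick() and poll_jiffies
are hypothetical stand-ins for disk_events_workfn() and
disk_events_poll_jiffies():

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);
static unsigned long poll_jiffies = HZ;         /* assumed poll interval */

static void poll_fn(struct work_struct *work)
{
        /* ... check for and report events ... */

        /* re-arm on the freezable queue so polling pauses across suspend */
        queue_delayed_work(system_freezable_wq, &poll_work, poll_jiffies);
}

static void poll_kick(void)
{
        /* start polling now, or pull an already-queued poll forward */
        mod_delayed_work(system_freezable_wq, &poll_work, 0);
}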