author     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>  2009-01-02 16:12:50 +0100
committer  Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>  2009-01-02 16:12:50 +0100
commit     201bffa46466b4afdf7d29db8eca3fa5decb39c8 (patch)
tree       47e7d85563690547b67748092e587be1f31046b5 /drivers/ide/ide-park.c
parent     ide: add ide_[un]lock_hwgroup() helpers (diff)
ide: use per-device request queue locks (v2)
* Move hack for flush requests from choose_drive() to do_ide_request().

* Add ide_plug_device() helper and convert core IDE code from using the
  per-hwgroup lock as a request lock to using ->queue_lock instead.

* Remove no longer needed:
  - choose_drive() function
  - WAKEUP() macro
  - 'sleeping' flag from ide_hwif_t
  - 'service_{start,time}' fields from ide_drive_t

This patch results in much simpler and more maintainable code (besides
being a scalability improvement).

v2:
* Fixes/improvements based on review from Elias:
  - take as many requests off the queue as possible
  - remove now redundant BUG_ON()

Cc: Elias Oltmanns <eo@nebensachen.de>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
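The ide_plug_device() helper mentioned above is introduced elsewhere in the series and is not part of this file's hunk. As a rough orientation only, a minimal sketch of such a helper, assuming the block-layer calls of that era (elv_queue_empty(), blk_plug_device()) and that the helper simply plugs the drive's own queue under ->queue_lock, could look like the following; the actual implementation in drivers/ide/ide-io.c may differ in detail:

	#include <linux/blkdev.h>
	#include <linux/ide.h>

	/*
	 * Sketch only: plug the drive's own request queue so the block layer
	 * will restart it later, taking the per-device queue lock rather than
	 * the shared hwgroup lock.
	 */
	void ide_plug_device(ide_drive_t *drive)
	{
		struct request_queue *q = drive->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		if (!elv_queue_empty(q))
			blk_plug_device(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

With a helper of this shape, code paths that used to defer work per hwgroup can defer it per device instead, which is the point of the conversion described above.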
Diffstat (limited to 'drivers/ide/ide-park.c')
-rw-r--r--  drivers/ide/ide-park.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 44c6787f8aeb..678454ac2483 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -16,16 +16,19 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	spin_lock_irq(&hwgroup->lock);
 	if (drive->dev_flags & IDE_DFLAG_PARKED) {
 		int reset_timer = time_before(timeout, drive->sleep);
+		int start_queue = 0;
 		drive->sleep = timeout;
 		wake_up_all(&ide_park_wq);
-		if (reset_timer && hwgroup->sleeping &&
-		    del_timer(&hwgroup->timer)) {
-			hwgroup->sleeping = 0;
-			ide_unlock_hwgroup(hwgroup);
+		if (reset_timer && del_timer(&hwgroup->timer))
+			start_queue = 1;
+		spin_unlock_irq(&hwgroup->lock);
+
+		if (start_queue) {
+			spin_lock_irq(q->queue_lock);
 			blk_start_queueing(q);
+			spin_unlock_irq(q->queue_lock);
 		}
-		spin_unlock_irq(&hwgroup->lock);
 		return;
 	}
 	spin_unlock_irq(&hwgroup->lock);
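Note on the hunk above: blk_start_queueing() expects to run with the queue lock held, which is why the new code records the decision in start_queue, drops hwgroup->lock, and only then takes q->queue_lock around the call. Doing it this way also keeps the per-hwgroup lock and the per-device queue lock from being held at the same time, consistent with the switch to per-device request queue locks described in the commit message.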