summaryrefslogtreecommitdiffstats
path: root/drivers/block/skd_main.c
diff options
context:
space:
mode:
authorBart Van Assche <bart.vanassche@wdc.com>2017-08-17 22:13:22 +0200
committerJens Axboe <axboe@kernel.dk>2017-08-18 16:45:29 +0200
commit8fe700650ef69a561a1745764aa42252cfee9c19 (patch)
tree9f45f0195c24c3aab645d2aad1f222091cc160ee /drivers/block/skd_main.c
parentskd: Rework request failing code path (diff)
downloadlinux-8fe700650ef69a561a1745764aa42252cfee9c19.tar.xz
linux-8fe700650ef69a561a1745764aa42252cfee9c19.zip
skd: Convert explicit skd_request_fn() calls
This will make it easier to convert this driver to the blk-mq approach. This patch also reduces interrupt latency by moving skd_request_fn() calls out of the skd_isr() interrupt handler.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/block/skd_main.c')
-rw-r--r--drivers/block/skd_main.c10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 8040500ba09c..3db89707b227 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2806,7 +2806,7 @@ static void skd_completion_worker(struct work_struct *work)
* process everything in compq
*/
skd_isr_completion_posted(skdev, 0, &flush_enqueued);
- skd_request_fn(skdev->queue);
+ blk_run_queue_async(skdev->queue);
spin_unlock_irqrestore(&skdev->lock, flags);
}
@@ -2882,12 +2882,12 @@ skd_isr(int irq, void *ptr)
}
if (unlikely(flush_enqueued))
- skd_request_fn(skdev->queue);
+ blk_run_queue_async(skdev->queue);
if (deferred)
schedule_work(&skdev->completion_worker);
else if (!flush_enqueued)
- skd_request_fn(skdev->queue);
+ blk_run_queue_async(skdev->queue);
spin_unlock(&skdev->lock);
@@ -3588,12 +3588,12 @@ static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
&flush_enqueued);
if (flush_enqueued)
- skd_request_fn(skdev->queue);
+ blk_run_queue_async(skdev->queue);
if (deferred)
schedule_work(&skdev->completion_worker);
else if (!flush_enqueued)
- skd_request_fn(skdev->queue);
+ blk_run_queue_async(skdev->queue);
spin_unlock_irqrestore(&skdev->lock, flags);