author:    Christoph Hellwig <hch@lst.de>  2014-04-11 19:07:01 +0200
committer: Christoph Hellwig <hch@lst.de>  2014-07-25 13:43:45 +0200
commit:    71e75c97f97a9645d25fbf3d8e4165a558f18747
tree:      fb85185386af55199c46499dc3ce366d227870e1 /drivers/scsi/scsi_lib.c
parent:    scsi: convert host_busy to atomic_t
scsi: convert device_busy to atomic_t
Avoid taking the queue_lock to check the per-device queue limit. Instead we do an atomic_inc_return early on to grab our slot in the queue, and if necessary decrement it after finishing all checks.

Unlike the host and target busy counters this doesn't allow us to avoid the queue_lock in the request_fn due to the way the interface works, but it'll allow us to prepare for using the blk-mq code, which doesn't use the queue_lock at all, and it at least avoids a queue_lock round trip in scsi_device_unbusy, which is still important given how busy the queue_lock is.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>
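The pattern at the heart of the patch is an optimistic slot reservation: atomically take a slot in device_busy first, run the limit and blocked checks, and give the slot back on any failure path. A minimal userspace sketch of that pattern, using C11 atomics in place of the kernel's atomic_t; struct fake_sdev and dev_queue_ready are illustrative stand-ins, not the real SCSI definitions:

#include <stdatomic.h>
#include <stdbool.h>

struct fake_sdev {                      /* hypothetical stand-in */
	atomic_uint device_busy;        /* commands in flight */
	unsigned int queue_depth;       /* per-device limit */
};

/* Returns true if the caller now owns one queue slot. */
static bool dev_queue_ready(struct fake_sdev *sdev)
{
	/* Grab a slot first; atomic_fetch_add returns the old value,
	 * mirroring the patch's atomic_inc_return(&sdev->device_busy) - 1. */
	unsigned int busy = atomic_fetch_add(&sdev->device_busy, 1);

	if (busy >= sdev->queue_depth) {
		/* Over the limit: back out the speculative increment,
		 * as the patch's out_dec label does. */
		atomic_fetch_sub(&sdev->device_busy, 1);
		return false;
	}
	return true;
}

Transient over-counting while a losing thread backs out is harmless: the counter never under-counts, which is all the queue-depth check needs, and neither path touches a lock.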
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c | 50 ++++++++++++++++++++++++++++----------------------
1 file changed, 28 insertions(+), 22 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d0bd7e0ab7a8..1ddf0fb43b59 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -302,9 +302,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
spin_unlock_irqrestore(shost->host_lock, flags);
}
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
- sdev->device_busy--;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ atomic_dec(&sdev->device_busy);
}
/*
@@ -355,9 +353,9 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
- if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
+ if (atomic_read(&sdev->device_busy) >= sdev->queue_depth ||
+ sdev->device_blocked)
return 1;
-
return 0;
}
@@ -1204,7 +1202,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
* queue must be restarted, so we schedule a callback to happen
* shortly.
*/
- if (sdev->device_busy == 0)
+ if (atomic_read(&sdev->device_busy) == 0)
blk_delay_queue(q, SCSI_QUEUE_DELAY);
break;
default:
@@ -1255,26 +1253,33 @@ static void scsi_unprep_fn(struct request_queue *q, struct request *req)
static inline int scsi_dev_queue_ready(struct request_queue *q,
struct scsi_device *sdev)
{
- if (sdev->device_busy == 0 && sdev->device_blocked) {
+ unsigned int busy;
+
+ busy = atomic_inc_return(&sdev->device_busy) - 1;
+ if (sdev->device_blocked) {
+ if (busy)
+ goto out_dec;
+
/*
* unblock after device_blocked iterates to zero
*/
- if (--sdev->device_blocked == 0) {
- SCSI_LOG_MLQUEUE(3,
- sdev_printk(KERN_INFO, sdev,
- "unblocking device at zero depth\n"));
- } else {
+ if (--sdev->device_blocked != 0) {
blk_delay_queue(q, SCSI_QUEUE_DELAY);
- return 0;
+ goto out_dec;
}
+ SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
+ "unblocking device at zero depth\n"));
}
- if (scsi_device_is_busy(sdev))
- return 0;
+
+ if (busy >= sdev->queue_depth)
+ goto out_dec;
return 1;
+out_dec:
+ atomic_dec(&sdev->device_busy);
+ return 0;
}
-
/*
* scsi_target_queue_ready: checks if we can send commands to the target
* @sdev: scsi device on starget to check.
@@ -1448,7 +1453,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
* bump busy counts. To bump the counters, we need to dance
* with the locks as normal issue path does.
*/
- sdev->device_busy++;
+ atomic_inc(&sdev->device_busy);
atomic_inc(&shost->host_busy);
atomic_inc(&starget->target_busy);
@@ -1544,7 +1549,7 @@ static void scsi_request_fn(struct request_queue *q)
* accept it.
*/
req = blk_peek_request(q);
- if (!req || !scsi_dev_queue_ready(q, sdev))
+ if (!req)
break;
if (unlikely(!scsi_device_online(sdev))) {
@@ -1554,13 +1559,14 @@ static void scsi_request_fn(struct request_queue *q)
continue;
}
+ if (!scsi_dev_queue_ready(q, sdev))
+ break;
/*
* Remove the request from the request list.
*/
if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
blk_start_request(req);
- sdev->device_busy++;
spin_unlock_irq(q->queue_lock);
cmd = req->special;
@@ -1630,9 +1636,9 @@ static void scsi_request_fn(struct request_queue *q)
*/
spin_lock_irq(q->queue_lock);
blk_requeue_request(q, req);
- sdev->device_busy--;
+ atomic_dec(&sdev->device_busy);
out_delay:
- if (sdev->device_busy == 0 && !scsi_device_blocked(sdev))
+ if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
blk_delay_queue(q, SCSI_QUEUE_DELAY);
}
@@ -2371,7 +2377,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
return err;
scsi_run_queue(sdev->request_queue);
- while (sdev->device_busy) {
+ while (atomic_read(&sdev->device_busy)) {
msleep_interruptible(200);
scsi_run_queue(sdev->request_queue);
}