Diffstat (limited to 'drivers/scsi/virtio_scsi.c')
-rw-r--r-- | drivers/scsi/virtio_scsi.c | 101
1 files changed, 87 insertions, 14 deletions
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 89ee5929eb6d..eee1bc0b506e 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -27,6 +27,8 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/seqlock.h>
 
 #define VIRTIO_SCSI_MEMPOOL_SZ 64
 #define VIRTIO_SCSI_EVENT_LEN 8
@@ -75,18 +77,16 @@ struct virtio_scsi_vq {
  * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
  * (each virtqueue's affinity is set to the CPU that "owns" the queue).
  *
- * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq
- * could be done locklessly, but we do not do it yet.
+ * tgt_seq is held to serialize reading and writing req_vq.
  *
  * Decrements of reqs are never concurrent with writes of req_vq: before the
  * decrement reqs will be != 0; after the decrement the virtqueue completion
  * routine will not use the req_vq so it can be changed by a new request.
- * Thus they can happen outside the tgt_lock, provided of course we make reqs
+ * Thus they can happen outside the tgt_seq, provided of course we make reqs
  * an atomic_t.
  */
 struct virtio_scsi_target_state {
-	/* This spinlock never held at the same time as vq_lock. */
-	spinlock_t tgt_lock;
+	seqcount_t tgt_seq;
 
 	/* Count of outstanding requests. */
 	atomic_t reqs;
@@ -237,6 +237,16 @@ static void virtscsi_req_done(struct virtqueue *vq)
 	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
 };
 
+static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
+{
+	int i, num_vqs;
+
+	num_vqs = vscsi->num_queues;
+	for (i = 0; i < num_vqs; i++)
+		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
+				 virtscsi_complete_cmd);
+}
+
 static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
 {
 	struct virtio_scsi_cmd *cmd = buf;
@@ -253,6 +263,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
 	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
 };
 
+static void virtscsi_handle_event(struct work_struct *work);
+
 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 			       struct virtio_scsi_event_node *event_node)
 {
@@ -260,6 +272,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
 	struct scatterlist sg;
 	unsigned long flags;
 
+	INIT_WORK(&event_node->work, virtscsi_handle_event);
 	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
 
 	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
@@ -377,7 +390,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
 {
 	struct virtio_scsi_event_node *event_node = buf;
 
-	INIT_WORK(&event_node->work, virtscsi_handle_event);
 	schedule_work(&event_node->work);
 }
 
@@ -547,19 +559,33 @@ static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
 	struct virtio_scsi_vq *vq;
 	unsigned long flags;
 	u32 queue_num;
 
-	spin_lock_irqsave(&tgt->tgt_lock, flags);
+	local_irq_save(flags);
+	if (atomic_inc_return(&tgt->reqs) > 1) {
+		unsigned long seq;
+
+		do {
+			seq = read_seqcount_begin(&tgt->tgt_seq);
+			vq = tgt->req_vq;
+		} while (read_seqcount_retry(&tgt->tgt_seq, seq));
+	} else {
+		/* no writes can be concurrent because of atomic_t */
+		write_seqcount_begin(&tgt->tgt_seq);
+
+		/* keep previous req_vq if a reader just arrived */
+		if (unlikely(atomic_read(&tgt->reqs) > 1)) {
+			vq = tgt->req_vq;
+			goto unlock;
+		}
 
-	if (atomic_inc_return(&tgt->reqs) > 1)
-		vq = tgt->req_vq;
-	else {
 		queue_num = smp_processor_id();
 		while (unlikely(queue_num >= vscsi->num_queues))
 			queue_num -= vscsi->num_queues;
-
 		tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
+ unlock:
+		write_seqcount_end(&tgt->tgt_seq);
 	}
+	local_irq_restore(flags);
 
-	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
 	return vq;
 }
@@ -589,6 +615,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
 	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
 		ret = SUCCESS;
 
+	/*
+	 * The spec guarantees that all requests related to the TMF have
+	 * been completed, but the callback might not have run yet if
+	 * we're using independent interrupts (e.g. MSI).  Poll the
+	 * virtqueues once.
+	 *
+	 * In the abort case, sc->scsi_done will do nothing, because
+	 * the block layer must have detected a timeout and as a result
+	 * REQ_ATOM_COMPLETE has been set.
+	 */
+	virtscsi_poll_requests(vscsi);
+
 out:
 	mempool_free(cmd, virtscsi_cmd_pool);
 	return ret;
@@ -617,6 +655,36 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
 	return virtscsi_tmf(vscsi, cmd);
 }
 
+/**
+ * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
+ * @sdev:	Virtscsi target whose queue depth to change
+ * @qdepth:	New queue depth
+ * @reason:	Reason for the queue depth change
+ */
+static int virtscsi_change_queue_depth(struct scsi_device *sdev,
+				       int qdepth,
+				       int reason)
+{
+	struct Scsi_Host *shost = sdev->host;
+	int max_depth = shost->cmd_per_lun;
+
+	switch (reason) {
+	case SCSI_QDEPTH_QFULL: /* Drop qdepth in response to BUSY state */
+		scsi_track_queue_full(sdev, qdepth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP: /* Raise qdepth after BUSY state resolved */
+	case SCSI_QDEPTH_DEFAULT: /* Manual change via sysfs */
+		scsi_adjust_queue_depth(sdev,
+					scsi_get_tag_type(sdev),
+					min(max_depth, qdepth));
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return sdev->queue_depth;
+}
+
 static int virtscsi_abort(struct scsi_cmnd *sc)
 {
 	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
@@ -643,14 +711,17 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
 
 static int virtscsi_target_alloc(struct scsi_target *starget)
 {
+	struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
+	struct virtio_scsi *vscsi = shost_priv(sh);
+
 	struct virtio_scsi_target_state *tgt =
 				kmalloc(sizeof(*tgt), GFP_KERNEL);
 	if (!tgt)
 		return -ENOMEM;
 
-	spin_lock_init(&tgt->tgt_lock);
+	seqcount_init(&tgt->tgt_seq);
 	atomic_set(&tgt->reqs, 0);
-	tgt->req_vq = NULL;
+	tgt->req_vq = &vscsi->req_vqs[0];
 
 	starget->hostdata = tgt;
 	return 0;
@@ -669,6 +740,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
 	.this_id = -1,
 	.cmd_size = sizeof(struct virtio_scsi_cmd),
 	.queuecommand = virtscsi_queuecommand_single,
+	.change_queue_depth = virtscsi_change_queue_depth,
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
 
@@ -686,6 +758,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
 	.this_id = -1,
 	.cmd_size = sizeof(struct virtio_scsi_cmd),
 	.queuecommand = virtscsi_queuecommand_multi,
+	.change_queue_depth = virtscsi_change_queue_depth,
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,
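A note on the new .change_queue_depth handler: the reason codes come from the SCSI midlayer of this kernel generation. SCSI_QDEPTH_QFULL is passed when a device reports a queue-full condition and the depth should shrink (delegated to scsi_track_queue_full()), SCSI_QDEPTH_RAMP_UP when the midlayer wants to raise the depth again after the condition clears, and SCSI_QDEPTH_DEFAULT for manual changes through the sdev's queue_depth sysfs attribute; in the latter two cases the requested depth is clamped to the host template's cmd_per_lun.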
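The core of this change is the lockless protocol in virtscsi_pick_vq(): the request that takes a target's reqs count from 0 to 1 is the only possible writer of req_vq, so it publishes the new value between write_seqcount_begin() and write_seqcount_end(), while every concurrent request reads req_vq inside a read_seqcount_begin()/read_seqcount_retry() loop. Below is a minimal userspace sketch of that protocol, assuming C11 atomics in place of the kernel's <linux/seqlock.h>; the names (tgt_seq, pick_queue, ...) are illustrative, not part of the driver, and seq_cst ordering stands in for the finer-grained barriers the kernel uses.

/* Toy model of the virtscsi_pick_vq() seqcount protocol.
 * Build: cc -std=c11 -o toy_seq toy_seq.c
 */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_QUEUES 4

static atomic_uint tgt_seq;	/* odd value = write in progress */
static atomic_int  reqs;	/* outstanding requests for the target */
static int req_vq;		/* published queue; plain int as in the driver
				 * (a strict C11 version would use a relaxed
				 * atomic), made consistent by the retry loop */

static int pick_queue(int cpu)
{
	int vq;

	if (atomic_fetch_add(&reqs, 1) + 1 > 1) {
		/* Reader: spin past an active writer, retry torn reads. */
		unsigned seq;
		do {
			while ((seq = atomic_load(&tgt_seq)) & 1)
				;	/* writer holds the seqcount */
			vq = req_vq;
		} while (atomic_load(&tgt_seq) != seq);
	} else {
		/* Writer: reqs went 0 -> 1, so no other writer can race us. */
		atomic_fetch_add(&tgt_seq, 1);		/* seq becomes odd */
		if (atomic_load(&reqs) > 1) {
			vq = req_vq;	/* a reader just arrived: keep value */
		} else {
			/* The driver open-codes this modulo as a
			 * subtraction loop over num_queues. */
			req_vq = vq = cpu % NUM_QUEUES;
		}
		atomic_fetch_add(&tgt_seq, 1);		/* seq even again */
	}
	return vq;
}

int main(void)
{
	printf("first request  -> vq %d\n", pick_queue(6)); /* writer path */
	printf("second request -> vq %d\n", pick_queue(1)); /* reader path */
	atomic_fetch_sub(&reqs, 1);	/* completions decrement reqs ... */
	atomic_fetch_sub(&reqs, 1);	/* ... re-arming the writer path */
	return 0;
}

One detail the sketch drops: the real code wraps the whole sequence in local_irq_save()/local_irq_restore(), so the writer cannot be interrupted on its own CPU between the two seqcount bumps while a reader spins on the odd count.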