author		Mike Christie <michael.christie@oracle.com>	2021-02-27 18:00:02 +0100
committer	Martin K. Petersen <martin.petersen@oracle.com>	2021-03-04 23:37:02 +0100
commit		302990ac3b1b1a2b7b66f59a5c88038a51fbe18e (patch)
tree		be970c2bc90c9b7f68cc2c2217c2f5168bbdc68b /drivers/target
parent		scsi: target: core: Cleanup cmd flag bits (diff)
download	linux-302990ac3b1b1a2b7b66f59a5c88038a51fbe18e.tar.xz
		linux-302990ac3b1b1a2b7b66f59a5c88038a51fbe18e.zip
scsi: target: core: Fix backend plugging
target_core_iblock is plugging and unplugging on every command, which causes performance issues for drivers that prefer batched commands. With recent patches we can now take multiple commands from a fabric driver queue and then pass them down to the backend drivers in a batch.

This patch adds that support by adding two callouts to the backend for plugging and unplugging the device. Subsequent commits will add support for iblock and tcmu device plugging.

Link: https://lore.kernel.org/r/20210227170006.5077-22-michael.christie@oracle.com
Reviewed-by: Bodo Stroesser <bostroesser@gmail.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
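The header-side change is not visible below (the diffstat is limited to drivers/target), but from the way the hunks invoke se_dev->transport->plug_device() and ->unplug_device(), the new plug handle and the two optional backend callouts look roughly like the following sketch. Treat the exact declarations as an assumption inferred from this patch, not as the actual header diff.

struct se_dev_plug {
	struct se_device *se_dev;	/* filled in by the core before the batch runs */
};

struct target_backend_ops {
	/* ... existing callouts ... */

	/*
	 * Optional: called once before a batch of queued commands is
	 * submitted. Returning NULL means there is nothing to plug and
	 * the batch proceeds unplugged.
	 */
	struct se_dev_plug *(*plug_device)(struct se_device *se_dev);

	/*
	 * Counterpart, called once after the whole batch has been
	 * submitted (only reached when plug_device() returned a handle).
	 */
	void (*unplug_device)(struct se_dev_plug *se_plug);
};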
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/target_core_transport.c	43
1 file changed, 42 insertions(+), 1 deletion(-)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index eea7c27dc4cd..1245c288d3bf 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1807,10 +1807,42 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
 }
 EXPORT_SYMBOL(target_submit_cmd);
+
+static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
+{
+	struct se_dev_plug *se_plug;
+
+	if (!se_dev->transport->plug_device)
+		return NULL;
+
+	se_plug = se_dev->transport->plug_device(se_dev);
+	if (!se_plug)
+		return NULL;
+
+	se_plug->se_dev = se_dev;
+	/*
+	 * We have a ref to the lun at this point, but the cmds could
+	 * complete before we unplug, so grab a ref to the se_device so we
+	 * can call back into the backend.
+	 */
+	config_group_get(&se_dev->dev_group);
+	return se_plug;
+}
+
+static void target_unplug_device(struct se_dev_plug *se_plug)
+{
+	struct se_device *se_dev = se_plug->se_dev;
+
+	se_dev->transport->unplug_device(se_plug);
+	config_group_put(&se_dev->dev_group);
+}
+
 void target_queued_submit_work(struct work_struct *work)
 {
	struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
	struct se_cmd *se_cmd, *next_cmd;
+	struct se_dev_plug *se_plug = NULL;
+	struct se_device *se_dev = NULL;
	struct llist_node *cmd_list;
	cmd_list = llist_del_all(&sq->cmd_list);
@@ -1819,8 +1851,17 @@ void target_queued_submit_work(struct work_struct *work)
		return;
	cmd_list = llist_reverse_order(cmd_list);
-	llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list)
+	llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
+		if (!se_dev) {
+			se_dev = se_cmd->se_dev;
+			se_plug = target_plug_device(se_dev);
+		}
+
		target_submit(se_cmd);
+	}
+
+	if (se_plug)
+		target_unplug_device(se_plug);
 }
 /**
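
For context on how a backend might use these callouts: the iblock and tcmu implementations land in subsequent commits and are not part of this diff, but a block-backed backend would typically wrap the block layer's blk_start_plug()/blk_finish_plug() pair. The sketch below is illustrative only; the example_* names and the single per-device plug state are assumptions, not the actual iblock code.

/* Hypothetical backend-side sketch -- not part of this patch. */
#define EXAMPLE_PLUGGED		0

struct example_dev_plug {
	struct se_dev_plug se_plug;	/* embedded handle returned to the core */
	struct blk_plug blk_plug;	/* block layer batching state */
	unsigned long flags;		/* EXAMPLE_PLUGGED guards double plugging */
};

struct example_dev {
	struct se_device se_dev;
	struct example_dev_plug plug;	/* a real backend keeps one per cpu */
};

static struct se_dev_plug *example_plug_device(struct se_device *se_dev)
{
	struct example_dev *edev = container_of(se_dev, struct example_dev, se_dev);
	struct example_dev_plug *dp = &edev->plug;

	/* Already plugged elsewhere: let this batch go out unplugged. */
	if (test_and_set_bit(EXAMPLE_PLUGGED, &dp->flags))
		return NULL;

	blk_start_plug(&dp->blk_plug);
	return &dp->se_plug;
}

static void example_unplug_device(struct se_dev_plug *se_plug)
{
	struct example_dev_plug *dp =
		container_of(se_plug, struct example_dev_plug, se_plug);

	blk_finish_plug(&dp->blk_plug);
	clear_bit(EXAMPLE_PLUGGED, &dp->flags);
}

Such a backend would then advertise the pair in its target_backend_ops, e.g. .plug_device = example_plug_device and .unplug_device = example_unplug_device, and the core worker above would plug once per batch instead of once per command.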