| author | Nicholas Bellinger <nab@linux-iscsi.org> | 2011-07-21 06:41:48 +0200 |
|---|---|---|
| committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2011-07-22 11:37:48 +0200 |
| commit | 1d20bb6147954d4fbd337a3d1b40c7eeae254cd7 (patch) | |
| tree | 23dcdd9376c5e26acfd2817812e2d5ccfbf81149 /drivers/target | |
| parent | target: Follow up core updates from AGrover and HCH (round 4) (diff) | |
target: ->map_task_SG conversion to ->map_control_SG and ->map_data_SG
This patch breaks up the ->map_task_SG() backend call into two separate
callbacks, ->map_control_SG() and ->map_data_SG(), in order to better address
IBLOCK and pSCSI: IBLOCK only allocates bios for ->map_data_SG(), while
pSCSI allocates a struct request in both cases.
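For illustration, here is a hedged sketch of how the two backends wire the new callbacks after this change. It is not the kernel code: struct se_subsystem_api_sketch and the stub bodies are invented stand-ins, while the callback and handler names (iblock_map_data_SG, pscsi_map_SG) come from the diff below.

	/* Sketch only: an abridged stand-in for se_subsystem_api. */
	struct se_task;		/* opaque here; real definition elided */

	struct se_subsystem_api_sketch {
		const char *name;
		int (*map_control_SG)(struct se_task *);
		int (*map_data_SG)(struct se_task *);
	};

	/* Stub handlers standing in for the real mapping routines. */
	static int iblock_map_data_SG(struct se_task *task) { return 0; }
	static int pscsi_map_SG(struct se_task *task)       { return 0; }

	/* IBLOCK: bios are only built for data-path SGLs, so only
	 * .map_data_SG is set and control CDBs never map into bios. */
	static struct se_subsystem_api_sketch iblock_template_sketch = {
		.name           = "iblock",
		.map_data_SG    = iblock_map_data_SG,
	};

	/* pSCSI: control and data CDBs both build a struct request, so
	 * both callbacks point at the same routine. */
	static struct se_subsystem_api_sketch pscsi_template_sketch = {
		.name           = "pscsi",
		.map_control_SG = pscsi_map_SG,
		.map_data_SG    = pscsi_map_SG,
	};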
This patch also fixes incorrect usage of ->map_task_SG() for all se_cmd descriptors
in transport_generic_new_cmd() by moving the call into its proper location,
directly inside transport_allocate_data_tasks().
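The relocated per-task pass can be pictured with the following hedged sketch. struct task_sketch and map_data_pass() are invented stand-ins; the real loop, shown in the target_core_transport.c hunk below, walks cmd->t_task_list and checks the atomic task->task_sent flag under the backend's map_data_SG callback.

	/* Sketch only: the ->map_data_SG() pass that now runs at the end of
	 * transport_allocate_data_tasks() instead of transport_generic_new_cmd(). */
	struct task_sketch {
		int sent;			/* stand-in for atomic_read(&task->task_sent) */
		struct task_sketch *next;	/* stand-in for walking cmd->t_task_list */
	};

	static int map_data_pass(struct task_sketch *head,
				 int (*map_data_SG)(struct task_sketch *))
	{
		struct task_sketch *t;

		if (!map_data_SG)		/* backends without a mapping callback skip this */
			return 0;

		for (t = head; t; t = t->next) {
			if (t->sent)		/* tasks already dispatched are left alone */
				continue;
			if (map_data_SG(t) < 0)
				return -1;	/* the kernel hunk signals failure by returning 0 tasks */
		}
		return 0;
	}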
Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
| -rw-r--r-- | drivers/target/target_core_iblock.c | 4 |
| -rw-r--r-- | drivers/target/target_core_pscsi.c | 14 |
| -rw-r--r-- | drivers/target/target_core_transport.c | 45 |

3 files changed, 36 insertions, 27 deletions
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 251fc66a8212..7e1234105442 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -591,7 +591,7 @@ static struct bio *iblock_get_bio(
 	return bio;
 }
 
-static int iblock_map_task_SG(struct se_task *task)
+static int iblock_map_data_SG(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
@@ -755,7 +755,7 @@ static struct se_subsystem_api iblock_template = {
 	.name			= "iblock",
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
-	.map_task_SG		= iblock_map_task_SG,
+	.map_data_SG		= iblock_map_data_SG,
 	.attach_hba		= iblock_attach_hba,
 	.detach_hba		= iblock_detach_hba,
 	.allocate_virtdevice	= iblock_allocate_virtdevice,
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index a2ce5998d318..2b7b0da9146d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1049,7 +1049,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
 	return bio;
 }
 
-static int __pscsi_map_task_SG(
+static int __pscsi_map_SG(
 	struct se_task *task,
 	struct scatterlist *task_sg,
 	u32 task_sg_num,
@@ -1198,7 +1198,10 @@ fail:
 	return ret;
 }
 
-static int pscsi_map_task_SG(struct se_task *task)
+/*
+ * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call.
+ */
+static int pscsi_map_SG(struct se_task *task)
 {
 	int ret;
 
@@ -1206,13 +1209,13 @@ static int pscsi_map_task_SG(struct se_task *task)
 	/*
 	 * Setup the main struct request for the task->task_sg[] payload
 	 */
-	ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_nents, 0);
+	ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0);
 	if (ret >= 0 && task->task_sg_bidi) {
 		/*
 		 * If present, set up the extra BIDI-COMMAND SCSI READ
 		 * struct request and payload.
 		 */
-		ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
+		ret = __pscsi_map_SG(task, task->task_sg_bidi,
 				task->task_sg_nents, 1);
 	}
 
@@ -1340,7 +1343,8 @@ static struct se_subsystem_api pscsi_template = {
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_PHBA_PDEV,
 	.cdb_none		= pscsi_CDB_none,
-	.map_task_SG		= pscsi_map_task_SG,
+	.map_control_SG		= pscsi_map_SG,
+	.map_data_SG		= pscsi_map_SG,
 	.attach_hba		= pscsi_attach_hba,
 	.detach_hba		= pscsi_detach_hba,
 	.pmode_enable_hba	= pscsi_pmode_enable_hba,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 55b6588904a4..007cfc164f5e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -4081,8 +4081,7 @@ static int transport_allocate_data_tasks(
 	struct se_device *dev = cmd->se_dev;
 	unsigned long flags;
 	sector_t sectors;
-	int task_count;
-	int i;
+	int task_count, i, ret;
 	sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
 	u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
 	struct scatterlist *sg;
@@ -4129,7 +4128,7 @@ static int transport_allocate_data_tasks(
 			task->task_padded_sg = 1;
 		}
 
-		task->task_sg = kmalloc(sizeof(struct scatterlist) * \
+		task->task_sg = kmalloc(sizeof(struct scatterlist) *
 				task->task_sg_nents, GFP_KERNEL);
 		if (!task->task_sg) {
 			cmd->se_dev->transport->free_task(task);
@@ -4157,6 +4156,20 @@ static int transport_allocate_data_tasks(
 		list_add_tail(&task->t_list, &cmd->t_task_list);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	}
+	/*
+	 * Now perform the memory map of task->task_sg[] into backend
+	 * subsystem memory..
+	 */
+	list_for_each_entry(task, &cmd->t_task_list, t_list) {
+		if (atomic_read(&task->task_sent))
+			continue;
+		if (!dev->transport->map_data_SG)
+			continue;
+
+		ret = dev->transport->map_data_SG(task);
+		if (ret < 0)
+			return 0;
+	}
 
 	return task_count;
 }
@@ -4196,8 +4209,8 @@ transport_allocate_control_task(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
-		if (dev->transport->map_task_SG)
-			ret = dev->transport->map_task_SG(task);
+		if (dev->transport->map_control_SG)
+			ret = dev->transport->map_control_SG(task);
 	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
 		if (dev->transport->cdb_none)
 			ret = dev->transport->cdb_none(task);
@@ -4239,8 +4252,6 @@ static u32 transport_allocate_tasks(
  */
 int transport_generic_new_cmd(struct se_cmd *cmd)
 {
-	struct se_task *task;
-	struct se_device *dev = cmd->se_dev;
 	int ret = 0;
 
 	/*
@@ -4254,22 +4265,16 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 		if (ret < 0)
 			return ret;
 	}
-
+	/*
+	 * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
+	 * control or data CDB types, and perform the map to backend subsystem
+	 * code from SGL memory allocated here by transport_generic_get_mem(), or
+	 * via pre-existing SGL memory setup explictly by fabric module code with
+	 * transport_generic_map_mem_to_cmd().
+	 */
 	ret = transport_new_cmd_obj(cmd);
 	if (ret < 0)
 		return ret;
-
-	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		if (atomic_read(&task->task_sent))
-			continue;
-		if (!dev->transport->map_task_SG)
-			continue;
-
-		ret = dev->transport->map_task_SG(task);
-		if (ret < 0)
-			return ret;
-	}
-
 	/*
 	 * For WRITEs, let the fabric know its buffer is ready..
 	 * This WRITE struct se_cmd (and all of its associated struct se_task's)