author		Linus Torvalds <torvalds@linux-foundation.org>	2020-12-16 22:34:31 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-16 22:34:31 +0100
commit		60f7c503d971a731ee3c4f884a9f2e80d476730d
tree		30e23c822306b0e407f6218135feb5b2e847665d /drivers/target
parent		Merge tag 'for-5.11/drivers-2020-12-14' of git://git.kernel.dk/linux-block
parent		scsi: mpt3sas: Update driver version to 36.100.00.00
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This consists of the usual driver updates (ufs, qla2xxx, smartpqi,
target, zfcp, fnic, mpt3sas, ibmvfc) plus a load of cleanups, a major
power management rework and a load of assorted minor updates.
There are a few core updates (formatting fixes being the big one) but
nothing major this cycle"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (279 commits)
scsi: mpt3sas: Update driver version to 36.100.00.00
scsi: mpt3sas: Handle trigger page after firmware update
scsi: mpt3sas: Add persistent MPI trigger page
scsi: mpt3sas: Add persistent SCSI sense trigger page
scsi: mpt3sas: Add persistent Event trigger page
scsi: mpt3sas: Add persistent Master trigger page
scsi: mpt3sas: Add persistent trigger pages support
scsi: mpt3sas: Sync time periodically between driver and firmware
scsi: qla2xxx: Update version to 10.02.00.104-k
scsi: qla2xxx: Fix device loss on 4G and older HBAs
scsi: qla2xxx: If fcport is undergoing deletion complete I/O with retry
scsi: qla2xxx: Fix the call trace for flush workqueue
scsi: qla2xxx: Fix flash update in 28XX adapters on big endian machines
scsi: qla2xxx: Handle aborts correctly for port undergoing deletion
scsi: qla2xxx: Fix N2N and NVMe connect retry failure
scsi: qla2xxx: Fix FW initialization error on big endian machines
scsi: qla2xxx: Fix crash during driver load on big endian machines
scsi: qla2xxx: Fix compilation issue in PPC systems
scsi: qla2xxx: Don't check for fw_started while posting NVMe command
scsi: qla2xxx: Tear down session if FW say it is down
...
Diffstat (limited to 'drivers/target')
 drivers/target/loopback/tcm_loop.c      |  14
 drivers/target/target_core_device.c     |  59
 drivers/target/target_core_iblock.c     |   1
 drivers/target/target_core_pr.c         |   1
 drivers/target/target_core_sbc.c        | 139
 drivers/target/target_core_tmr.c        | 166
 drivers/target/target_core_tpg.c        |   2
 drivers/target/target_core_transport.c  | 170
 drivers/target/target_core_user.c       | 164
 drivers/target/tcm_fc/tfc_cmd.c         |   3
 drivers/target/tcm_fc/tfc_io.c          |   1
 drivers/target/tcm_fc/tfc_sess.c        |   2
 12 files changed, 365 insertions(+), 357 deletions(-)
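The dominant change in this diffstat is the replacement of the single device-wide state list (dev->state_list under dev->execute_task_lock) with an array of per-CPU queues, each carrying its own list and lock; the device.c, tmr.c, and transport.c hunks below all follow from it. A minimal sketch of the allocation pattern, condensed from the target_alloc_device() hunk (the struct name se_device_queue and the helper are assumptions for illustration; only the fields and calls appear in the diff):

	struct se_device_queue {		/* assumed name; fields match the hunks */
		struct list_head state_list;
		spinlock_t lock;
	};

	/* hypothetical helper condensing the target_alloc_device() hunk */
	static int example_init_queues(struct se_device *dev)
	{
		int i;

		dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
		if (!dev->queues)
			return -ENOMEM;

		dev->queue_cnt = nr_cpu_ids;
		for (i = 0; i < dev->queue_cnt; i++) {
			INIT_LIST_HEAD(&dev->queues[i].state_list);
			spin_lock_init(&dev->queues[i].lock);
		}
		return 0;
	}

Commands are steered to a queue through cmd->cpuid, which transport_init_se_cmd() now sets from smp_processor_id() unless the fabric requested TARGET_SCF_USE_CPUID, and completions always run via queue_work_on(cmd->cpuid, ...). On the initiator side, the tcm_loop hunk exposes matching knobs as writable module parameters, e.g. modprobe tcm_loop nr_hw_queues=4 can_queue=2048.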
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 16d5a4e117a2..badba437e5f9 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -46,6 +46,15 @@ static int tcm_loop_hba_no_cnt; static int tcm_loop_queue_status(struct se_cmd *se_cmd); +static unsigned int tcm_loop_nr_hw_queues = 1; +module_param_named(nr_hw_queues, tcm_loop_nr_hw_queues, uint, 0644); + +static unsigned int tcm_loop_can_queue = 1024; +module_param_named(can_queue, tcm_loop_can_queue, uint, 0644); + +static unsigned int tcm_loop_cmd_per_lun = 1024; +module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644); + /* * Called from struct target_core_fabric_ops->check_stop_free() */ @@ -305,10 +314,8 @@ static struct scsi_host_template tcm_loop_driver_template = { .eh_abort_handler = tcm_loop_abort_task, .eh_device_reset_handler = tcm_loop_device_reset, .eh_target_reset_handler = tcm_loop_target_reset, - .can_queue = 1024, .this_id = -1, .sg_tablesize = 256, - .cmd_per_lun = 1024, .max_sectors = 0xFFFF, .dma_boundary = PAGE_SIZE - 1, .module = THIS_MODULE, @@ -342,6 +349,9 @@ static int tcm_loop_driver_probe(struct device *dev) sh->max_lun = 0; sh->max_channel = 0; sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; + sh->nr_hw_queues = tcm_loop_nr_hw_queues; + sh->can_queue = tcm_loop_can_queue; + sh->cmd_per_lun = tcm_loop_cmd_per_lun; host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 405d82d44717..7787c527aad3 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -65,6 +65,16 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd) atomic_long_add(se_cmd->data_length, &deve->read_bytes); + if ((se_cmd->data_direction == DMA_TO_DEVICE) && + deve->lun_access_ro) { + pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + " Access for 0x%08llx\n", + se_cmd->se_tfo->fabric_name, + se_cmd->orig_fe_lun); + rcu_read_unlock(); + return TCM_WRITE_PROTECTED; + } + se_lun = rcu_dereference(deve->se_lun); if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { @@ -76,17 +86,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd) se_cmd->pr_res_key = deve->pr_res_key; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; se_cmd->lun_ref_active = true; - - if ((se_cmd->data_direction == DMA_TO_DEVICE) && - deve->lun_access_ro) { - pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" - " Access for 0x%08llx\n", - se_cmd->se_tfo->fabric_name, - se_cmd->orig_fe_lun); - rcu_read_unlock(); - ret = TCM_WRITE_PROTECTED; - goto ref_dev; - } } out_unlock: rcu_read_unlock(); @@ -106,21 +105,20 @@ out_unlock: return TCM_NON_EXISTENT_LUN; } - se_lun = se_sess->se_tpg->tpg_virt_lun0; - se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; - se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; - - percpu_ref_get(&se_lun->lun_ref); - se_cmd->lun_ref_active = true; - /* * Force WRITE PROTECT for virtual LUN 0 */ if ((se_cmd->data_direction != DMA_FROM_DEVICE) && - (se_cmd->data_direction != DMA_NONE)) { - ret = TCM_WRITE_PROTECTED; - goto ref_dev; - } + (se_cmd->data_direction != DMA_NONE)) + return TCM_WRITE_PROTECTED; + + se_lun = se_sess->se_tpg->tpg_virt_lun0; + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) + return TCM_NON_EXISTENT_LUN; + + se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + se_cmd->lun_ref_active = true; } /* * RCU reference protected by percpu 
se_lun->lun_ref taken above that @@ -128,7 +126,6 @@ out_unlock: * pointer can be kfree_rcu() by the final se_lun->lun_group put via * target_core_fabric_configfs.c:target_fabric_port_release */ -ref_dev: se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); atomic_long_inc(&se_cmd->se_dev->num_cmds); @@ -724,11 +721,24 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) { struct se_device *dev; struct se_lun *xcopy_lun; + int i; dev = hba->backend->ops->alloc_device(hba, name); if (!dev) return NULL; + dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL); + if (!dev->queues) { + dev->transport->free_device(dev); + return NULL; + } + + dev->queue_cnt = nr_cpu_ids; + for (i = 0; i < dev->queue_cnt; i++) { + INIT_LIST_HEAD(&dev->queues[i].state_list); + spin_lock_init(&dev->queues[i].lock); + } + dev->se_hba = hba; dev->transport = hba->backend->ops; dev->transport_flags = dev->transport->transport_flags_default; @@ -738,9 +748,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&dev->dev_sep_list); INIT_LIST_HEAD(&dev->dev_tmr_list); INIT_LIST_HEAD(&dev->delayed_cmd_list); - INIT_LIST_HEAD(&dev->state_list); INIT_LIST_HEAD(&dev->qf_cmd_list); - spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); spin_lock_init(&dev->dev_reservation_lock); spin_lock_init(&dev->se_port_lock); @@ -1013,6 +1021,7 @@ void target_free_device(struct se_device *dev) if (dev->transport->free_prot) dev->transport->free_prot(dev); + kfree(dev->queues); dev->transport->free_device(dev); } diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index f2bd2e207e0b..8ed93fd205c7 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -211,6 +211,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( break; case 512: blocks_long <<= 3; + break; default: break; } diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 5f79ea05f9b8..14db5e568f22 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -337,6 +337,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type, switch (pr_reg_type) { case PR_TYPE_WRITE_EXCLUSIVE: we = 1; + fallthrough; case PR_TYPE_EXCLUSIVE_ACCESS: /* * Some commands are only allowed for the persistent reservation diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 6e8b8d30938f..f7c527a826fd 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -434,20 +434,81 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success, return ret; } +/* + * compare @cmp_len bytes of @read_sgl with @cmp_sgl. On miscompare, fill + * @miscmp_off and return TCM_MISCOMPARE_VERIFY. 
+ */ +static sense_reason_t +compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents, + struct scatterlist *cmp_sgl, unsigned int cmp_nents, + unsigned int cmp_len, unsigned int *miscmp_off) +{ + unsigned char *buf = NULL; + struct scatterlist *sg; + sense_reason_t ret; + unsigned int offset; + size_t rc; + int i; + + buf = kzalloc(cmp_len, GFP_KERNEL); + if (!buf) { + ret = TCM_OUT_OF_RESOURCES; + goto out; + } + + rc = sg_copy_to_buffer(cmp_sgl, cmp_nents, buf, cmp_len); + if (!rc) { + pr_err("sg_copy_to_buffer() failed for compare_and_write\n"); + ret = TCM_OUT_OF_RESOURCES; + goto out; + } + /* + * Compare SCSI READ payload against verify payload + */ + offset = 0; + ret = TCM_NO_SENSE; + for_each_sg(read_sgl, sg, read_nents, i) { + unsigned int len = min(sg->length, cmp_len); + unsigned char *addr = kmap_atomic(sg_page(sg)); + + if (memcmp(addr, buf + offset, len)) { + unsigned int i; + + for (i = 0; i < len && addr[i] == buf[offset + i]; i++) + ; + *miscmp_off = offset + i; + pr_warn("Detected MISCOMPARE at offset %u\n", + *miscmp_off); + ret = TCM_MISCOMPARE_VERIFY; + } + kunmap_atomic(addr); + if (ret != TCM_NO_SENSE) + goto out; + + offset += len; + cmp_len -= len; + if (!cmp_len) + break; + } + pr_debug("COMPARE AND WRITE read data matches compare data\n"); +out: + kfree(buf); + return ret; +} + static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, int *post_ret) { struct se_device *dev = cmd->se_dev; struct sg_table write_tbl = { }; - struct scatterlist *write_sg, *sg; - unsigned char *buf = NULL, *addr; + struct scatterlist *write_sg; struct sg_mapping_iter m; - unsigned int offset = 0, len; - unsigned int nlbas = cmd->t_task_nolb; + unsigned int len; unsigned int block_size = dev->dev_attrib.block_size; - unsigned int compare_len = (nlbas * block_size); + unsigned int compare_len = (cmd->t_task_nolb * block_size); + unsigned int miscmp_off = 0; sense_reason_t ret = TCM_NO_SENSE; - int rc, i; + int i; /* * Handle early failure in transport_generic_request_failure(), @@ -473,12 +534,23 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes goto out; } - buf = kzalloc(cmd->data_length, GFP_KERNEL); - if (!buf) { - pr_err("Unable to allocate compare_and_write buf\n"); - ret = TCM_OUT_OF_RESOURCES; + ret = compare_and_write_do_cmp(cmd->t_bidi_data_sg, + cmd->t_bidi_data_nents, + cmd->t_data_sg, + cmd->t_data_nents, + compare_len, + &miscmp_off); + if (ret == TCM_MISCOMPARE_VERIFY) { + /* + * SBC-4 r15: 5.3 COMPARE AND WRITE command + * In the sense data (see 4.18 and SPC-5) the offset from the + * start of the Data-Out Buffer to the first byte of data that + * was not equal shall be reported in the INFORMATION field. + */ + cmd->sense_info = miscmp_off; + goto out; + } else if (ret) goto out; - } if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) { pr_err("Unable to allocate compare_and_write sg\n"); @@ -486,44 +558,9 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes goto out; } write_sg = write_tbl.sgl; - /* - * Setup verify and write data payloads from total NumberLBAs. 
- */ - rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf, - cmd->data_length); - if (!rc) { - pr_err("sg_copy_to_buffer() failed for compare_and_write\n"); - ret = TCM_OUT_OF_RESOURCES; - goto out; - } - /* - * Compare against SCSI READ payload against verify payload - */ - for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) { - addr = (unsigned char *)kmap_atomic(sg_page(sg)); - if (!addr) { - ret = TCM_OUT_OF_RESOURCES; - goto out; - } - - len = min(sg->length, compare_len); - - if (memcmp(addr, buf + offset, len)) { - pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n", - addr, buf + offset); - kunmap_atomic(addr); - goto miscompare; - } - kunmap_atomic(addr); - - offset += len; - compare_len -= len; - if (!compare_len) - break; - } i = 0; - len = cmd->t_task_nolb * block_size; + len = compare_len; sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG); /* * Currently assumes NoLB=1 and SGLs are PAGE_SIZE.. @@ -568,13 +605,8 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes __target_execute_cmd(cmd, false); - kfree(buf); return ret; -miscompare: - pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n", - dev->transport->name); - ret = TCM_MISCOMPARE_VERIFY; out: /* * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in @@ -582,7 +614,6 @@ out: */ up(&dev->caw_sem); sg_free_table(&write_tbl); - kfree(buf); return ret; } @@ -1439,7 +1470,7 @@ sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, if (rc) { kunmap_atomic(daddr - dsg->offset); kunmap_atomic(paddr - psg->offset); - cmd->bad_sector = sector; + cmd->sense_info = sector; return rc; } next: diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index e4513ef09159..7347285471fa 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -121,57 +121,61 @@ void core_tmr_abort_task( unsigned long flags; bool rc; u64 ref_tag; - - spin_lock_irqsave(&dev->execute_task_lock, flags); - list_for_each_entry_safe(se_cmd, next, &dev->state_list, state_list) { - - if (se_sess != se_cmd->se_sess) - continue; - - /* skip task management functions, including tmr->task_cmd */ - if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) - continue; - - ref_tag = se_cmd->tag; - if (tmr->ref_task_tag != ref_tag) - continue; - - printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", - se_cmd->se_tfo->fabric_name, ref_tag); - - spin_lock(&se_sess->sess_cmd_lock); - rc = __target_check_io_state(se_cmd, se_sess, 0); - spin_unlock(&se_sess->sess_cmd_lock); - if (!rc) - continue; - - list_move_tail(&se_cmd->state_list, &aborted_list); - se_cmd->state_active = false; - - spin_unlock_irqrestore(&dev->execute_task_lock, flags); - - /* - * Ensure that this ABORT request is visible to the LU RESET - * code. 
- */ - if (!tmr->tmr_dev) - WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) < - 0); - - if (dev->transport->tmr_notify) - dev->transport->tmr_notify(dev, TMR_ABORT_TASK, - &aborted_list); - - list_del_init(&se_cmd->state_list); - target_put_cmd_and_wait(se_cmd); - - printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" - " ref_tag: %llu\n", ref_tag); - tmr->response = TMR_FUNCTION_COMPLETE; - atomic_long_inc(&dev->aborts_complete); - return; + int i; + + for (i = 0; i < dev->queue_cnt; i++) { + spin_lock_irqsave(&dev->queues[i].lock, flags); + list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list, + state_list) { + if (se_sess != se_cmd->se_sess) + continue; + + /* + * skip task management functions, including + * tmr->task_cmd + */ + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + continue; + + ref_tag = se_cmd->tag; + if (tmr->ref_task_tag != ref_tag) + continue; + + pr_err("ABORT_TASK: Found referenced %s task_tag: %llu\n", + se_cmd->se_tfo->fabric_name, ref_tag); + + spin_lock(&se_sess->sess_cmd_lock); + rc = __target_check_io_state(se_cmd, se_sess, 0); + spin_unlock(&se_sess->sess_cmd_lock); + if (!rc) + continue; + + list_move_tail(&se_cmd->state_list, &aborted_list); + se_cmd->state_active = false; + spin_unlock_irqrestore(&dev->queues[i].lock, flags); + + /* + * Ensure that this ABORT request is visible to the LU + * RESET code. + */ + if (!tmr->tmr_dev) + WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) < 0); + + if (dev->transport->tmr_notify) + dev->transport->tmr_notify(dev, TMR_ABORT_TASK, + &aborted_list); + + list_del_init(&se_cmd->state_list); + target_put_cmd_and_wait(se_cmd); + + pr_err("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag: %llu\n", + ref_tag); + tmr->response = TMR_FUNCTION_COMPLETE; + atomic_long_inc(&dev->aborts_complete); + return; + } + spin_unlock_irqrestore(&dev->queues[i].lock, flags); } - spin_unlock_irqrestore(&dev->execute_task_lock, flags); if (dev->transport->tmr_notify) dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list); @@ -273,7 +277,7 @@ static void core_tmr_drain_state_list( struct se_session *sess; struct se_cmd *cmd, *next; unsigned long flags; - int rc; + int rc, i; /* * Complete outstanding commands with TASK_ABORTED SAM status. @@ -297,35 +301,39 @@ static void core_tmr_drain_state_list( * Note that this seems to be independent of TAS (Task Aborted Status) * in the Control Mode Page. */ - spin_lock_irqsave(&dev->execute_task_lock, flags); - list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) { - /* - * For PREEMPT_AND_ABORT usage, only process commands - * with a matching reservation key. - */ - if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) - continue; - - /* - * Not aborting PROUT PREEMPT_AND_ABORT CDB.. - */ - if (prout_cmd == cmd) - continue; - - sess = cmd->se_sess; - if (WARN_ON_ONCE(!sess)) - continue; - - spin_lock(&sess->sess_cmd_lock); - rc = __target_check_io_state(cmd, tmr_sess, tas); - spin_unlock(&sess->sess_cmd_lock); - if (!rc) - continue; - - list_move_tail(&cmd->state_list, &drain_task_list); - cmd->state_active = false; + for (i = 0; i < dev->queue_cnt; i++) { + spin_lock_irqsave(&dev->queues[i].lock, flags); + list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list, + state_list) { + /* + * For PREEMPT_AND_ABORT usage, only process commands + * with a matching reservation key. + */ + if (target_check_cdb_and_preempt(preempt_and_abort_list, + cmd)) + continue; + + /* + * Not aborting PROUT PREEMPT_AND_ABORT CDB.. 
+ */ + if (prout_cmd == cmd) + continue; + + sess = cmd->se_sess; + if (WARN_ON_ONCE(!sess)) + continue; + + spin_lock(&sess->sess_cmd_lock); + rc = __target_check_io_state(cmd, tmr_sess, tas); + spin_unlock(&sess->sess_cmd_lock); + if (!rc) + continue; + + list_move_tail(&cmd->state_list, &drain_task_list); + cmd->state_active = false; + } + spin_unlock_irqrestore(&dev->queues[i].lock, flags); } - spin_unlock_irqrestore(&dev->execute_task_lock, flags); if (dev->transport->tmr_notify) dev->transport->tmr_notify(dev, preempt_and_abort_list ? diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 62aa5fa63ac0..736847c933e5 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -328,7 +328,7 @@ static void target_shutdown_sessions(struct se_node_acl *acl) restart: spin_lock_irqsave(&acl->nacl_sess_lock, flags); list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) { - if (sess->sess_tearing_down) + if (atomic_read(&sess->stopped)) continue; list_del_init(&sess->sess_acl_list); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ff26ab0a5f60..fca4bd079d02 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -215,7 +215,7 @@ static void target_release_sess_cmd_refcnt(struct percpu_ref *ref) { struct se_session *sess = container_of(ref, typeof(*sess), cmd_count); - wake_up(&sess->cmd_list_wq); + wake_up(&sess->cmd_count_wq); } /** @@ -228,9 +228,10 @@ int transport_init_session(struct se_session *se_sess) { INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); - INIT_LIST_HEAD(&se_sess->sess_cmd_list); spin_lock_init(&se_sess->sess_cmd_lock); - init_waitqueue_head(&se_sess->cmd_list_wq); + init_waitqueue_head(&se_sess->cmd_count_wq); + init_completion(&se_sess->stop_done); + atomic_set(&se_sess->stopped, 0); return percpu_ref_init(&se_sess->cmd_count, target_release_sess_cmd_refcnt, 0, GFP_KERNEL); } @@ -238,6 +239,14 @@ EXPORT_SYMBOL(transport_init_session); void transport_uninit_session(struct se_session *se_sess) { + /* + * Drivers like iscsi and loop do not call target_stop_session + * during session shutdown so we have to drop the ref taken at init + * time here. + */ + if (!atomic_read(&se_sess->stopped)) + percpu_ref_put(&se_sess->cmd_count); + percpu_ref_exit(&se_sess->cmd_count); } @@ -650,12 +659,12 @@ static void target_remove_from_state_list(struct se_cmd *cmd) if (!dev) return; - spin_lock_irqsave(&dev->execute_task_lock, flags); + spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); if (cmd->state_active) { list_del(&cmd->state_list); cmd->state_active = false; } - spin_unlock_irqrestore(&dev->execute_task_lock, flags); + spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); } /* @@ -866,10 +875,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) INIT_WORK(&cmd->work, success ? 
target_complete_ok_work : target_complete_failure_work); - if (cmd->se_cmd_flags & SCF_USE_CPUID) - queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); - else - queue_work(target_completion_wq, &cmd->work); + queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); } EXPORT_SYMBOL(target_complete_cmd); @@ -897,12 +903,13 @@ static void target_add_to_state_list(struct se_cmd *cmd) struct se_device *dev = cmd->se_dev; unsigned long flags; - spin_lock_irqsave(&dev->execute_task_lock, flags); + spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); if (!cmd->state_active) { - list_add_tail(&cmd->state_list, &dev->state_list); + list_add_tail(&cmd->state_list, + &dev->queues[cmd->cpuid].state_list); cmd->state_active = true; } - spin_unlock_irqrestore(&dev->execute_task_lock, flags); + spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); } /* @@ -1390,6 +1397,9 @@ void transport_init_se_cmd( cmd->sense_buffer = sense_buffer; cmd->orig_fe_lun = unpacked_lun; + if (!(cmd->se_cmd_flags & SCF_USE_CPUID)) + cmd->cpuid = smp_processor_id(); + cmd->state_active = false; } EXPORT_SYMBOL(transport_init_se_cmd); @@ -1607,6 +1617,9 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess BUG_ON(!se_tpg); BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); BUG_ON(in_interrupt()); + + if (flags & TARGET_SCF_USE_CPUID) + se_cmd->se_cmd_flags |= SCF_USE_CPUID; /* * Initialize se_cmd for target operation. From this point * exceptions are handled by sending exception status via @@ -1616,17 +1629,11 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess data_length, data_dir, task_attr, sense, unpacked_lun); - if (flags & TARGET_SCF_USE_CPUID) - se_cmd->se_cmd_flags |= SCF_USE_CPUID; - else - se_cmd->cpuid = WORK_CPU_UNBOUND; - if (flags & TARGET_SCF_UNKNOWN_SIZE) se_cmd->unknown_data_length = 1; /* - * Obtain struct se_cmd->cmd_kref reference and add new cmd to - * se_sess->sess_cmd_list. A second kref_get here is necessary - * for fabrics using TARGET_SCF_ACK_KREF that expect a second + * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is + * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second * kref_put() to happen during fabric packet acknowledgement. */ ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); @@ -1764,29 +1771,6 @@ static void target_complete_tmr_failure(struct work_struct *work) transport_cmd_check_stop_to_fabric(se_cmd); } -static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag, - u64 *unpacked_lun) -{ - struct se_cmd *se_cmd; - unsigned long flags; - bool ret = false; - - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { - if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) - continue; - - if (se_cmd->tag == tag) { - *unpacked_lun = se_cmd->orig_fe_lun; - ret = true; - break; - } - } - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - - return ret; -} - /** * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd * for TMR CDBs @@ -1834,16 +1818,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, core_tmr_release_req(se_cmd->se_tmr_req); return ret; } - /* - * If this is ABORT_TASK with no explicit fabric provided LUN, - * go ahead and search active session tags for a match to figure - * out unpacked_lun for the original se_cmd. 
- */ - if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) { - if (!target_lookup_lun_from_tag(se_sess, tag, - &se_cmd->orig_fe_lun)) - goto failure; - } ret = transport_lookup_tmr_lun(se_cmd); if (ret) @@ -2788,14 +2762,13 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) EXPORT_SYMBOL(transport_generic_free_cmd); /** - * target_get_sess_cmd - Add command to active ->sess_cmd_list + * target_get_sess_cmd - Verify the session is accepting cmds and take ref * @se_cmd: command descriptor to add * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() */ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) { struct se_session *se_sess = se_cmd->se_sess; - unsigned long flags; int ret = 0; /* @@ -2810,15 +2783,8 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) se_cmd->se_cmd_flags |= SCF_ACK_KREF; } - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - if (se_sess->sess_tearing_down) { + if (!percpu_ref_tryget_live(&se_sess->cmd_count)) ret = -ESHUTDOWN; - goto out; - } - list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); - percpu_ref_get(&se_sess->cmd_count); -out: - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); if (ret && ack_kref) target_put_sess_cmd(se_cmd); @@ -2843,13 +2809,6 @@ static void target_release_cmd_kref(struct kref *kref) struct se_session *se_sess = se_cmd->se_sess; struct completion *free_compl = se_cmd->free_compl; struct completion *abrt_compl = se_cmd->abrt_compl; - unsigned long flags; - - if (se_sess) { - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - list_del_init(&se_cmd->se_cmd_list); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - } target_free_cmd_mem(se_cmd); se_cmd->se_tfo->release_cmd(se_cmd); @@ -2977,21 +2936,25 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd) } EXPORT_SYMBOL(target_show_cmd); +static void target_stop_session_confirm(struct percpu_ref *ref) +{ + struct se_session *se_sess = container_of(ref, struct se_session, + cmd_count); + complete_all(&se_sess->stop_done); +} + /** - * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued. - * @se_sess: session to flag + * target_stop_session - Stop new IO from being queued on the session. 
+ * @se_sess: session to stop */ -void target_sess_cmd_list_set_waiting(struct se_session *se_sess) +void target_stop_session(struct se_session *se_sess) { - unsigned long flags; - - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - se_sess->sess_tearing_down = 1; - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - - percpu_ref_kill(&se_sess->cmd_count); + pr_debug("Stopping session queue.\n"); + if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0) + percpu_ref_kill_and_confirm(&se_sess->cmd_count, + target_stop_session_confirm); } -EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); +EXPORT_SYMBOL(target_stop_session); /** * target_wait_for_sess_cmds - Wait for outstanding commands @@ -2999,19 +2962,19 @@ EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); */ void target_wait_for_sess_cmds(struct se_session *se_sess) { - struct se_cmd *cmd; int ret; - WARN_ON_ONCE(!se_sess->sess_tearing_down); + WARN_ON_ONCE(!atomic_read(&se_sess->stopped)); do { - ret = wait_event_timeout(se_sess->cmd_list_wq, + pr_debug("Waiting for running cmds to complete.\n"); + ret = wait_event_timeout(se_sess->cmd_count_wq, percpu_ref_is_zero(&se_sess->cmd_count), 180 * HZ); - list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) - target_show_cmd("session shutdown: still waiting for ", - cmd); } while (ret <= 0); + + wait_for_completion(&se_sess->stop_done); + pr_debug("Waiting for cmds done.\n"); } EXPORT_SYMBOL(target_wait_for_sess_cmds); @@ -3094,14 +3057,14 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) } EXPORT_SYMBOL(transport_wait_for_tasks); -struct sense_info { +struct sense_detail { u8 key; u8 asc; u8 ascq; - bool add_sector_info; + bool add_sense_info; }; -static const struct sense_info sense_info_table[] = { +static const struct sense_detail sense_detail_table[] = { [TCM_NO_SENSE] = { .key = NOT_READY }, @@ -3196,24 +3159,25 @@ static const struct sense_info sense_info_table[] = { .key = MISCOMPARE, .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ .ascq = 0x00, + .add_sense_info = true, }, [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { .key = ABORTED_COMMAND, .asc = 0x10, .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ - .add_sector_info = true, + .add_sense_info = true, }, [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { .key = ABORTED_COMMAND, .asc = 0x10, .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ - .add_sector_info = true, + .add_sense_info = true, }, [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { .key = ABORTED_COMMAND, .asc = 0x10, .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ - .add_sector_info = true, + .add_sense_info = true, }, [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { .key = COPY_ABORTED, @@ -3261,42 +3225,42 @@ static const struct sense_info sense_info_table[] = { */ static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) { - const struct sense_info *si; + const struct sense_detail *sd; u8 *buffer = cmd->sense_buffer; int r = (__force int)reason; u8 key, asc, ascq; bool desc_format = target_sense_desc_format(cmd->se_dev); - if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) - si = &sense_info_table[r]; + if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key) + sd = &sense_detail_table[r]; else - si = &sense_info_table[(__force int) + sd = &sense_detail_table[(__force int) TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; - key = si->key; + key = sd->key; if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, &ascq)) { cmd->scsi_status = 
SAM_STAT_BUSY; return; } - } else if (si->asc == 0) { + } else if (sd->asc == 0) { WARN_ON_ONCE(cmd->scsi_asc == 0); asc = cmd->scsi_asc; ascq = cmd->scsi_ascq; } else { - asc = si->asc; - ascq = si->ascq; + asc = sd->asc; + ascq = sd->ascq; } cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; cmd->scsi_status = SAM_STAT_CHECK_CONDITION; cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); - if (si->add_sector_info) + if (sd->add_sense_info) WARN_ON_ONCE(scsi_set_sense_information(buffer, cmd->scsi_sense_length, - cmd->bad_sector) < 0); + cmd->sense_info) < 0); } int diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 590e6d072228..6b171fff007b 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -586,14 +586,15 @@ static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd) } static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd, - struct iovec **iov, int prev_dbi, int *remain) + struct iovec **iov, int prev_dbi, int len) { /* Get the next dbi */ int dbi = tcmu_cmd_get_dbi(cmd); + /* Do not add more than DATA_BLOCK_SIZE to iov */ - int len = min_t(int, DATA_BLOCK_SIZE, *remain); + if (len > DATA_BLOCK_SIZE) + len = DATA_BLOCK_SIZE; - *remain -= len; /* * The following code will gather and map the blocks to the same iovec * when the blocks are all next to each other. @@ -618,8 +619,8 @@ static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, int dbi = -2; /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */ - while (data_length > 0) - dbi = new_block_to_iov(udev, cmd, iov, dbi, &data_length); + for (; data_length > 0; data_length -= DATA_BLOCK_SIZE) + dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); } static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) @@ -688,67 +689,83 @@ static inline size_t head_to_end(size_t head, size_t size) #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) -static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, - struct iovec **iov) +#define TCMU_SG_TO_DATA_AREA 1 +#define TCMU_DATA_AREA_TO_SG 2 + +static inline void tcmu_copy_data(struct tcmu_dev *udev, + struct tcmu_cmd *tcmu_cmd, uint32_t direction, + struct scatterlist *sg, unsigned int sg_nents, + struct iovec **iov, size_t data_len) { - struct se_cmd *se_cmd = tcmu_cmd->se_cmd; /* start value of dbi + 1 must not be a valid dbi */ - int i, dbi = -2; - int block_remaining = 0; - int data_len = se_cmd->data_length; - void *from, *to = NULL; - size_t copy_bytes, offset; - struct scatterlist *sg; - struct page *page = NULL; - - for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) { - int sg_remaining = sg->length; - from = kmap_atomic(sg_page(sg)) + sg->offset; - while (sg_remaining > 0) { - if (block_remaining == 0) { - if (to) { - flush_dcache_page(page); - kunmap_atomic(to); - } - - /* get next dbi and add to IOVs */ - dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi, - &data_len); - page = tcmu_get_block_page(udev, dbi); - to = kmap_atomic(page); - block_remaining = DATA_BLOCK_SIZE; - } + int dbi = -2; + size_t block_remaining, cp_len; + struct sg_mapping_iter sg_iter; + unsigned int sg_flags; + struct page *page; + void *data_page_start, *data_addr; - copy_bytes = min_t(size_t, sg_remaining, - block_remaining); - offset = DATA_BLOCK_SIZE - block_remaining; - memcpy(to + offset, from + sg->length - sg_remaining, - copy_bytes); + if (direction 
== TCMU_SG_TO_DATA_AREA) + sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG; + else + sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; + sg_miter_start(&sg_iter, sg, sg_nents, sg_flags); - sg_remaining -= copy_bytes; - block_remaining -= copy_bytes; + while (data_len) { + if (direction == TCMU_SG_TO_DATA_AREA) + dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi, + data_len); + else + dbi = tcmu_cmd_get_dbi(tcmu_cmd); + page = tcmu_get_block_page(udev, dbi); + if (direction == TCMU_DATA_AREA_TO_SG) + flush_dcache_page(page); + data_page_start = kmap_atomic(page); + block_remaining = DATA_BLOCK_SIZE; + + while (block_remaining && data_len) { + if (!sg_miter_next(&sg_iter)) { + /* set length to 0 to abort outer loop */ + data_len = 0; + pr_debug("tcmu_move_data: aborting data copy due to exhausted sg_list\n"); + break; + } + cp_len = min3(sg_iter.length, block_remaining, data_len); + + data_addr = data_page_start + + DATA_BLOCK_SIZE - block_remaining; + if (direction == TCMU_SG_TO_DATA_AREA) + memcpy(data_addr, sg_iter.addr, cp_len); + else + memcpy(sg_iter.addr, data_addr, cp_len); + + data_len -= cp_len; + block_remaining -= cp_len; + sg_iter.consumed = cp_len; } - kunmap_atomic(from - sg->offset); - } + sg_miter_stop(&sg_iter); - if (to) { - flush_dcache_page(page); - kunmap_atomic(to); + kunmap_atomic(data_page_start); + if (direction == TCMU_SG_TO_DATA_AREA) + flush_dcache_page(page); } } -static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, +static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, + struct iovec **iov) +{ + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + + tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg, + se_cmd->t_data_nents, iov, se_cmd->data_length); +} + +static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, bool bidi, uint32_t read_len) { - struct se_cmd *se_cmd = cmd->se_cmd; - int i, dbi; - int block_remaining = 0; - void *from = NULL, *to; - size_t copy_bytes, offset; - struct scatterlist *sg, *data_sg; - struct page *page; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + struct scatterlist *data_sg; unsigned int data_nents; - uint32_t count = 0; if (!bidi) { data_sg = se_cmd->t_data_sg; @@ -759,46 +776,15 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, * buffer blocks, and before gathering the Data-In buffer * the Data-Out buffer blocks should be skipped. 
*/ - count = cmd->dbi_cnt - cmd->dbi_bidi_cnt; + tcmu_cmd_set_dbi_cur(tcmu_cmd, + tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt); data_sg = se_cmd->t_bidi_data_sg; data_nents = se_cmd->t_bidi_data_nents; } - tcmu_cmd_set_dbi_cur(cmd, count); - - for_each_sg(data_sg, sg, data_nents, i) { - int sg_remaining = sg->length; - to = kmap_atomic(sg_page(sg)) + sg->offset; - while (sg_remaining > 0 && read_len > 0) { - if (block_remaining == 0) { - if (from) - kunmap_atomic(from); - - block_remaining = DATA_BLOCK_SIZE; - dbi = tcmu_cmd_get_dbi(cmd); - page = tcmu_get_block_page(udev, dbi); - from = kmap_atomic(page); - flush_dcache_page(page); - } - copy_bytes = min_t(size_t, sg_remaining, - block_remaining); - if (read_len < copy_bytes) - copy_bytes = read_len; - offset = DATA_BLOCK_SIZE - block_remaining; - memcpy(to + sg->length - sg_remaining, from + offset, - copy_bytes); - - sg_remaining -= copy_bytes; - block_remaining -= copy_bytes; - read_len -= copy_bytes; - } - kunmap_atomic(to - sg->offset); - if (read_len == 0) - break; - } - if (from) - kunmap_atomic(from); + tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg, + data_nents, NULL, read_len); } static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index a7ed56602c6c..768f250680d9 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -19,7 +19,6 @@ #include <asm/unaligned.h> #include <scsi/scsi_tcq.h> #include <scsi/libfc.h> -#include <scsi/fc_encode.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> @@ -551,7 +550,7 @@ static void ft_send_work(struct work_struct *work) if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl), task_attr, data_dir, - TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID)) + TARGET_SCF_ACK_KREF)) goto err; pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd); diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 6a38ff936389..bbe2e29612fa 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -28,7 +28,6 @@ #include <linux/ratelimit.h> #include <asm/unaligned.h> #include <scsi/libfc.h> -#include <scsi/fc_encode.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 4fd6a1de947c..23ce506d5402 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -275,7 +275,7 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id) static void ft_close_sess(struct ft_sess *sess) { - target_sess_cmd_list_set_waiting(sess->se_sess); + target_stop_session(sess->se_sess); target_wait_for_sess_cmds(sess->se_sess); ft_sess_put(sess); } |
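A few notes on the larger patterns in the hunks above, with simplified sketches (helper names are illustrative, not part of the diff).

COMPARE AND WRITE: the sbc.c rework moves the comparison into compare_and_write_do_cmp() and, per SBC-4 r15 5.3, reports the offset of the first unequal byte in the INFORMATION field of the sense data. That is what cmd->sense_info carries; the same field also absorbs cmd->bad_sector for the DIF checks, hence the sense_info/add_sense_info renames in transport.c. The offset scan reduces to:

	/* illustrative reduction of the miscompare scan in compare_and_write_do_cmp() */
	static unsigned int first_miscompare_off(const unsigned char *read_buf,
						 const unsigned char *cmp_buf,
						 unsigned int len)
	{
		unsigned int i;

		for (i = 0; i < len && read_buf[i] == cmp_buf[i]; i++)
			;
		return i;	/* == len when the ranges are equal */
	}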
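Task management: core_tmr_abort_task() and core_tmr_drain_state_list() now walk every per-CPU queue in turn, taking each queue's lock instead of the old execute_task_lock. Stripped of the session checks and abort wiring, the scan skeleton looks like this (hypothetical helper; the real hunks move matching commands to a local list rather than returning early):

	static struct se_cmd *example_find_tagged_cmd(struct se_device *dev, u64 tag)
	{
		struct se_cmd *cmd;
		unsigned long flags;
		int i;

		for (i = 0; i < dev->queue_cnt; i++) {
			spin_lock_irqsave(&dev->queues[i].lock, flags);
			list_for_each_entry(cmd, &dev->queues[i].state_list, state_list) {
				if (cmd->tag != tag)
					continue;
				spin_unlock_irqrestore(&dev->queues[i].lock, flags);
				return cmd;	/* real code re-checks I/O state first */
			}
			spin_unlock_irqrestore(&dev->queues[i].lock, flags);
		}
		return NULL;
	}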
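Session lifetime: se_sess->sess_cmd_list is gone. Commands pin the session purely through the percpu_ref se_sess->cmd_count, taken with percpu_ref_tryget_live() in target_get_sess_cmd() so a stopped session refuses new I/O with -ESHUTDOWN, and target_sess_cmd_list_set_waiting() is renamed target_stop_session(). The teardown sequence a fabric driver follows, per the tfc_sess.c hunk:

	/* pattern from the tfc_sess.c hunk; the function name is illustrative */
	static void example_close_session(struct se_session *se_sess)
	{
		/* mark the session stopped and kill cmd_count: new I/O now fails */
		target_stop_session(se_sess);
		/* block until in-flight commands drop their cmd_count references */
		target_wait_for_sess_cmds(se_sess);
	}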
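target_core_user: the two hand-rolled kmap_atomic()/memcpy() loops in scatter_data_area() and gather_data_area() collapse into one tcmu_copy_data() built on the sg_mapping_iter API. Reduced to a plain sg-list-to-buffer copy (buf and len are illustrative, and error handling is omitted), the underlying pattern is:

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	static void example_sg_to_buf(struct scatterlist *sgl, unsigned int nents,
				      void *buf, size_t len)
	{
		struct sg_mapping_iter miter;
		size_t off = 0;

		sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
		while (off < len && sg_miter_next(&miter)) {
			size_t cp = min_t(size_t, miter.length, len - off);

			memcpy(buf + off, miter.addr, cp);
			off += cp;
			miter.consumed = cp;	/* advance the iterator only by cp */
		}
		sg_miter_stop(&miter);
	}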