author     Linus Torvalds <torvalds@linux-foundation.org>  2011-10-25 11:17:39 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-10-25 11:17:39 +0200
commit     7c1953ddb609f1c161bf4a11a5e4e4577e82e557
tree       283244582f4fafd15a2ddf52971e0e5ff048af47 /drivers/target
parent     Merge branch 'for-linus' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/...
parent     target: Fix compile warning w/ missing module.h include
download   linux-7c1953ddb609f1c161bf4a11a5e4e4577e82e557.tar.xz
           linux-7c1953ddb609f1c161bf4a11a5e4e4577e82e557.zip
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (62 commits)
target: Fix compile warning w/ missing module.h include
target: Remove legacy se_task->task_timer and associated logic
target: Fix incorrect transport_sent usage
target: re-use the command S/G list for single-task commands
target: Fix BIDI t_task_cdb handling in transport_generic_new_cmd
target: remove transport_allocate_tasks
target: merge transport_new_cmd_obj into transport_generic_new_cmd
target: remove the task_sg_bidi field se_task and pSCSI BIDI support
target: transport_subsystem_check_init cleanups
target: use a workqueue for I/O completions
target: remove unused TRANSPORT_ states
target: remove TRANSPORT_DEFERRED_CMD state
target: remove the TRANSPORT_REMOVE state
target: move depth_left manipulation out of transport_generic_request_failure
target: stop task timers earlier
target: remove TF_TIMER_STOP
target: factor some duplicate code for stopping a task
target: fix list walking in transport_free_dev_tasks
target: use transport_cmd_check_stop_to_fabric consistently
target: do not pass the queue object to transport_remove_cmd_from_queue
...
Diffstat (limited to 'drivers/target')
33 files changed, 901 insertions(+), 1956 deletions(-)
diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 1060c7b7f803..62e54053bcd8 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile @@ -6,7 +6,6 @@ target_core_mod-y := target_core_configfs.o \ target_core_hba.o \ target_core_pr.o \ target_core_alua.o \ - target_core_scdb.o \ target_core_tmr.o \ target_core_tpg.o \ target_core_transport.o \ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 6a4ea29c2f36..4d01768fcd90 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -765,7 +765,7 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 : cmd->se_cmd.t_data_nents; - iov_count += TRANSPORT_IOV_DATA_BUFFER; + iov_count += ISCSI_IOV_DATA_BUFFER; cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL); if (!cmd->iov_data) { @@ -3538,16 +3538,8 @@ get_immediate: spin_lock_bh(&conn->cmd_lock); list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - /* - * Determine if a struct se_cmd is assoicated with - * this struct iscsi_cmd. - */ - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) && - !(cmd->tmr_req)) - iscsit_release_cmd(cmd); - else - transport_generic_free_cmd(&cmd->se_cmd, - 1, 0); + + iscsit_free_cmd(cmd); goto get_immediate; case ISTATE_SEND_NOPIN_WANT_RESPONSE: spin_unlock_bh(&cmd->istate_lock); @@ -3940,7 +3932,6 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) { struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL; struct iscsi_session *sess = conn->sess; - struct se_cmd *se_cmd; /* * We expect this function to only ever be called from either RX or TX * thread context via iscsit_close_connection() once the other context @@ -3948,35 +3939,13 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) */ spin_lock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) { - list_del(&cmd->i_list); - spin_unlock_bh(&conn->cmd_lock); - iscsit_increment_maxcmdsn(cmd, sess); - se_cmd = &cmd->se_cmd; - /* - * Special cases for active iSCSI TMR, and - * transport_lookup_cmd_lun() failing from - * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd(). - */ - if (cmd->tmr_req && se_cmd->transport_wait_for_tasks) - se_cmd->transport_wait_for_tasks(se_cmd, 1, 1); - else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) - transport_release_cmd(se_cmd); - else - iscsit_release_cmd(cmd); - - spin_lock_bh(&conn->cmd_lock); - continue; - } list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); - se_cmd = &cmd->se_cmd; - if (se_cmd->transport_wait_for_tasks) - se_cmd->transport_wait_for_tasks(se_cmd, 1, 1); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); } diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 11fd74307811..beb39469e7f1 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -18,6 +18,7 @@ * GNU General Public License for more details. 
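The iscsit_allocate_iovecs() hunk above sizes the kvec array as at least one entry for data plus a fixed ISCSI_IOV_DATA_BUFFER slack (defined as 5 later in this diff). A minimal userspace model of that sizing; the helper name and the assumption that the slack covers PDU header/digest/padding vectors are illustrative:

```c
#include <stdio.h>
#include <stdlib.h>

struct kvec { void *iov_base; size_t iov_len; };

#define ISCSI_IOV_DATA_BUFFER 5  /* fixed slack, per the patch */

/* Model of the sizing logic: at least one data vector, plus the slack
 * (presumably for PDU header/digest/padding vectors). */
static struct kvec *alloc_iovecs(unsigned int t_data_nents)
{
    unsigned int iov_count = (t_data_nents == 0) ? 1 : t_data_nents;

    iov_count += ISCSI_IOV_DATA_BUFFER;
    /* calloc() zeroes and overflow-checks the multiplication, mirroring
     * kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL). */
    return calloc(iov_count, sizeof(struct kvec));
}

int main(void)
{
    struct kvec *iov = alloc_iovecs(3);

    printf("iovec array: %s\n", iov ? "allocated" : "failed");
    free(iov);
    return 0;
}
```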
******************************************************************************/ +#include <linux/kernel.h> #include <linux/string.h> #include <linux/crypto.h> #include <linux/err.h> @@ -27,40 +28,11 @@ #include "iscsi_target_nego.h" #include "iscsi_target_auth.h" -static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2]) -{ - unsigned char result = 0; - /* - * MSB - */ - if ((val[0] >= 'a') && (val[0] <= 'f')) - result = ((val[0] - 'a' + 10) & 0xf) << 4; - else - if ((val[0] >= 'A') && (val[0] <= 'F')) - result = ((val[0] - 'A' + 10) & 0xf) << 4; - else /* digit */ - result = ((val[0] - '0') & 0xf) << 4; - /* - * LSB - */ - if ((val[1] >= 'a') && (val[1] <= 'f')) - result |= ((val[1] - 'a' + 10) & 0xf); - else - if ((val[1] >= 'A') && (val[1] <= 'F')) - result |= ((val[1] - 'A' + 10) & 0xf); - else /* digit */ - result |= ((val[1] - '0') & 0xf); - - return result; -} - static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len) { - int i, j = 0; + int j = DIV_ROUND_UP(len, 2); - for (i = 0; i < len; i += 2) { - dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]); - } + hex2bin(dst, src, j); dst[j] = '\0'; return j; diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 470ed551eeb5..3723d90d5ae5 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -57,6 +57,9 @@ #define TA_PROD_MODE_WRITE_PROTECT 0 #define TA_CACHE_CORE_NPS 0 + +#define ISCSI_IOV_DATA_BUFFER 5 + enum tpg_np_network_transport_table { ISCSI_TCP = 0, ISCSI_SCTP_TCP = 1, @@ -425,7 +428,6 @@ struct iscsi_cmd { /* Number of times struct iscsi_cmd is present in immediate queue */ atomic_t immed_queue_count; atomic_t response_queue_count; - atomic_t transport_sent; spinlock_t datain_lock; spinlock_t dataout_timeout_lock; /* spinlock for protecting struct iscsi_cmd->i_state */ diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 91a4d170bda4..0b8404c30125 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -143,12 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_del(&cmd->i_list); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) - iscsit_release_cmd(cmd); - else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 1); + iscsit_free_cmd(cmd); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -170,12 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_del(&cmd->i_list); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) - iscsit_release_cmd(cmd); - else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 1); + iscsit_free_cmd(cmd); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -260,12 +250,7 @@ void iscsit_discard_cr_cmds_by_expstatsn( iscsit_remove_cmd_from_connection_recovery(cmd, sess); spin_unlock(&cr->conn_recovery_cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) - iscsit_release_cmd(cmd); - else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 0); + iscsit_free_cmd(cmd); spin_lock(&cr->conn_recovery_cmd_lock); } 
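chap_string_to_hex() above drops the hand-rolled chap_asciihex_to_binaryhex() in favor of the kernel's generic hex2bin() plus DIV_ROUND_UP() for the output length. A userspace sketch modeled loosely on those helpers (hex2bin() is simplified here to return void):

```c
#include <stdio.h>
#include <string.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Userspace stand-ins for the kernel helpers: hex_to_bin() maps one ASCII
 * hex digit to 0..15 (or -1), hex2bin() packs two digits per output byte. */
static int hex_to_bin(char ch)
{
    if (ch >= '0' && ch <= '9') return ch - '0';
    if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
    if (ch >= 'A' && ch <= 'F') return ch - 'A' + 10;
    return -1;
}

static void hex2bin(unsigned char *dst, const char *src, size_t count)
{
    while (count--) {
        *dst++ = (unsigned char)((hex_to_bin(src[0]) << 4) | hex_to_bin(src[1]));
        src += 2;
    }
}

int main(void)
{
    unsigned char out[8];
    const char *challenge = "deadbeef";
    size_t j = DIV_ROUND_UP(strlen(challenge), 2);

    hex2bin(out, challenge, j);
    printf("%02x %02x %02x %02x (%zu bytes)\n", out[0], out[1], out[2], out[3], j);
    return 0;
}
```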
spin_unlock(&cr->conn_recovery_cmd_lock); @@ -319,12 +304,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) - iscsit_release_cmd(cmd); - else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 1); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); } spin_unlock_bh(&conn->cmd_lock); @@ -377,13 +357,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) - iscsit_release_cmd(cmd); - else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 0); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); continue; } @@ -403,13 +377,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) { list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) - iscsit_release_cmd(cmd); - else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 1); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); continue; } @@ -434,10 +402,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) iscsit_free_all_datain_reqs(cmd); - if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) && - cmd->se_cmd.transport_wait_for_tasks) - cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd, - 0, 0); + transport_wait_for_tasks(&cmd->se_cmd); /* * Add the struct iscsi_cmd to the connection recovery cmd list */ diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index db1fe1ec84df..490207eacde9 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -250,7 +250,7 @@ static int iscsit_task_reassign_complete_write( * so if we have received all DataOUT we can safety ignore Initiator. */ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { - if (!atomic_read(&cmd->transport_sent)) { + if (!atomic_read(&cmd->se_cmd.t_transport_sent)) { pr_debug("WRITE ITT: 0x%08x: t_state: %d" " never sent to transport\n", cmd->init_task_tag, cmd->se_cmd.t_state); @@ -314,11 +314,11 @@ static int iscsit_task_reassign_complete_read( cmd->acked_data_sn = (tmr_req->exp_data_sn - 1); } - if (!atomic_read(&cmd->transport_sent)) { + if (!atomic_read(&cmd->se_cmd.t_transport_sent)) { pr_debug("READ ITT: 0x%08x: t_state: %d never sent to" " transport\n", cmd->init_task_tag, cmd->se_cmd.t_state); - transport_generic_handle_cdb(se_cmd); + transport_handle_cdb_direct(se_cmd); return 0; } diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index f00137f377b2..02348f727bd4 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -289,7 +289,8 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( } se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, - (void *)cmd->tmr_req, tcm_function); + (void *)cmd->tmr_req, tcm_function, + GFP_KERNEL); if (!se_cmd->se_tmr_req) goto out; @@ -839,6 +840,23 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) kmem_cache_free(lio_cmd_cache, cmd); } +void iscsit_free_cmd(struct iscsi_cmd *cmd) +{ + /* + * Determine if a struct se_cmd is assoicated with + * this struct iscsi_cmd. 
+ */ + switch (cmd->iscsi_opcode) { + case ISCSI_OP_SCSI_CMD: + case ISCSI_OP_SCSI_TMFUNC: + transport_generic_free_cmd(&cmd->se_cmd, 1); + break; + default: + iscsit_release_cmd(cmd); + break; + } +} + int iscsit_check_session_usage_count(struct iscsi_session *sess) { spin_lock_bh(&sess->session_usage_lock); diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 2cd49d607bda..835bf7de0281 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -30,6 +30,7 @@ extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_c extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); extern void iscsit_release_cmd(struct iscsi_cmd *); +extern void iscsit_free_cmd(struct iscsi_cmd *); extern int iscsit_check_session_usage_count(struct iscsi_session *); extern void iscsit_dec_session_usage_count(struct iscsi_session *); extern void iscsit_inc_session_usage_count(struct iscsi_session *); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index aa2d67997235..b15d8cbf630b 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -200,7 +200,7 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd) * Release the struct se_cmd, which will make a callback to release * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() */ - transport_generic_free_cmd(se_cmd, 0, 0); + transport_generic_free_cmd(se_cmd, 0); } static void tcm_loop_release_cmd(struct se_cmd *se_cmd) @@ -290,6 +290,15 @@ static int tcm_loop_queuecommand( */ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + /* + * Ensure that this tl_tpg reference from the incoming sc->device->id + * has already been configured via tcm_loop_make_naa_tpg(). 
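The new iscsit_free_cmd() replaces the SCF_SE_LUN_CMD flag tests that were duplicated at every release site with a single dispatch on the iSCSI opcode: se_cmd-backed commands go through transport_generic_free_cmd(), everything else through iscsit_release_cmd(). A standalone sketch of that dispatch shape; the enum values and stub functions are illustrative:

```c
#include <stdio.h>

/* Illustrative opcode values and release stubs. */
enum opcode { OP_SCSI_CMD, OP_SCSI_TMFUNC, OP_NOOP_OUT, OP_TEXT };

static void release_via_transport(void) { puts("transport_generic_free_cmd()"); }
static void release_directly(void)      { puts("iscsit_release_cmd()"); }

/* One switch decides ownership, instead of flag tests at every call site. */
static void free_cmd(enum opcode op)
{
    switch (op) {
    case OP_SCSI_CMD:
    case OP_SCSI_TMFUNC:
        release_via_transport();  /* commands backed by a struct se_cmd */
        break;
    default:
        release_directly();       /* iSCSI-only commands (NOP-Out, Text, ...) */
        break;
    }
}

int main(void)
{
    free_cmd(OP_SCSI_CMD);
    free_cmd(OP_NOOP_OUT);
    return 0;
}
```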
+ */ + if (!tl_tpg->tl_hba) { + set_host_byte(sc, DID_NO_CONNECT); + sc->scsi_done(sc); + return 0; + } se_tpg = &tl_tpg->tl_se_tpg; /* * Determine the SAM Task Attribute and allocate tl_cmd and @@ -366,7 +375,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) * Allocate the LUN_RESET TMR */ se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr, - TMR_LUN_RESET); + TMR_LUN_RESET, GFP_KERNEL); if (IS_ERR(se_cmd->se_tmr_req)) goto release; /* @@ -388,7 +397,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) SUCCESS : FAILED; release: if (se_cmd) - transport_generic_free_cmd(se_cmd, 1, 0); + transport_generic_free_cmd(se_cmd, 1); else kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); kfree(tl_tmr); @@ -1245,6 +1254,9 @@ void tcm_loop_drop_naa_tpg( */ core_tpg_deregister(se_tpg); + tl_tpg->tl_hba = NULL; + tl_tpg->tl_tpgt = 0; + pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s" " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), config_item_name(&wwn->wwn_group.cg_item), tpgt); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 98c98a3a0250..8f4447749c71 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -24,7 +24,6 @@ * ******************************************************************************/ -#include <linux/version.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/configfs.h> @@ -68,6 +67,15 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd) unsigned char *buf; u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first Target port group descriptor */ + /* + * Need at least 4 bytes of response data or else we can't + * even fit the return data length. + */ + if (cmd->data_length < 4) { + pr_warn("REPORT TARGET PORT GROUPS allocation length %u" + " too small\n", cmd->data_length); + return -EINVAL; + } buf = transport_kmap_first_data_page(cmd); @@ -75,6 +83,17 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd) list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { /* + * Check if the Target port group and Target port descriptor list + * based on tg_pt_gp_members count will fit into the response payload. + * Otherwise, bump rd_len to let the initiator know we have exceeded + * the allocation length and the response is truncated. + */ + if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) > + cmd->data_length) { + rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4); + continue; + } + /* * PREF: Preferred target port bit, determine if this * bit should be set for port group. 
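The REPORT TARGET PORT GROUPS hardening above follows the standard SCSI allocation-length pattern: a descriptor that will not fit is skipped, but its size still counts toward the returned length, so an initiator can detect the truncation and retry with a larger buffer. A userspace sketch of that accounting, with descriptor sizes matching the 8 + 4*members arithmetic in the hunk:

```c
#include <stdio.h>

/* 8-byte descriptor header plus 4 bytes per member port, matching the
 * 8 + (tg_pt_gp_members * 4) arithmetic in the hunk above. */
static unsigned int group_desc_len(unsigned int members)
{
    return 8 + members * 4;
}

static unsigned int fill_report(unsigned int data_length,
                                const unsigned int *members, int ngroups)
{
    unsigned int rd_len = 0, off = 4;  /* 4-byte header: RETURN DATA LENGTH */

    for (int i = 0; i < ngroups; i++) {
        unsigned int len = group_desc_len(members[i]);

        if (off + len > data_length) {
            /* Does not fit: skip writing it, but keep counting so the
             * reported length exceeds the buffer and signals truncation. */
            rd_len += len;
            continue;
        }
        off += len;  /* the descriptor would be written at 'off' here */
        rd_len += len;
    }
    return rd_len;
}

int main(void)
{
    unsigned int members[] = { 2, 4, 1 };

    printf("rd_len = %u for a 32-byte buffer\n", fill_report(32, members, 3));
    return 0;
}
```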
*/ diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index f04d4ef99dca..38535eb13929 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -24,7 +24,7 @@ */ #include <linux/kernel.h> -#include <linux/ctype.h> +#include <linux/module.h> #include <asm/unaligned.h> #include <scsi/scsi.h> @@ -156,11 +156,12 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) } static void -target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off) +target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf) { unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; - unsigned char *buf = buf_off; - int cnt = 0, next = 1; + int cnt; + bool next = true; + /* * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field @@ -169,19 +170,18 @@ target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_of * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure * per device uniqeness. */ - while (*p != '\0') { - if (cnt >= 13) - break; - if (!isxdigit(*p)) { - p++; + for (cnt = 0; *p && cnt < 13; p++) { + int val = hex_to_bin(*p); + + if (val < 0) continue; - } - if (next != 0) { - buf[cnt++] |= hex_to_bin(*p++); - next = 0; + + if (next) { + next = false; + buf[cnt++] |= val; } else { - buf[cnt] = hex_to_bin(*p++) << 4; - next = 1; + next = true; + buf[cnt] = val << 4; } } } @@ -1266,3 +1266,52 @@ transport_emulate_control_cdb(struct se_task *task) return PYX_TRANSPORT_SENT_TO_TRANSPORT; } + +/* + * Write a CDB into @cdb that is based on the one the intiator sent us, + * but updated to only cover the sectors that the current task handles. + */ +void target_get_task_cdb(struct se_task *task, unsigned char *cdb) +{ + struct se_cmd *cmd = task->task_se_cmd; + unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb); + + memcpy(cdb, cmd->t_task_cdb, cdb_len); + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + unsigned long long lba = task->task_lba; + u32 sectors = task->task_sectors; + + switch (cdb_len) { + case 6: + /* 21-bit LBA and 8-bit sectors */ + cdb[1] = (lba >> 16) & 0x1f; + cdb[2] = (lba >> 8) & 0xff; + cdb[3] = lba & 0xff; + cdb[4] = sectors & 0xff; + break; + case 10: + /* 32-bit LBA and 16-bit sectors */ + put_unaligned_be32(lba, &cdb[2]); + put_unaligned_be16(sectors, &cdb[7]); + break; + case 12: + /* 32-bit LBA and 32-bit sectors */ + put_unaligned_be32(lba, &cdb[2]); + put_unaligned_be32(sectors, &cdb[6]); + break; + case 16: + /* 64-bit LBA and 32-bit sectors */ + put_unaligned_be64(lba, &cdb[2]); + put_unaligned_be32(sectors, &cdb[10]); + break; + case 32: + /* 64-bit LBA and 32-bit sectors, extended CDB */ + put_unaligned_be64(lba, &cdb[12]); + put_unaligned_be32(sectors, &cdb[28]); + break; + default: + BUG(); + } + } +} +EXPORT_SYMBOL(target_get_task_cdb); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index b2575d8568cc..e0c1e8a8dd4e 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -23,7 +23,6 @@ #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/version.h> #include <generated/utsrelease.h> #include <linux/utsname.h> #include <linux/init.h> @@ -133,14 +132,6 @@ static struct config_group *target_core_register_fabric( pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" " %s\n", group, name); /* - * Ensure that TCM subsystem plugins are loaded 
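target_get_task_cdb() above folds the old split_cdb_XX_*() helpers (deleted further down with target_core_scdb.c) into one switch keyed on CDB length. As a worked check of the 10-byte case, here is a userspace sketch that encodes a READ(10), with put_unaligned_be*() modeled as plain shift-and-store (the LBA and length values are arbitrary):

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Userspace stand-ins for the kernel's put_unaligned_be{16,32}() helpers. */
static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = (uint8_t)v; }
static void put_be32(uint32_t v, uint8_t *p)
{
    p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = (uint8_t)v;
}

int main(void)
{
    uint8_t cdb[10];

    memset(cdb, 0, sizeof(cdb));
    cdb[0] = 0x28;                 /* READ(10) opcode */
    put_be32(0x12345678, &cdb[2]); /* 32-bit LBA in bytes 2..5 */
    put_be16(0x0040, &cdb[7]);     /* 16-bit transfer length in bytes 7..8 */

    for (int i = 0; i < 10; i++)
        printf("%02x ", cdb[i]);
    printf("\n");
    return 0;
}
```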
at this point for - * using the RAMDISK_DR virtual LUN 0 and all other struct se_port - * LUN symlinks. - */ - if (transport_subsystem_check_init() < 0) - return ERR_PTR(-EINVAL); - - /* * Below are some hardcoded request_module() calls to automatically * local fabric modules when the following is called: * @@ -725,9 +716,6 @@ SE_DEV_ATTR_RO(hw_queue_depth); DEF_DEV_ATTRIB(queue_depth); SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR); -DEF_DEV_ATTRIB(task_timeout); -SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR); - DEF_DEV_ATTRIB(max_unmap_lba_count); SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR); @@ -761,7 +749,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_optimal_sectors.attr, &target_core_dev_attrib_hw_queue_depth.attr, &target_core_dev_attrib_queue_depth.attr, - &target_core_dev_attrib_task_timeout.attr, &target_core_dev_attrib_max_unmap_lba_count.attr, &target_core_dev_attrib_max_unmap_block_desc_count.attr, &target_core_dev_attrib_unmap_granularity.attr, @@ -3080,8 +3067,7 @@ static struct config_group *target_core_call_addhbatotarget( /* * Load up TCM subsystem plugins if they have not already been loaded. */ - if (transport_subsystem_check_init() < 0) - return ERR_PTR(-EINVAL); + transport_subsystem_check_init(); hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0); if (IS_ERR(hba)) diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index ca6e4a4df134..f870c3bcfd82 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -914,21 +914,6 @@ void se_dev_set_default_attribs( dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth; } -int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) -{ - if (task_timeout > DA_TASK_TIMEOUT_MAX) { - pr_err("dev[%p]: Passed task_timeout: %u larger then" - " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); - return -EINVAL; - } else { - dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; - pr_debug("dev[%p]: Set SE Device task_timeout: %u\n", - dev, task_timeout); - } - - return 0; -} - int se_dev_set_max_unmap_lba_count( struct se_device *dev, u32 max_unmap_lba_count) @@ -972,36 +957,24 @@ int se_dev_set_unmap_granularity_alignment( int se_dev_set_emulate_dpo(struct se_device *dev, int flag) { - if ((flag != 0) && (flag != 1)) { + if (flag != 0 && flag != 1) { pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (dev->transport->dpo_emulated == NULL) { - pr_err("dev->transport->dpo_emulated is NULL\n"); - return -EINVAL; - } - if (dev->transport->dpo_emulated(dev) == 0) { - pr_err("dev->transport->dpo_emulated not supported\n"); - return -EINVAL; - } - dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; - pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation" - " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); - return 0; + + pr_err("dpo_emulated not supported\n"); + return -EINVAL; } int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) { - if ((flag != 0) && (flag != 1)) { + if (flag != 0 && flag != 1) { pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (dev->transport->fua_write_emulated == NULL) { - pr_err("dev->transport->fua_write_emulated is NULL\n"); - return -EINVAL; - } - if (dev->transport->fua_write_emulated(dev) == 0) { - pr_err("dev->transport->fua_write_emulated not supported\n"); + + if (dev->transport->fua_write_emulated == 0) { + pr_err("fua_write_emulated not supported\n"); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.emulate_fua_write 
= flag; @@ -1012,36 +985,23 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) { - if ((flag != 0) && (flag != 1)) { + if (flag != 0 && flag != 1) { pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (dev->transport->fua_read_emulated == NULL) { - pr_err("dev->transport->fua_read_emulated is NULL\n"); - return -EINVAL; - } - if (dev->transport->fua_read_emulated(dev) == 0) { - pr_err("dev->transport->fua_read_emulated not supported\n"); - return -EINVAL; - } - dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; - pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n", - dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); - return 0; + + pr_err("ua read emulated not supported\n"); + return -EINVAL; } int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) { - if ((flag != 0) && (flag != 1)) { + if (flag != 0 && flag != 1) { pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (dev->transport->write_cache_emulated == NULL) { - pr_err("dev->transport->write_cache_emulated is NULL\n"); - return -EINVAL; - } - if (dev->transport->write_cache_emulated(dev) == 0) { - pr_err("dev->transport->write_cache_emulated not supported\n"); + if (dev->transport->write_cache_emulated == 0) { + pr_err("write_cache_emulated not supported\n"); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 55bbe0847a6d..09b6f8729f91 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -22,7 +22,6 @@ #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/version.h> #include <generated/utsrelease.h> #include <linux/utsname.h> #include <linux/init.h> diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index bc1b33639b8d..19a0be9c6570 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -26,7 +26,6 @@ * ******************************************************************************/ -#include <linux/version.h> #include <linux/string.h> #include <linux/parser.h> #include <linux/timer.h> @@ -273,13 +272,14 @@ fd_alloc_task(unsigned char *cdb) static int fd_do_readv(struct se_task *task) { struct fd_request *req = FILE_REQ(task); - struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; + struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; + struct fd_dev *dev = se_dev->dev_ptr; struct file *fd = dev->fd_file; struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; loff_t pos = (task->task_lba * - task->se_dev->se_sub_dev->se_dev_attrib.block_size); + se_dev->se_sub_dev->se_dev_attrib.block_size); int ret = 0, i; iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); @@ -325,13 +325,14 @@ static int fd_do_readv(struct se_task *task) static int fd_do_writev(struct se_task *task) { struct fd_request *req = FILE_REQ(task); - struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; + struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; + struct fd_dev *dev = se_dev->dev_ptr; struct file *fd = dev->fd_file; struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; loff_t pos = (task->task_lba * - task->se_dev->se_sub_dev->se_dev_attrib.block_size); + se_dev->se_sub_dev->se_dev_attrib.block_size); int ret, i = 0; iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, 
GFP_KERNEL); @@ -399,33 +400,6 @@ static void fd_emulate_sync_cache(struct se_task *task) } /* - * Tell TCM Core that we are capable of WriteCache emulation for - * an underlying struct se_device. - */ -static int fd_emulated_write_cache(struct se_device *dev) -{ - return 1; -} - -static int fd_emulated_dpo(struct se_device *dev) -{ - return 0; -} -/* - * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs - * for TYPE_DISK. - */ -static int fd_emulated_fua_write(struct se_device *dev) -{ - return 1; -} - -static int fd_emulated_fua_read(struct se_device *dev) -{ - return 0; -} - -/* * WRITE Force Unit Access (FUA) emulation on a per struct se_task * LBA range basis.. */ @@ -608,17 +582,6 @@ static ssize_t fd_show_configfs_dev_params( return bl; } -/* fd_get_cdb(): (Part of se_subsystem_api_t template) - * - * - */ -static unsigned char *fd_get_cdb(struct se_task *task) -{ - struct fd_request *req = FILE_REQ(task); - - return req->fd_scsi_cdb; -} - /* fd_get_device_rev(): (Part of se_subsystem_api_t template) * * @@ -650,15 +613,13 @@ static struct se_subsystem_api fileio_template = { .name = "fileio", .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, + .write_cache_emulated = 1, + .fua_write_emulated = 1, .attach_hba = fd_attach_hba, .detach_hba = fd_detach_hba, .allocate_virtdevice = fd_allocate_virtdevice, .create_virtdevice = fd_create_virtdevice, .free_device = fd_free_device, - .dpo_emulated = fd_emulated_dpo, - .fua_write_emulated = fd_emulated_fua_write, - .fua_read_emulated = fd_emulated_fua_read, - .write_cache_emulated = fd_emulated_write_cache, .alloc_task = fd_alloc_task, .do_task = fd_do_task, .do_sync_cache = fd_emulate_sync_cache, @@ -666,7 +627,6 @@ static struct se_subsystem_api fileio_template = { .check_configfs_dev_params = fd_check_configfs_dev_params, .set_configfs_dev_params = fd_set_configfs_dev_params, .show_configfs_dev_params = fd_show_configfs_dev_params, - .get_cdb = fd_get_cdb, .get_device_rev = fd_get_device_rev, .get_device_type = fd_get_device_type, .get_blocks = fd_get_blocks, diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index daebd710b893..59e6e73106c2 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -14,9 +14,7 @@ struct fd_request { struct se_task fd_task; - /* SCSI CDB from iSCSI Command PDU */ - unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; -} ____cacheline_aligned; +}; #define FBDF_HAS_PATH 0x01 #define FBDF_HAS_SIZE 0x02 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7e1234105442..41ad02b5fb87 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -27,7 +27,6 @@ * ******************************************************************************/ -#include <linux/version.h> #include <linux/string.h> #include <linux/parser.h> #include <linux/timer.h> @@ -314,104 +313,42 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( return blocks_long; } +static void iblock_end_io_flush(struct bio *bio, int err) +{ + struct se_cmd *cmd = bio->bi_private; + + if (err) + pr_err("IBLOCK: cache flush failed: %d\n", err); + + if (cmd) + transport_complete_sync_cache(cmd, err == 0); + bio_put(bio); +} + /* - * Emulate SYCHRONIZE_CACHE_* + * Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must + * always flush the whole cache. 
*/ static void iblock_emulate_sync_cache(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; int immed = (cmd->t_task_cdb[1] & 0x2); - sector_t error_sector; - int ret; + struct bio *bio; /* * If the Immediate bit is set, queue up the GOOD response - * for this SYNCHRONIZE_CACHE op + * for this SYNCHRONIZE_CACHE op. */ if (immed) transport_complete_sync_cache(cmd, 1); - /* - * blkdev_issue_flush() does not support a specifying a range, so - * we have to flush the entire cache. - */ - ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); - if (ret != 0) { - pr_err("IBLOCK: block_issue_flush() failed: %d " - " error_sector: %llu\n", ret, - (unsigned long long)error_sector); - } - + bio = bio_alloc(GFP_KERNEL, 0); + bio->bi_end_io = iblock_end_io_flush; + bio->bi_bdev = ib_dev->ibd_bd; if (!immed) - transport_complete_sync_cache(cmd, ret == 0); -} - -/* - * Tell TCM Core that we are capable of WriteCache emulation for - * an underlying struct se_device. - */ -static int iblock_emulated_write_cache(struct se_device *dev) -{ - return 1; -} - -static int iblock_emulated_dpo(struct se_device *dev) -{ - return 0; -} - -/* - * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs - * for TYPE_DISK. - */ -static int iblock_emulated_fua_write(struct se_device *dev) -{ - return 1; -} - -static int iblock_emulated_fua_read(struct se_device *dev) -{ - return 0; -} - -static int iblock_do_task(struct se_task *task) -{ - struct se_device *dev = task->task_se_cmd->se_dev; - struct iblock_req *req = IBLOCK_REQ(task); - struct bio *bio = req->ib_bio, *nbio = NULL; - struct blk_plug plug; - int rw; - - if (task->task_data_direction == DMA_TO_DEVICE) { - /* - * Force data to disk if we pretend to not have a volatile - * write cache, or the initiator set the Force Unit Access bit. - */ - if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || - (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - task->task_se_cmd->t_tasks_fua)) - rw = WRITE_FUA; - else - rw = WRITE; - } else { - rw = READ; - } - - blk_start_plug(&plug); - while (bio) { - nbio = bio->bi_next; - bio->bi_next = NULL; - pr_debug("Calling submit_bio() task: %p bio: %p" - " bio->bi_sector: %llu\n", task, bio, - (unsigned long long)bio->bi_sector); - - submit_bio(rw, bio); - bio = nbio; - } - blk_finish_plug(&plug); - - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + bio->bi_private = cmd; + submit_bio(WRITE_FLUSH, bio); } static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) @@ -425,20 +362,7 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) static void iblock_free_task(struct se_task *task) { - struct iblock_req *req = IBLOCK_REQ(task); - struct bio *bio, *hbio = req->ib_bio; - /* - * We only release the bio(s) here if iblock_bio_done() has not called - * bio_put() -> iblock_bio_destructor(). 
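The rewritten iblock_emulate_sync_cache() above swaps the synchronous blkdev_issue_flush() for a zero-payload WRITE_FLUSH bio whose completion callback finishes the command; with the SCSI IMMED bit set, GOOD status is returned up front and bi_private stays NULL so the callback completes nothing. A userspace model of that completion flow, with the flush itself faked:

```c
#include <stdio.h>

struct cmd { const char *tag; };

static void complete_sync_cache(struct cmd *cmd, int good)
{
    printf("%s: completed, status %s\n", cmd->tag, good ? "GOOD" : "CHECK");
}

/* Models bio->bi_end_io: bi_private carries the command to complete,
 * or NULL when the command was already completed up front (IMMED). */
static void end_io_flush(struct cmd *bi_private, int err)
{
    if (err)
        fprintf(stderr, "cache flush failed: %d\n", err);
    if (bi_private)
        complete_sync_cache(bi_private, err == 0);
}

static void emulate_sync_cache(struct cmd *cmd, int immed)
{
    if (immed)
        complete_sync_cache(cmd, 1);  /* GOOD before the flush even starts */

    /* "Submit" the zero-payload flush; only non-IMMED commands wait on it. */
    end_io_flush(immed ? NULL : cmd, 0 /* pretend the flush succeeded */);
}

int main(void)
{
    struct cmd a = { "immed" }, b = { "sync" };

    emulate_sync_cache(&a, 1);
    emulate_sync_cache(&b, 0);
    return 0;
}
```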
- */ - while (hbio != NULL) { - bio = hbio; - hbio = hbio->bi_next; - bio->bi_next = NULL; - bio_put(bio); - } - - kfree(req); + kfree(IBLOCK_REQ(task)); } enum { @@ -552,25 +476,21 @@ static ssize_t iblock_show_configfs_dev_params( static void iblock_bio_destructor(struct bio *bio) { struct se_task *task = bio->bi_private; - struct iblock_dev *ib_dev = task->se_dev->dev_ptr; + struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; bio_free(bio, ib_dev->ibd_bio_set); } -static struct bio *iblock_get_bio( - struct se_task *task, - struct iblock_req *ib_req, - struct iblock_dev *ib_dev, - int *ret, - sector_t lba, - u32 sg_num) +static struct bio * +iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) { + struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; + struct iblock_req *ib_req = IBLOCK_REQ(task); struct bio *bio; bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); if (!bio) { pr_err("Unable to allocate memory for bio\n"); - *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; return NULL; } @@ -591,17 +511,33 @@ static struct bio *iblock_get_bio( return bio; } -static int iblock_map_data_SG(struct se_task *task) +static int iblock_do_task(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; - struct iblock_dev *ib_dev = task->se_dev->dev_ptr; - struct iblock_req *ib_req = IBLOCK_REQ(task); - struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; + struct bio *bio; + struct bio_list list; struct scatterlist *sg; - int ret = 0; u32 i, sg_num = task->task_sg_nents; sector_t block_lba; + struct blk_plug plug; + int rw; + + if (task->task_data_direction == DMA_TO_DEVICE) { + /* + * Force data to disk if we pretend to not have a volatile + * write cache, or the initiator set the Force Unit Access bit. + */ + if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || + (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && + task->task_se_cmd->t_tasks_fua)) + rw = WRITE_FUA; + else + rw = WRITE; + } else { + rw = READ; + } + /* * Do starting conversion up from non 512-byte blocksize with * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. @@ -620,68 +556,43 @@ static int iblock_map_data_SG(struct se_task *task) return PYX_TRANSPORT_LU_COMM_FAILURE; } - bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); + bio = iblock_get_bio(task, block_lba, sg_num); if (!bio) - return ret; + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + + bio_list_init(&list); + bio_list_add(&list, bio); - ib_req->ib_bio = bio; - hbio = tbio = bio; - /* - * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist - * from task->task_sg -> struct scatterlist memory. - */ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { - pr_debug("task: %p bio: %p Calling bio_add_page(): page:" - " %p len: %u offset: %u\n", task, bio, sg_page(sg), - sg->length, sg->offset); -again: - ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); - if (ret != sg->length) { - - pr_debug("*** Set bio->bi_sector: %llu\n", - (unsigned long long)bio->bi_sector); - pr_debug("** task->task_size: %u\n", - task->task_size); - pr_debug("*** bio->bi_max_vecs: %u\n", - bio->bi_max_vecs); - pr_debug("*** bio->bi_vcnt: %u\n", - bio->bi_vcnt); - - bio = iblock_get_bio(task, ib_req, ib_dev, &ret, - block_lba, sg_num); + /* + * XXX: if the length the device accepts is shorter than the + * length of the S/G list entry this will cause and + * endless loop. Better hope no driver uses huge pages. 
+ */ + while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) + != sg->length) { + bio = iblock_get_bio(task, block_lba, sg_num); if (!bio) goto fail; - - tbio = tbio->bi_next = bio; - pr_debug("-----------------> Added +1 bio: %p to" - " list, Going to again\n", bio); - goto again; + bio_list_add(&list, bio); } + /* Always in 512 byte units for Linux/Block */ block_lba += sg->length >> IBLOCK_LBA_SHIFT; sg_num--; - pr_debug("task: %p bio-add_page() passed!, decremented" - " sg_num to %u\n", task, sg_num); - pr_debug("task: %p bio_add_page() passed!, increased lba" - " to %llu\n", task, (unsigned long long)block_lba); - pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:" - " %u\n", task, bio->bi_vcnt); } - return 0; + blk_start_plug(&plug); + while ((bio = bio_list_pop(&list))) + submit_bio(rw, bio); + blk_finish_plug(&plug); + + return PYX_TRANSPORT_SENT_TO_TRANSPORT; + fail: - while (hbio) { - bio = hbio; - hbio = hbio->bi_next; - bio->bi_next = NULL; + while ((bio = bio_list_pop(&list))) bio_put(bio); - } - return ret; -} - -static unsigned char *iblock_get_cdb(struct se_task *task) -{ - return IBLOCK_REQ(task)->ib_scsi_cdb; + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; } static u32 iblock_get_device_rev(struct se_device *dev) @@ -707,6 +618,7 @@ static void iblock_bio_done(struct bio *bio, int err) { struct se_task *task = bio->bi_private; struct iblock_req *ibr = IBLOCK_REQ(task); + /* * Set -EIO if !BIO_UPTODATE and the passed is still err=0 */ @@ -721,50 +633,31 @@ static void iblock_bio_done(struct bio *bio, int err) */ atomic_inc(&ibr->ib_bio_err_cnt); smp_mb__after_atomic_inc(); - bio_put(bio); - /* - * Wait to complete the task until the last bio as completed. - */ - if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) - return; - - ibr->ib_bio = NULL; - transport_complete_task(task, 0); - return; } - pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", - task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err); - /* - * bio_put() will call iblock_bio_destructor() to release the bio back - * to ibr->ib_bio_set. - */ + bio_put(bio); - /* - * Wait to complete the task until the last bio as completed. - */ + if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) return; - /* - * Return GOOD status for task if zero ib_bio_err_cnt exists. 
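The bio-building loop above appends scatterlist segments with bio_add_page() and, whenever a bio fills up, opens a fresh one on a local bio_list before submitting them all under one blk plug. The same append-or-open-new-container loop in plain C, with a fixed capacity standing in for the device's bio limits:

```c
#include <stdio.h>
#include <stdlib.h>

#define BIO_CAP 4  /* illustrative stand-in for a bio's vector capacity */

struct bio {
    int vcnt;
    struct bio *next;  /* models the local struct bio_list */
};

static struct bio *bio_list_head, *bio_list_tail;

static struct bio *get_bio(void)
{
    struct bio *bio = calloc(1, sizeof(*bio));

    if (!bio)
        return NULL;
    if (bio_list_tail)
        bio_list_tail->next = bio;
    else
        bio_list_head = bio;
    bio_list_tail = bio;
    return bio;
}

/* Mirrors bio_add_page() returning less than the requested length
 * once the bio is full. */
static int add_page(struct bio *bio)
{
    if (bio->vcnt == BIO_CAP)
        return 0;  /* "added 0 bytes": caller must open a new bio */
    bio->vcnt++;
    return 1;
}

int main(void)
{
    struct bio *bio = get_bio();

    if (!bio)
        return 1;
    for (int seg = 0; seg < 10; seg++) {
        while (!add_page(bio)) {   /* full: start a fresh bio */
            bio = get_bio();
            if (!bio)
                return 1;
        }
    }
    while (bio_list_head) {        /* "submit" phase, one pass over the list */
        struct bio *next = bio_list_head->next;

        printf("submit bio with %d segments\n", bio_list_head->vcnt);
        free(bio_list_head);
        bio_list_head = next;
    }
    return 0;
}
```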
- */ - ibr->ib_bio = NULL; - transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt))); + + pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", + task, bio, task->task_lba, + (unsigned long long)bio->bi_sector, err); + + transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt)); } static struct se_subsystem_api iblock_template = { .name = "iblock", .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, - .map_data_SG = iblock_map_data_SG, + .write_cache_emulated = 1, + .fua_write_emulated = 1, .attach_hba = iblock_attach_hba, .detach_hba = iblock_detach_hba, .allocate_virtdevice = iblock_allocate_virtdevice, .create_virtdevice = iblock_create_virtdevice, .free_device = iblock_free_device, - .dpo_emulated = iblock_emulated_dpo, - .fua_write_emulated = iblock_emulated_fua_write, - .fua_read_emulated = iblock_emulated_fua_read, - .write_cache_emulated = iblock_emulated_write_cache, .alloc_task = iblock_alloc_task, .do_task = iblock_do_task, .do_discard = iblock_do_discard, @@ -773,7 +666,6 @@ static struct se_subsystem_api iblock_template = { .check_configfs_dev_params = iblock_check_configfs_dev_params, .set_configfs_dev_params = iblock_set_configfs_dev_params, .show_configfs_dev_params = iblock_show_configfs_dev_params, - .get_cdb = iblock_get_cdb, .get_device_rev = iblock_get_device_rev, .get_device_type = iblock_get_device_type, .get_blocks = iblock_get_blocks, diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index a121cd1b6575..5cf1860c10d0 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -8,10 +8,8 @@ struct iblock_req { struct se_task ib_task; - unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE]; atomic_t ib_bio_cnt; atomic_t ib_bio_err_cnt; - struct bio *ib_bio; } ____cacheline_aligned; #define IBDF_HAS_UDEV_PATH 0x01 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 7fd3a161f7cc..0c4f783f924c 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -25,7 +25,6 @@ * ******************************************************************************/ -#include <linux/version.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/list.h> diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 2b7b0da9146d..dad671dee9e9 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -26,7 +26,6 @@ * ******************************************************************************/ -#include <linux/version.h> #include <linux/string.h> #include <linux/parser.h> #include <linux/timer.h> @@ -567,7 +566,7 @@ static struct se_device *pscsi_create_virtdevice( if (IS_ERR(sh)) { pr_err("pSCSI: Unable to locate" " pdv_host_id: %d\n", pdv->pdv_host_id); - return (struct se_device *) sh; + return ERR_CAST(sh); } } } else { @@ -677,7 +676,7 @@ static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) */ static int pscsi_transport_complete(struct se_task *task) { - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; + struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; struct scsi_device *sd = pdv->pdv_sd; int result; struct pscsi_plugin_task *pt = PSCSI_TASK(task); @@ -777,95 +776,6 @@ pscsi_alloc_task(unsigned char *cdb) return &pt->pscsi_task; } -static inline void pscsi_blk_init_request( - struct se_task *task, - struct pscsi_plugin_task *pt, - struct request *req, - int bidi_read) -{ - /* - * Defined as "scsi 
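iblock_bio_done() is restructured above so every bio takes one path: count an error if needed, drop the bio reference, and let only the final completion (ib_bio_cnt hitting zero) finish the task, with GOOD status only when no error was counted. The same pattern expressed with C11 atomics:

```c
#include <stdio.h>
#include <stdatomic.h>

struct task_state {
    atomic_int bio_cnt;      /* bios still in flight (cf. ib_bio_cnt) */
    atomic_int bio_err_cnt;  /* failed bios (cf. ib_bio_err_cnt) */
};

static void complete_task(int good)
{
    printf("task complete: %s\n", good ? "GOOD" : "FAILED");
}

/* Called once per finished bio, from any completion context. */
static void bio_done(struct task_state *st, int err)
{
    if (err)
        atomic_fetch_add(&st->bio_err_cnt, 1);

    /* Only the caller that drops the in-flight count to zero completes. */
    if (atomic_fetch_sub(&st->bio_cnt, 1) != 1)
        return;

    complete_task(atomic_load(&st->bio_err_cnt) == 0);
}

int main(void)
{
    struct task_state st = { 3, 0 };

    bio_done(&st, 0);
    bio_done(&st, -5);  /* one I/O error */
    bio_done(&st, 0);   /* last bio finishes the task as FAILED */
    return 0;
}
```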
command" in include/linux/blkdev.h. - */ - req->cmd_type = REQ_TYPE_BLOCK_PC; - /* - * For the extra BIDI-COMMAND READ struct request we do not - * need to setup the remaining structure members - */ - if (bidi_read) - return; - /* - * Setup the done function pointer for struct request, - * also set the end_io_data pointer.to struct se_task. - */ - req->end_io = pscsi_req_done; - req->end_io_data = task; - /* - * Load the referenced struct se_task's SCSI CDB into - * include/linux/blkdev.h:struct request->cmd - */ - req->cmd_len = scsi_command_size(pt->pscsi_cdb); - req->cmd = &pt->pscsi_cdb[0]; - /* - * Setup pointer for outgoing sense data. - */ - req->sense = &pt->pscsi_sense[0]; - req->sense_len = 0; -} - -/* - * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB -*/ -static int pscsi_blk_get_request(struct se_task *task) -{ - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; - - pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, - (task->task_data_direction == DMA_TO_DEVICE), - GFP_KERNEL); - if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) { - pr_err("PSCSI: blk_get_request() failed: %ld\n", - IS_ERR(pt->pscsi_req)); - return PYX_TRANSPORT_LU_COMM_FAILURE; - } - /* - * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, - * and setup rq callback, CDB and sense. - */ - pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); - return 0; -} - -/* pscsi_do_task(): (Part of se_subsystem_api_t template) - * - * - */ -static int pscsi_do_task(struct se_task *task) -{ - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; - /* - * Set the struct request->timeout value based on peripheral - * device type from SCSI. - */ - if (pdv->pdv_sd->type == TYPE_DISK) - pt->pscsi_req->timeout = PS_TIMEOUT_DISK; - else - pt->pscsi_req->timeout = PS_TIMEOUT_OTHER; - - pt->pscsi_req->retries = PS_RETRY; - /* - * Queue the struct request into the struct scsi_device->request_queue. - * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd - * descriptor - */ - blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req, - (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), - pscsi_req_done); - - return PYX_TRANSPORT_SENT_TO_TRANSPORT; -} - static void pscsi_free_task(struct se_task *task) { struct pscsi_plugin_task *pt = PSCSI_TASK(task); @@ -1049,15 +959,12 @@ static inline struct bio *pscsi_get_bio(int sg_num) return bio; } -static int __pscsi_map_SG( - struct se_task *task, - struct scatterlist *task_sg, - u32 task_sg_num, - int bidi_read) +static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, + struct bio **hbio) { - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; - struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; + struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; + u32 task_sg_num = task->task_sg_nents; + struct bio *bio = NULL, *tbio = NULL; struct page *page; struct scatterlist *sg; u32 data_len = task->task_size, i, len, bytes, off; @@ -1066,19 +973,8 @@ static int __pscsi_map_SG( int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; int rw = (task->task_data_direction == DMA_TO_DEVICE); - if (!task->task_size) - return 0; - /* - * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup - * the bio_vec maplist from task->task_sg -> - * struct scatterlist memory. 
The struct se_task->task_sg[] currently needs - * to be attached to struct bios for submission to Linux/SCSI using - * struct request to struct scsi_device->request_queue. - * - * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI - * is ported to upstream SCSI passthrough functionality that accepts - * struct scatterlist->page_link or struct page as a paraemeter. - */ + *hbio = NULL; + pr_debug("PSCSI: nr_pages: %d\n", nr_pages); for_each_sg(task_sg, sg, task_sg_num, i) { @@ -1115,8 +1011,8 @@ static int __pscsi_map_SG( * bios need to be added to complete a given * struct se_task */ - if (!hbio) - hbio = tbio = bio; + if (!*hbio) + *hbio = tbio = bio; else tbio = tbio->bi_next = bio; } @@ -1152,92 +1048,82 @@ static int __pscsi_map_SG( off = 0; } } - /* - * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND - * primary SCSI WRITE poayload mapped for struct se_task->task_sg[] - */ - if (!bidi_read) { - /* - * Starting with v2.6.31, call blk_make_request() passing in *hbio to - * allocate the pSCSI task a struct request. - */ - pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, - hbio, GFP_KERNEL); - if (!pt->pscsi_req) { - pr_err("pSCSI: blk_make_request() failed\n"); - goto fail; - } - /* - * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, - * and setup rq callback, CDB and sense. - */ - pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); - - return task->task_sg_nents; - } - /* - * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND - * SCSI READ paylaod mapped for struct se_task->task_sg_bidi[] - */ - pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, - hbio, GFP_KERNEL); - if (!pt->pscsi_req->next_rq) { - pr_err("pSCSI: blk_make_request() failed for BIDI\n"); - goto fail; - } - pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); return task->task_sg_nents; fail: - while (hbio) { - bio = hbio; - hbio = hbio->bi_next; + while (*hbio) { + bio = *hbio; + *hbio = (*hbio)->bi_next; bio->bi_next = NULL; - bio_endio(bio, 0); + bio_endio(bio, 0); /* XXX: should be error */ } return ret; } -/* - * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call. - */ -static int pscsi_map_SG(struct se_task *task) +static int pscsi_do_task(struct se_task *task) { + struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct request *req; + struct bio *hbio; int ret; - /* - * Setup the main struct request for the task->task_sg[] payload - */ + target_get_task_cdb(task, pt->pscsi_cdb); + + if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { + req = blk_get_request(pdv->pdv_sd->request_queue, + (task->task_data_direction == DMA_TO_DEVICE), + GFP_KERNEL); + if (!req || IS_ERR(req)) { + pr_err("PSCSI: blk_get_request() failed: %ld\n", + req ? IS_ERR(req) : -ENOMEM); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + } else { + BUG_ON(!task->task_size); - ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0); - if (ret >= 0 && task->task_sg_bidi) { /* - * If present, set up the extra BIDI-COMMAND SCSI READ - * struct request and payload. 
+ * Setup the main struct request for the task->task_sg[] payload */ - ret = __pscsi_map_SG(task, task->task_sg_bidi, - task->task_sg_nents, 1); + ret = pscsi_map_sg(task, task->task_sg, &hbio); + if (ret < 0) + return PYX_TRANSPORT_LU_COMM_FAILURE; + + req = blk_make_request(pdv->pdv_sd->request_queue, hbio, + GFP_KERNEL); + if (!req) { + pr_err("pSCSI: blk_make_request() failed\n"); + goto fail; + } } - if (ret < 0) - return PYX_TRANSPORT_LU_COMM_FAILURE; - return 0; -} + req->cmd_type = REQ_TYPE_BLOCK_PC; + req->end_io = pscsi_req_done; + req->end_io_data = task; + req->cmd_len = scsi_command_size(pt->pscsi_cdb); + req->cmd = &pt->pscsi_cdb[0]; + req->sense = &pt->pscsi_sense[0]; + req->sense_len = 0; + if (pdv->pdv_sd->type == TYPE_DISK) + req->timeout = PS_TIMEOUT_DISK; + else + req->timeout = PS_TIMEOUT_OTHER; + req->retries = PS_RETRY; -static int pscsi_CDB_none(struct se_task *task) -{ - return pscsi_blk_get_request(task); -} + blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, + (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), + pscsi_req_done); -/* pscsi_get_cdb(): - * - * - */ -static unsigned char *pscsi_get_cdb(struct se_task *task) -{ - struct pscsi_plugin_task *pt = PSCSI_TASK(task); + return PYX_TRANSPORT_SENT_TO_TRANSPORT; - return pt->pscsi_cdb; +fail: + while (hbio) { + struct bio *bio = hbio; + hbio = hbio->bi_next; + bio->bi_next = NULL; + bio_endio(bio, 0); /* XXX: should be error */ + } + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; } /* pscsi_get_sense_buffer(): @@ -1328,23 +1214,13 @@ static void pscsi_req_done(struct request *req, int uptodate) pt->pscsi_resid = req->resid_len; pscsi_process_SAM_status(task, pt); - /* - * Release BIDI-READ if present - */ - if (req->next_rq != NULL) - __blk_put_request(req->q, req->next_rq); - __blk_put_request(req->q, req); - pt->pscsi_req = NULL; } static struct se_subsystem_api pscsi_template = { .name = "pscsi", .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, - .cdb_none = pscsi_CDB_none, - .map_control_SG = pscsi_map_SG, - .map_data_SG = pscsi_map_SG, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, .pmode_enable_hba = pscsi_pmode_enable_hba, @@ -1358,7 +1234,6 @@ static struct se_subsystem_api pscsi_template = { .check_configfs_dev_params = pscsi_check_configfs_dev_params, .set_configfs_dev_params = pscsi_set_configfs_dev_params, .show_configfs_dev_params = pscsi_show_configfs_dev_params, - .get_cdb = pscsi_get_cdb, .get_sense_buffer = pscsi_get_sense_buffer, .get_device_rev = pscsi_get_device_rev, .get_device_type = pscsi_get_device_type, diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index ebf4f1ae2c83..fdc17b6aefb3 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -27,7 +27,6 @@ struct pscsi_plugin_task { int pscsi_direction; int pscsi_result; u32 pscsi_resid; - struct request *pscsi_req; unsigned char pscsi_cdb[0]; } ____cacheline_aligned; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index e567e129c697..5158d3846f19 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -27,7 +27,6 @@ * ******************************************************************************/ -#include <linux/version.h> #include <linux/string.h> #include <linux/parser.h> #include <linux/timer.h> @@ -351,7 +350,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) static int rd_MEMCPY_read(struct rd_request *req) { struct 
se_task *task = &req->rd_task; - struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; + struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; struct rd_dev_sg_table *table; struct scatterlist *sg_d, *sg_s; void *dst, *src; @@ -467,7 +466,7 @@ static int rd_MEMCPY_read(struct rd_request *req) static int rd_MEMCPY_write(struct rd_request *req) { struct se_task *task = &req->rd_task; - struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; + struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; struct rd_dev_sg_table *table; struct scatterlist *sg_d, *sg_s; void *dst, *src; @@ -582,7 +581,7 @@ static int rd_MEMCPY_write(struct rd_request *req) */ static int rd_MEMCPY_do_task(struct se_task *task) { - struct se_device *dev = task->se_dev; + struct se_device *dev = task->task_se_cmd->se_dev; struct rd_request *req = RD_REQ(task); unsigned long long lba; int ret; @@ -692,17 +691,6 @@ static ssize_t rd_show_configfs_dev_params( return bl; } -/* rd_get_cdb(): (Part of se_subsystem_api_t template) - * - * - */ -static unsigned char *rd_get_cdb(struct se_task *task) -{ - struct rd_request *req = RD_REQ(task); - - return req->rd_scsi_cdb; -} - static u32 rd_get_device_rev(struct se_device *dev) { return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ @@ -736,7 +724,6 @@ static struct se_subsystem_api rd_mcp_template = { .check_configfs_dev_params = rd_check_configfs_dev_params, .set_configfs_dev_params = rd_set_configfs_dev_params, .show_configfs_dev_params = rd_show_configfs_dev_params, - .get_cdb = rd_get_cdb, .get_device_rev = rd_get_device_rev, .get_device_type = rd_get_device_type, .get_blocks = rd_get_blocks, diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 0d027732cd00..784e56a04100 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -22,8 +22,6 @@ void rd_module_exit(void); struct rd_request { struct se_task rd_task; - /* SCSI CDB from iSCSI Command PDU */ - unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; /* Offset from start of page */ u32 rd_offset; /* Starting page in Ramdisk for request */ diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c deleted file mode 100644 index 72843441d4fa..000000000000 --- a/drivers/target/target_core_scdb.c +++ /dev/null @@ -1,105 +0,0 @@ -/******************************************************************************* - * Filename: target_core_scdb.c - * - * This file contains the generic target engine Split CDB related functions. - * - * Copyright (c) 2004-2005 PyX Technologies, Inc. - * Copyright (c) 2005, 2006, 2007 SBE, Inc. - * Copyright (c) 2007-2010 Rising Tide Systems - * Copyright (c) 2008-2010 Linux-iSCSI.org - * - * Nicholas A. Bellinger <nab@kernel.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- * - ******************************************************************************/ - -#include <linux/net.h> -#include <linux/string.h> -#include <scsi/scsi.h> -#include <asm/unaligned.h> - -#include <target/target_core_base.h> -#include <target/target_core_transport.h> - -#include "target_core_scdb.h" - -/* split_cdb_XX_6(): - * - * 21-bit LBA w/ 8-bit SECTORS - */ -void split_cdb_XX_6( - unsigned long long lba, - u32 sectors, - unsigned char *cdb) -{ - cdb[1] = (lba >> 16) & 0x1f; - cdb[2] = (lba >> 8) & 0xff; - cdb[3] = lba & 0xff; - cdb[4] = sectors & 0xff; -} - -/* split_cdb_XX_10(): - * - * 32-bit LBA w/ 16-bit SECTORS - */ -void split_cdb_XX_10( - unsigned long long lba, - u32 sectors, - unsigned char *cdb) -{ - put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be16(sectors, &cdb[7]); -} - -/* split_cdb_XX_12(): - * - * 32-bit LBA w/ 32-bit SECTORS - */ -void split_cdb_XX_12( - unsigned long long lba, - u32 sectors, - unsigned char *cdb) -{ - put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be32(sectors, &cdb[6]); -} - -/* split_cdb_XX_16(): - * - * 64-bit LBA w/ 32-bit SECTORS - */ -void split_cdb_XX_16( - unsigned long long lba, - u32 sectors, - unsigned char *cdb) -{ - put_unaligned_be64(lba, &cdb[2]); - put_unaligned_be32(sectors, &cdb[10]); -} - -/* - * split_cdb_XX_32(): - * - * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32 - */ -void split_cdb_XX_32( - unsigned long long lba, - u32 sectors, - unsigned char *cdb) -{ - put_unaligned_be64(lba, &cdb[12]); - put_unaligned_be32(sectors, &cdb[28]); -} diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h deleted file mode 100644 index 48e9ccc9585e..000000000000 --- a/drivers/target/target_core_scdb.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef TARGET_CORE_SCDB_H -#define TARGET_CORE_SCDB_H - -extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *); - -#endif /* TARGET_CORE_SCDB_H */ diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index a8d6e1dee938..874152aed94a 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -32,7 +32,6 @@ #include <linux/delay.h> #include <linux/timer.h> #include <linux/string.h> -#include <linux/version.h> #include <generated/utsrelease.h> #include <linux/utsname.h> #include <linux/proc_fs.h> diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 27d4925e51c3..570b144a1edb 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -24,7 +24,6 @@ * ******************************************************************************/ -#include <linux/version.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/list.h> @@ -44,12 +43,12 @@ struct se_tmr_req *core_tmr_alloc_req( struct se_cmd *se_cmd, void *fabric_tmr_ptr, - u8 function) + u8 function, + gfp_t gfp_flags) { struct se_tmr_req *tmr; - tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? 
- GFP_ATOMIC : GFP_KERNEL); + tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags); if (!tmr) { pr_err("Unable to allocate struct se_tmr_req\n"); return ERR_PTR(-ENOMEM); @@ -67,15 +66,16 @@ void core_tmr_release_req( struct se_tmr_req *tmr) { struct se_device *dev = tmr->tmr_dev; + unsigned long flags; if (!dev) { kmem_cache_free(se_tmr_req_cache, tmr); return; } - spin_lock_irq(&dev->se_tmr_lock); + spin_lock_irqsave(&dev->se_tmr_lock, flags); list_del(&tmr->tmr_list); - spin_unlock_irq(&dev->se_tmr_lock); + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); kmem_cache_free(se_tmr_req_cache, tmr); } @@ -100,54 +100,20 @@ static void core_tmr_handle_tas_abort( transport_cmd_finish_abort(cmd, 0); } -int core_tmr_lun_reset( +static void core_tmr_drain_tmr_list( struct se_device *dev, struct se_tmr_req *tmr, - struct list_head *preempt_and_abort_list, - struct se_cmd *prout_cmd) + struct list_head *preempt_and_abort_list) { - struct se_cmd *cmd, *tcmd; - struct se_node_acl *tmr_nacl = NULL; - struct se_portal_group *tmr_tpg = NULL; - struct se_queue_obj *qobj = &dev->dev_queue_obj; + LIST_HEAD(drain_tmr_list); struct se_tmr_req *tmr_p, *tmr_pp; - struct se_task *task, *task_tmp; + struct se_cmd *cmd; unsigned long flags; - int fe_count, tas; - /* - * TASK_ABORTED status bit, this is configurable via ConfigFS - * struct se_device attributes. spc4r17 section 7.4.6 Control mode page - * - * A task aborted status (TAS) bit set to zero specifies that aborted - * tasks shall be terminated by the device server without any response - * to the application client. A TAS bit set to one specifies that tasks - * aborted by the actions of an I_T nexus other than the I_T nexus on - * which the command was received shall be completed with TASK ABORTED - * status (see SAM-4). - */ - tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; - /* - * Determine if this se_tmr is coming from a $FABRIC_MOD - * or struct se_device passthrough.. - */ - if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { - tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; - tmr_tpg = tmr->task_cmd->se_sess->se_tpg; - if (tmr_nacl && tmr_tpg) { - pr_debug("LUN_RESET: TMR caller fabric: %s" - " initiator port %s\n", - tmr_tpg->se_tpg_tfo->get_fabric_name(), - tmr_nacl->initiatorname); - } - } - pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", - (preempt_and_abort_list) ? "Preempt" : "TMR", - dev->transport->name, tas); /* * Release all pending and outgoing TMRs aside from the received * LUN_RESET tmr.. */ - spin_lock_irq(&dev->se_tmr_lock); + spin_lock_irqsave(&dev->se_tmr_lock, flags); list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { /* * Allow the received TMR to return with FUNCTION_COMPLETE. 
@@ -169,29 +135,48 @@ int core_tmr_lun_reset( (core_scsi3_check_cdb_abort_and_preempt( preempt_and_abort_list, cmd) != 0)) continue; - spin_unlock_irq(&dev->se_tmr_lock); - spin_lock_irqsave(&cmd->t_state_lock, flags); + spin_lock(&cmd->t_state_lock); if (!atomic_read(&cmd->t_transport_active)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - spin_lock_irq(&dev->se_tmr_lock); + spin_unlock(&cmd->t_state_lock); continue; } if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - spin_lock_irq(&dev->se_tmr_lock); + spin_unlock(&cmd->t_state_lock); continue; } + spin_unlock(&cmd->t_state_lock); + + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); + } + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); + + while (!list_empty(&drain_tmr_list)) { + tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list); + list_del(&tmr->tmr_list); + cmd = tmr->task_cmd; + pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," " Response: 0x%02x, t_state: %d\n", - (preempt_and_abort_list) ? "Preempt" : "", tmr_p, - tmr_p->function, tmr_p->response, cmd->t_state); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + (preempt_and_abort_list) ? "Preempt" : "", tmr, + tmr->function, tmr->response, cmd->t_state); - transport_cmd_finish_abort_tmr(cmd); - spin_lock_irq(&dev->se_tmr_lock); + transport_cmd_finish_abort(cmd, 1); } - spin_unlock_irq(&dev->se_tmr_lock); +} + +static void core_tmr_drain_task_list( + struct se_device *dev, + struct se_cmd *prout_cmd, + struct se_node_acl *tmr_nacl, + int tas, + struct list_head *preempt_and_abort_list) +{ + LIST_HEAD(drain_task_list); + struct se_cmd *cmd; + struct se_task *task, *task_tmp; + unsigned long flags; + int fe_count; /* * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. * This is following sam4r17, section 5.6 Aborting commands, Table 38 @@ -236,18 +221,28 @@ int core_tmr_lun_reset( if (prout_cmd == cmd) continue; - list_del(&task->t_state_list); + list_move_tail(&task->t_state_list, &drain_task_list); atomic_set(&task->task_state_active, 0); - spin_unlock_irqrestore(&dev->execute_task_lock, flags); + /* + * Remove from task execute list before processing drain_task_list + */ + if (!list_empty(&task->t_execute_list)) + __transport_remove_task_from_execute_queue(task, dev); + } + spin_unlock_irqrestore(&dev->execute_task_lock, flags); + + while (!list_empty(&drain_task_list)) { + task = list_entry(drain_task_list.next, struct se_task, t_state_list); + list_del(&task->t_state_list); + cmd = task->task_se_cmd; - spin_lock_irqsave(&cmd->t_state_lock, flags); pr_debug("LUN_RESET: %s cmd: %p task: %p" - " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" - "def_t_state: %d/%d cdb: 0x%02x\n", + " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" + " cdb: 0x%02x\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, task, cmd->se_tfo->get_task_tag(cmd), 0, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, - cmd->deferred_t_state, cmd->t_task_cdb[0]); + cmd->t_task_cdb[0]); pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" " t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d -- t_transport_active: %d" @@ -260,35 +255,24 @@ int core_tmr_lun_reset( atomic_read(&cmd->t_transport_stop), atomic_read(&cmd->t_transport_sent)); - if (atomic_read(&task->task_active)) { - atomic_set(&task->task_stop, 1); - spin_unlock_irqrestore( - &cmd->t_state_lock, flags); - - pr_debug("LUN_RESET: Waiting for task: %p to shutdown" - " for dev: %p\n", task, dev); - wait_for_completion(&task->task_stop_comp); - pr_debug("LUN_RESET Completed task: %p shutdown for" - " dev: %p\n", task, dev); - spin_lock_irqsave(&cmd->t_state_lock, flags); - atomic_dec(&cmd->t_task_cdbs_left); - - atomic_set(&task->task_active, 0); - atomic_set(&task->task_stop, 0); - } else { - if (atomic_read(&task->task_execute_queue) != 0) - transport_remove_task_from_execute_queue(task, dev); - } - __transport_stop_task_timer(task, &flags); + /* + * If the command may be queued onto a workqueue cancel it now. + * + * This is equivalent to removal from the execute queue in the + * loop above, but we do it down here given that + * cancel_work_sync may block. + */ + if (cmd->t_state == TRANSPORT_COMPLETE) + cancel_work_sync(&cmd->work); + + spin_lock_irqsave(&cmd->t_state_lock, flags); + target_stop_task(task, &flags); if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { - spin_unlock_irqrestore( - &cmd->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, atomic_read(&cmd->t_task_cdbs_ex_left)); - - spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } fe_count = atomic_read(&cmd->t_fe_count); @@ -298,22 +282,31 @@ int core_tmr_lun_reset( " task: %p, t_fe_count: %d dev: %p\n", task, fe_count, dev); atomic_set(&cmd->t_transport_aborted, 1); - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - spin_lock_irqsave(&dev->execute_task_lock, flags); + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); continue; } pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," " t_fe_count: %d dev: %p\n", task, fe_count, dev); atomic_set(&cmd->t_transport_aborted, 1); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); - spin_lock_irqsave(&dev->execute_task_lock, flags); + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); } - spin_unlock_irqrestore(&dev->execute_task_lock, flags); +} + +static void core_tmr_drain_cmd_list( + struct se_device *dev, + struct se_cmd *prout_cmd, + struct se_node_acl *tmr_nacl, + int tas, + struct list_head *preempt_and_abort_list) +{ + LIST_HEAD(drain_cmd_list); + struct se_queue_obj *qobj = &dev->dev_queue_obj; + struct se_cmd *cmd, *tcmd; + unsigned long flags; /* * Release all commands remaining in the struct se_device cmd queue. * @@ -337,11 +330,26 @@ int core_tmr_lun_reset( */ if (prout_cmd == cmd) continue; + /* + * Skip direct processing of TRANSPORT_FREE_CMD_INTR for + * HW target mode fabrics. 
+ */ + spin_lock(&cmd->t_state_lock); + if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) { + spin_unlock(&cmd->t_state_lock); + continue; + } + spin_unlock(&cmd->t_state_lock); - atomic_dec(&cmd->t_transport_queue_active); + atomic_set(&cmd->t_transport_queue_active, 0); atomic_dec(&qobj->queue_cnt); - list_del(&cmd->se_queue_node); - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + list_move_tail(&cmd->se_queue_node, &drain_cmd_list); + } + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + while (!list_empty(&drain_cmd_list)) { + cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node); + list_del_init(&cmd->se_queue_node); pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" " %d t_fe_count: %d\n", (preempt_and_abort_list) ? @@ -354,9 +362,53 @@ int core_tmr_lun_reset( core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, atomic_read(&cmd->t_fe_count)); - spin_lock_irqsave(&qobj->cmd_queue_lock, flags); } - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); +} + +int core_tmr_lun_reset( + struct se_device *dev, + struct se_tmr_req *tmr, + struct list_head *preempt_and_abort_list, + struct se_cmd *prout_cmd) +{ + struct se_node_acl *tmr_nacl = NULL; + struct se_portal_group *tmr_tpg = NULL; + int tas; + /* + * TASK_ABORTED status bit, this is configurable via ConfigFS + * struct se_device attributes. spc4r17 section 7.4.6 Control mode page + * + * A task aborted status (TAS) bit set to zero specifies that aborted + * tasks shall be terminated by the device server without any response + * to the application client. A TAS bit set to one specifies that tasks + * aborted by the actions of an I_T nexus other than the I_T nexus on + * which the command was received shall be completed with TASK ABORTED + * status (see SAM-4). + */ + tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; + /* + * Determine if this se_tmr is coming from a $FABRIC_MOD + * or struct se_device passthrough.. + */ + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { + tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; + tmr_tpg = tmr->task_cmd->se_sess->se_tpg; + if (tmr_nacl && tmr_tpg) { + pr_debug("LUN_RESET: TMR caller fabric: %s" + " initiator port %s\n", + tmr_tpg->se_tpg_tfo->get_fabric_name(), + tmr_nacl->initiatorname); + } + } + pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", + (preempt_and_abort_list) ? 
"Preempt" : "TMR", + dev->transport->name, tas); + + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); + core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas, + preempt_and_abort_list); + core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas, + preempt_and_abort_list); /* * Clear any legacy SPC-2 reservation when called during * LOGICAL UNIT RESET @@ -379,3 +431,4 @@ int core_tmr_lun_reset( dev->transport->name); return 0; } + diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index a4b0a8d27f25..d75255804481 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -26,7 +26,6 @@ * ******************************************************************************/ -#include <linux/version.h> #include <linux/net.h> #include <linux/delay.h> #include <linux/string.h> @@ -55,11 +54,11 @@ #include "target_core_alua.h" #include "target_core_hba.h" #include "target_core_pr.h" -#include "target_core_scdb.h" #include "target_core_ua.h" static int sub_api_initialized; +static struct workqueue_struct *target_completion_wq; static struct kmem_cache *se_cmd_cache; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_tmr_req_cache; @@ -70,30 +69,19 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache; struct kmem_cache *t10_alua_tg_pt_gp_cache; struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; -/* Used for transport_dev_get_map_*() */ -typedef int (*map_func_t)(struct se_task *, u32); - static int transport_generic_write_pending(struct se_cmd *); static int transport_processing_thread(void *param); static int __transport_execute_tasks(struct se_device *dev); static void transport_complete_task_attr(struct se_cmd *cmd); -static int transport_complete_qf(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, - struct se_device *dev, int (*qf_callback)(struct se_cmd *)); -static void transport_direct_request_timeout(struct se_cmd *cmd); + struct se_device *dev); static void transport_free_dev_tasks(struct se_cmd *cmd); -static u32 transport_allocate_tasks(struct se_cmd *cmd, - unsigned long long starting_lba, - enum dma_data_direction data_direction, - struct scatterlist *sgl, unsigned int nents); static int transport_generic_get_mem(struct se_cmd *cmd); -static int transport_generic_remove(struct se_cmd *cmd, - int session_reinstatement); -static void transport_release_fe_cmd(struct se_cmd *cmd); -static void transport_remove_cmd_from_queue(struct se_cmd *cmd, - struct se_queue_obj *qobj); +static void transport_put_cmd(struct se_cmd *cmd); +static void transport_remove_cmd_from_queue(struct se_cmd *cmd); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); -static void transport_stop_all_task_timers(struct se_cmd *cmd); +static void transport_generic_request_failure(struct se_cmd *, int, int); +static void target_complete_ok_work(struct work_struct *work); int init_se_kmem_caches(void) { @@ -109,7 +97,7 @@ int init_se_kmem_caches(void) if (!se_tmr_req_cache) { pr_err("kmem_cache_create() for struct se_tmr_req" " failed\n"); - goto out; + goto out_free_cmd_cache; } se_sess_cache = kmem_cache_create("se_sess_cache", sizeof(struct se_session), __alignof__(struct se_session), @@ -117,14 +105,14 @@ int init_se_kmem_caches(void) if (!se_sess_cache) { pr_err("kmem_cache_create() for struct se_session" " failed\n"); - goto out; + goto out_free_tmr_req_cache; } se_ua_cache = kmem_cache_create("se_ua_cache", sizeof(struct se_ua), __alignof__(struct se_ua), 0, NULL); if 
(!se_ua_cache) { pr_err("kmem_cache_create() for struct se_ua failed\n"); - goto out; + goto out_free_sess_cache; } t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", sizeof(struct t10_pr_registration), @@ -132,7 +120,7 @@ int init_se_kmem_caches(void) if (!t10_pr_reg_cache) { pr_err("kmem_cache_create() for struct t10_pr_registration" " failed\n"); - goto out; + goto out_free_ua_cache; } t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), @@ -140,7 +128,7 @@ int init_se_kmem_caches(void) if (!t10_alua_lu_gp_cache) { pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" " failed\n"); - goto out; + goto out_free_pr_reg_cache; } t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", sizeof(struct t10_alua_lu_gp_member), @@ -148,7 +136,7 @@ int init_se_kmem_caches(void) if (!t10_alua_lu_gp_mem_cache) { pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" "cache failed\n"); - goto out; + goto out_free_lu_gp_cache; } t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", sizeof(struct t10_alua_tg_pt_gp), @@ -156,7 +144,7 @@ int init_se_kmem_caches(void) if (!t10_alua_tg_pt_gp_cache) { pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "cache failed\n"); - goto out; + goto out_free_lu_gp_mem_cache; } t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( "t10_alua_tg_pt_gp_mem_cache", @@ -166,34 +154,41 @@ int init_se_kmem_caches(void) if (!t10_alua_tg_pt_gp_mem_cache) { pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "mem_t failed\n"); - goto out; + goto out_free_tg_pt_gp_cache; } + target_completion_wq = alloc_workqueue("target_completion", + WQ_MEM_RECLAIM, 0); + if (!target_completion_wq) + goto out_free_tg_pt_gp_mem_cache; + return 0; + +out_free_tg_pt_gp_mem_cache: + kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); +out_free_tg_pt_gp_cache: + kmem_cache_destroy(t10_alua_tg_pt_gp_cache); +out_free_lu_gp_mem_cache: + kmem_cache_destroy(t10_alua_lu_gp_mem_cache); +out_free_lu_gp_cache: + kmem_cache_destroy(t10_alua_lu_gp_cache); +out_free_pr_reg_cache: + kmem_cache_destroy(t10_pr_reg_cache); +out_free_ua_cache: + kmem_cache_destroy(se_ua_cache); +out_free_sess_cache: + kmem_cache_destroy(se_sess_cache); +out_free_tmr_req_cache: + kmem_cache_destroy(se_tmr_req_cache); +out_free_cmd_cache: + kmem_cache_destroy(se_cmd_cache); out: - if (se_cmd_cache) - kmem_cache_destroy(se_cmd_cache); - if (se_tmr_req_cache) - kmem_cache_destroy(se_tmr_req_cache); - if (se_sess_cache) - kmem_cache_destroy(se_sess_cache); - if (se_ua_cache) - kmem_cache_destroy(se_ua_cache); - if (t10_pr_reg_cache) - kmem_cache_destroy(t10_pr_reg_cache); - if (t10_alua_lu_gp_cache) - kmem_cache_destroy(t10_alua_lu_gp_cache); - if (t10_alua_lu_gp_mem_cache) - kmem_cache_destroy(t10_alua_lu_gp_mem_cache); - if (t10_alua_tg_pt_gp_cache) - kmem_cache_destroy(t10_alua_tg_pt_gp_cache); - if (t10_alua_tg_pt_gp_mem_cache) - kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); return -ENOMEM; } void release_se_kmem_caches(void) { + destroy_workqueue(target_completion_wq); kmem_cache_destroy(se_cmd_cache); kmem_cache_destroy(se_tmr_req_cache); kmem_cache_destroy(se_sess_cache); @@ -234,10 +229,13 @@ void transport_init_queue_obj(struct se_queue_obj *qobj) } EXPORT_SYMBOL(transport_init_queue_obj); -static int transport_subsystem_reqmods(void) +void transport_subsystem_check_init(void) { int ret; + if (sub_api_initialized) + return; + ret = request_module("target_core_iblock"); if (ret != 0) pr_err("Unable to load 
target_core_iblock\n"); @@ -254,24 +252,8 @@ static int transport_subsystem_reqmods(void) if (ret != 0) pr_err("Unable to load target_core_stgt\n"); - return 0; -} - -int transport_subsystem_check_init(void) -{ - int ret; - - if (sub_api_initialized) - return 0; - /* - * Request the loading of known TCM subsystem plugins.. - */ - ret = transport_subsystem_reqmods(); - if (ret < 0) - return ret; - sub_api_initialized = 1; - return 0; + return; } struct se_session *transport_init_session(void) @@ -438,16 +420,15 @@ EXPORT_SYMBOL(transport_deregister_session); */ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) { - struct se_device *dev; + struct se_device *dev = cmd->se_dev; struct se_task *task; unsigned long flags; - list_for_each_entry(task, &cmd->t_task_list, t_list) { - dev = task->se_dev; - if (!dev) - continue; + if (!dev) + return; - if (atomic_read(&task->task_active)) + list_for_each_entry(task, &cmd->t_task_list, t_list) { + if (task->task_flags & TF_ACTIVE) continue; if (!atomic_read(&task->task_state_active)) @@ -489,8 +470,6 @@ static int transport_cmd_check_stop( " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - cmd->deferred_t_state = cmd->t_state; - cmd->t_state = TRANSPORT_DEFERRED_CMD; atomic_set(&cmd->t_transport_active, 0); if (transport_off == 2) transport_all_task_dev_remove_state(cmd); @@ -508,8 +487,6 @@ static int transport_cmd_check_stop( " TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - cmd->deferred_t_state = cmd->t_state; - cmd->t_state = TRANSPORT_DEFERRED_CMD; if (transport_off == 2) transport_all_task_dev_remove_state(cmd); @@ -594,35 +571,24 @@ check_lun: void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { - transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); - transport_lun_remove_cmd(cmd); + if (!cmd->se_tmr_req) + transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) return; - if (remove) - transport_generic_remove(cmd, 0); -} - -void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) -{ - transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); - - if (transport_cmd_check_stop_to_fabric(cmd)) - return; - - transport_generic_remove(cmd, 0); + if (remove) { + transport_remove_cmd_from_queue(cmd); + transport_put_cmd(cmd); + } } -static void transport_add_cmd_to_queue( - struct se_cmd *cmd, - int t_state) +static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, + bool at_head) { struct se_device *dev = cmd->se_dev; struct se_queue_obj *qobj = &dev->dev_queue_obj; unsigned long flags; - INIT_LIST_HEAD(&cmd->se_queue_node); - if (t_state) { spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->t_state = t_state; @@ -631,15 +597,20 @@ static void transport_add_cmd_to_queue( } spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { - cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; + + /* If the cmd is already on the list, remove it before we add it */ + if (!list_empty(&cmd->se_queue_node)) + list_del(&cmd->se_queue_node); + else + atomic_inc(&qobj->queue_cnt); + + if (at_head) list_add(&cmd->se_queue_node, &qobj->qobj_list); - } else + else list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); - atomic_inc(&cmd->t_transport_queue_active); + atomic_set(&cmd->t_transport_queue_active, 1); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - atomic_inc(&qobj->queue_cnt); wake_up_interruptible(&qobj->thread_wq); } @@ -656,19 +627,18 @@ 
transport_get_cmd_from_queue(struct se_queue_obj *qobj) } cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); - atomic_dec(&cmd->t_transport_queue_active); + atomic_set(&cmd->t_transport_queue_active, 0); - list_del(&cmd->se_queue_node); + list_del_init(&cmd->se_queue_node); atomic_dec(&qobj->queue_cnt); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return cmd; } -static void transport_remove_cmd_from_queue(struct se_cmd *cmd, - struct se_queue_obj *qobj) +static void transport_remove_cmd_from_queue(struct se_cmd *cmd) { - struct se_cmd *t; + struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); @@ -676,14 +646,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return; } - - list_for_each_entry(t, &qobj->qobj_list, se_queue_node) - if (t == cmd) { - atomic_dec(&cmd->t_transport_queue_active); - atomic_dec(&qobj->queue_cnt); - list_del(&cmd->se_queue_node); - break; - } + atomic_set(&cmd->t_transport_queue_active, 0); + atomic_dec(&qobj->queue_cnt); + list_del_init(&cmd->se_queue_node); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); if (atomic_read(&cmd->t_transport_queue_active)) { @@ -716,6 +681,13 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good) } EXPORT_SYMBOL(transport_complete_sync_cache); +static void target_complete_failure_work(struct work_struct *work) +{ + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + + transport_generic_request_failure(cmd, 1, 1); +} + /* transport_complete_task(): * * Called from interrupt and non interrupt context depending @@ -724,8 +696,7 @@ EXPORT_SYMBOL(transport_complete_sync_cache); void transport_complete_task(struct se_task *task, int success) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = task->se_dev; - int t_state; + struct se_device *dev = cmd->se_dev; unsigned long flags; #if 0 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, @@ -735,7 +706,7 @@ void transport_complete_task(struct se_task *task, int success) atomic_inc(&dev->depth_left); spin_lock_irqsave(&cmd->t_state_lock, flags); - atomic_set(&task->task_active, 0); + task->task_flags &= ~TF_ACTIVE; /* * See if any sense data exists, if so set the TASK_SENSE flag. @@ -754,68 +725,39 @@ void transport_complete_task(struct se_task *task, int success) * See if we are waiting for outstanding struct se_task * to complete for an exception condition */ - if (atomic_read(&task->task_stop)) { - /* - * Decrement cmd->t_se_count if this task had - * previously thrown its timeout exception handler. - */ - if (atomic_read(&task->task_timeout)) { - atomic_dec(&cmd->t_se_count); - atomic_set(&task->task_timeout, 0); - } + if (task->task_flags & TF_REQUEST_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&task->task_stop_comp); return; } /* - * If the task's timeout handler has fired, use the t_task_cdbs_timeout - * left counter to determine when the struct se_cmd is ready to be queued to - * the processing thread. - */ - if (atomic_read(&task->task_timeout)) { - if (!atomic_dec_and_test( - &cmd->t_task_cdbs_timeout_left)) { - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); - return; - } - t_state = TRANSPORT_COMPLETE_TIMEOUT; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - transport_add_cmd_to_queue(cmd, t_state); - return; - } - atomic_dec(&cmd->t_task_cdbs_timeout_left); - - /* * Decrement the outstanding t_task_cdbs_left count. 
The last * struct se_task from struct se_cmd will complete itself into the * device queue depending upon int success. */ if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { - if (!success) - cmd->t_tasks_failed = 1; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } if (!success || cmd->t_tasks_failed) { - t_state = TRANSPORT_COMPLETE_FAILURE; if (!task->task_error_status) { task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; cmd->transport_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } + INIT_WORK(&cmd->work, target_complete_failure_work); } else { atomic_set(&cmd->t_transport_complete, 1); - t_state = TRANSPORT_COMPLETE_OK; + INIT_WORK(&cmd->work, target_complete_ok_work); } + + cmd->t_state = TRANSPORT_COMPLETE; + atomic_set(&cmd->t_transport_active, 1); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_add_cmd_to_queue(cmd, t_state); + queue_work(target_completion_wq, &cmd->work); } EXPORT_SYMBOL(transport_complete_task); @@ -902,14 +844,12 @@ static void __transport_add_task_to_execute_queue( static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) { - struct se_device *dev; + struct se_device *dev = cmd->se_dev; struct se_task *task; unsigned long flags; spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry(task, &cmd->t_task_list, t_list) { - dev = task->se_dev; - if (atomic_read(&task->task_state_active)) continue; @@ -934,38 +874,36 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd) spin_lock_irqsave(&dev->execute_task_lock, flags); list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (atomic_read(&task->task_execute_queue)) + if (!list_empty(&task->t_execute_list)) continue; /* * __transport_add_task_to_execute_queue() handles the * SAM Task Attribute emulation if enabled */ __transport_add_task_to_execute_queue(task, task_prev, dev); - atomic_set(&task->task_execute_queue, 1); task_prev = task; } spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -/* transport_remove_task_from_execute_queue(): - * - * - */ +void __transport_remove_task_from_execute_queue(struct se_task *task, + struct se_device *dev) +{ + list_del_init(&task->t_execute_list); + atomic_dec(&dev->execute_tasks); +} + void transport_remove_task_from_execute_queue( struct se_task *task, struct se_device *dev) { unsigned long flags; - if (atomic_read(&task->task_execute_queue) == 0) { - dump_stack(); + if (WARN_ON(list_empty(&task->t_execute_list))) return; - } spin_lock_irqsave(&dev->execute_task_lock, flags); - list_del(&task->t_execute_list); - atomic_set(&task->task_execute_queue, 0); - atomic_dec(&dev->execute_tasks); + __transport_remove_task_from_execute_queue(task, dev); spin_unlock_irqrestore(&dev->execute_task_lock, flags); } @@ -991,14 +929,11 @@ static void target_qf_do_work(struct work_struct *work) pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, - (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : + (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" : "UNKNOWN"); - /* - * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd - * has been added to head of queue - */ - transport_add_cmd_to_queue(cmd, cmd->t_state); + + transport_add_cmd_to_queue(cmd, cmd->t_state, true); } } @@ -1053,41 +988,6 @@ void transport_dump_dev_state( *bl += sprintf(b + *bl, " "); } -/* transport_release_all_cmds(): - * - * - */ -static void transport_release_all_cmds(struct se_device *dev) -{ - struct se_cmd *cmd, *tcmd; - int bug_out = 0, t_state; - unsigned long flags; - - spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); - list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, - se_queue_node) { - t_state = cmd->t_state; - list_del(&cmd->se_queue_node); - spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, - flags); - - pr_err("Releasing ITT: 0x%08x, i_state: %u," - " t_state: %u directly\n", - cmd->se_tfo->get_task_tag(cmd), - cmd->se_tfo->get_cmd_state(cmd), t_state); - - transport_release_fe_cmd(cmd); - bug_out = 1; - - spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); - } - spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); -#if 0 - if (bug_out) - BUG(); -#endif -} - void transport_dump_vpd_proto_id( struct t10_vpd *vpd, unsigned char *p_buf, @@ -1573,7 +1473,6 @@ transport_generic_get_task(struct se_cmd *cmd, INIT_LIST_HEAD(&task->t_state_list); init_completion(&task->task_stop_comp); task->task_se_cmd = cmd; - task->se_dev = dev; task->task_data_direction = data_direction; return task; @@ -1598,6 +1497,7 @@ void transport_init_se_cmd( INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_ordered_node); INIT_LIST_HEAD(&cmd->se_qf_node); + INIT_LIST_HEAD(&cmd->se_queue_node); INIT_LIST_HEAD(&cmd->t_task_list); init_completion(&cmd->transport_lun_fe_stop_comp); @@ -1641,21 +1541,6 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) return 0; } -void transport_free_se_cmd( - struct se_cmd *se_cmd) -{ - if (se_cmd->se_tmr_req) - core_tmr_release_req(se_cmd->se_tmr_req); - /* - * Check and free any extended CDB buffer that was allocated - */ - if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) - kfree(se_cmd->t_task_cdb); -} -EXPORT_SYMBOL(transport_free_se_cmd); - -static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); - /* transport_generic_allocate_tasks(): * * Called from fabric RX Thread. @@ -1667,12 +1552,6 @@ int transport_generic_allocate_tasks( int ret; transport_generic_prepare_cdb(cdb); - - /* - * This is needed for early exceptions. - */ - cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - /* * Ensure that the received CDB is less than the max (252 + 8) bytes * for VARIABLE_LENGTH_CMD @@ -1730,26 +1609,6 @@ int transport_generic_allocate_tasks( EXPORT_SYMBOL(transport_generic_allocate_tasks); /* - * Used by fabric module frontends not defining a TFO->new_cmd_map() - * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD statis - */ -int transport_generic_handle_cdb( - struct se_cmd *cmd) -{ - if (!cmd->se_lun) { - dump_stack(); - pr_err("cmd->se_lun is NULL\n"); - return -EINVAL; - } - - transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); - return 0; -} -EXPORT_SYMBOL(transport_generic_handle_cdb); - -static void transport_generic_request_failure(struct se_cmd *, - struct se_device *, int, int); -/* * Used by fabric module frontends to queue tasks directly. 
* May only be used from process context */ @@ -1773,7 +1632,7 @@ int transport_handle_cdb_direct( * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() * in existing usage to ensure that outstanding descriptors are handled - * correctly during shutdown via transport_generic_wait_for_tasks() + * correctly during shutdown via transport_wait_for_tasks() * * Also, we don't take cmd->t_state_lock here as we only expect * this to be called for initial descriptor submission. @@ -1790,7 +1649,7 @@ int transport_handle_cdb_direct( return 0; else if (ret < 0) { cmd->transport_error_status = ret; - transport_generic_request_failure(cmd, NULL, 0, + transport_generic_request_failure(cmd, 0, (cmd->data_direction != DMA_TO_DEVICE)); } return 0; @@ -1811,7 +1670,7 @@ int transport_generic_handle_cdb_map( return -EINVAL; } - transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); + transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false); return 0; } EXPORT_SYMBOL(transport_generic_handle_cdb_map); @@ -1841,7 +1700,7 @@ int transport_generic_handle_data( if (transport_check_aborted_status(cmd, 1) != 0) return 0; - transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); + transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false); return 0; } EXPORT_SYMBOL(transport_generic_handle_data); @@ -1853,12 +1712,7 @@ EXPORT_SYMBOL(transport_generic_handle_data); int transport_generic_handle_tmr( struct se_cmd *cmd) { - /* - * This is needed for early exceptions. - */ - cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - - transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); + transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false); return 0; } EXPORT_SYMBOL(transport_generic_handle_tmr); @@ -1866,10 +1720,36 @@ EXPORT_SYMBOL(transport_generic_handle_tmr); void transport_generic_free_cmd_intr( struct se_cmd *cmd) { - transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR); + transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false); } EXPORT_SYMBOL(transport_generic_free_cmd_intr); +/* + * If the task is active, request it to be stopped and sleep until it + * has completed. + */ +bool target_stop_task(struct se_task *task, unsigned long *flags) +{ + struct se_cmd *cmd = task->task_se_cmd; + bool was_active = false; + + if (task->task_flags & TF_ACTIVE) { + task->task_flags |= TF_REQUEST_STOP; + spin_unlock_irqrestore(&cmd->t_state_lock, *flags); + + pr_debug("Task %p waiting to complete\n", task); + wait_for_completion(&task->task_stop_comp); + pr_debug("Task %p stopped successfully\n", task); + + spin_lock_irqsave(&cmd->t_state_lock, *flags); + atomic_dec(&cmd->t_task_cdbs_left); + task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); + was_active = true; + } + + return was_active; +} + static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) { struct se_task *task, *task_tmp; @@ -1885,51 +1765,26 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, &cmd->t_task_list, t_list) { - pr_debug("task_no[%d] - Processing task %p\n", - task->task_no, task); + pr_debug("Processing task %p\n", task); /* * If the struct se_task has not been sent and is not active, * remove the struct se_task from the execution queue.
*/ - if (!atomic_read(&task->task_sent) && - !atomic_read(&task->task_active)) { + if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_remove_task_from_execute_queue(task, - task->se_dev); + cmd->se_dev); - pr_debug("task_no[%d] - Removed from execute queue\n", - task->task_no); + pr_debug("Task %p removed from execute queue\n", task); spin_lock_irqsave(&cmd->t_state_lock, flags); continue; } - /* - * If the struct se_task is active, sleep until it is returned - * from the plugin. - */ - if (atomic_read(&task->task_active)) { - atomic_set(&task->task_stop, 1); - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); - - pr_debug("task_no[%d] - Waiting to complete\n", - task->task_no); - wait_for_completion(&task->task_stop_comp); - pr_debug("task_no[%d] - Stopped successfully\n", - task->task_no); - - spin_lock_irqsave(&cmd->t_state_lock, flags); - atomic_dec(&cmd->t_task_cdbs_left); - - atomic_set(&task->task_active, 0); - atomic_set(&task->task_stop, 0); - } else { - pr_debug("task_no[%d] - Did nothing\n", task->task_no); + if (!target_stop_task(task, &flags)) { + pr_debug("Task %p - did nothing\n", task); ret++; } - - __transport_stop_task_timer(task, &flags); } spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -1941,7 +1796,6 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ static void transport_generic_request_failure( struct se_cmd *cmd, - struct se_device *dev, int complete, int sc) { @@ -1950,10 +1804,9 @@ static void transport_generic_request_failure( pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), cmd->t_task_cdb[0]); - pr_debug("-----[ i_state: %d t_state/def_t_state:" - " %d/%d transport_error_status: %d\n", + pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n", cmd->se_tfo->get_cmd_state(cmd), - cmd->t_state, cmd->deferred_t_state, + cmd->t_state, cmd->transport_error_status); pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" @@ -1966,10 +1819,6 @@ static void transport_generic_request_failure( atomic_read(&cmd->t_transport_stop), atomic_read(&cmd->t_transport_sent)); - transport_stop_all_task_timers(cmd); - - if (dev) - atomic_inc(&dev->depth_left); /* * For SAM Task Attribute emulation for failed struct se_cmd */ @@ -1977,7 +1826,6 @@ static void transport_generic_request_failure( transport_complete_task_attr(cmd); if (complete) { - transport_direct_request_timeout(cmd); cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -2076,46 +1924,8 @@ check_stop: return; queue_full: - cmd->t_state = TRANSPORT_COMPLETE_OK; - transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); -} - -static void transport_direct_request_timeout(struct se_cmd *cmd) -{ - unsigned long flags; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (!atomic_read(&cmd->t_transport_timeout)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; - } - if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; - } - - atomic_sub(atomic_read(&cmd->t_transport_timeout), - &cmd->t_se_count); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); -} - -static void transport_generic_request_timeout(struct se_cmd *cmd) -{ - unsigned long flags; - - /* - * Reset cmd->t_se_count to allow transport_generic_remove() - * to allow last call to free memory resources. 
- */ - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (atomic_read(&cmd->t_transport_timeout) > 1) { - int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); - - atomic_sub(tmp, &cmd->t_se_count); - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - transport_generic_remove(cmd, 0); + cmd->t_state = TRANSPORT_COMPLETE_QF_OK; + transport_handle_queue_full(cmd, cmd->se_dev); } static inline u32 transport_lba_21(unsigned char *cdb) @@ -2160,127 +1970,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); } -/* - * Called from interrupt context. - */ -static void transport_task_timeout_handler(unsigned long data) -{ - struct se_task *task = (struct se_task *)data; - struct se_cmd *cmd = task->task_se_cmd; - unsigned long flags; - - pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); - - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (task->task_flags & TF_STOP) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; - } - task->task_flags &= ~TF_RUNNING; - - /* - * Determine if transport_complete_task() has already been called. - */ - if (!atomic_read(&task->task_active)) { - pr_debug("transport task: %p cmd: %p timeout task_active" - " == 0\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; - } - - atomic_inc(&cmd->t_se_count); - atomic_inc(&cmd->t_transport_timeout); - cmd->t_tasks_failed = 1; - - atomic_set(&task->task_timeout, 1); - task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; - task->task_scsi_status = 1; - - if (atomic_read(&task->task_stop)) { - pr_debug("transport task: %p cmd: %p timeout task_stop" - " == 1\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&task->task_stop_comp); - return; - } - - if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { - pr_debug("transport task: %p cmd: %p timeout non zero" - " t_task_cdbs_left\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; - } - pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", - task, cmd); - - cmd->t_state = TRANSPORT_COMPLETE_FAILURE; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); -} - -/* - * Called with cmd->t_state_lock held. - */ -static void transport_start_task_timer(struct se_task *task) -{ - struct se_device *dev = task->se_dev; - int timeout; - - if (task->task_flags & TF_RUNNING) - return; - /* - * If the task_timeout is disabled, exit now. - */ - timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; - if (!timeout) - return; - - init_timer(&task->task_timer); - task->task_timer.expires = (get_jiffies_64() + timeout * HZ); - task->task_timer.data = (unsigned long) task; - task->task_timer.function = transport_task_timeout_handler; - - task->task_flags |= TF_RUNNING; - add_timer(&task->task_timer); -#if 0 - pr_debug("Starting task timer for cmd: %p task: %p seconds:" - " %d\n", task->task_se_cmd, task, timeout); -#endif -} - -/* - * Called with spin_lock_irq(&cmd->t_state_lock) held. 
- */ -void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) -{ - struct se_cmd *cmd = task->task_se_cmd; - - if (!task->task_flags & TF_RUNNING) - return; - - task->task_flags |= TF_STOP; - spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - - del_timer_sync(&task->task_timer); - - spin_lock_irqsave(&cmd->t_state_lock, *flags); - task->task_flags &= ~TF_RUNNING; - task->task_flags &= ~TF_STOP; -} - -static void transport_stop_all_task_timers(struct se_cmd *cmd) -{ - struct se_task *task = NULL, *task_tmp; - unsigned long flags; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_for_each_entry_safe(task, task_tmp, - &cmd->t_task_list, t_list) - __transport_stop_task_timer(task, &flags); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); -} - static inline int transport_tcq_window_closed(struct se_device *dev) { if (dev->dev_tcq_window_closed++ < @@ -2385,7 +2074,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; - transport_generic_request_failure(cmd, NULL, 0, 1); + transport_generic_request_failure(cmd, 0, 1); return 0; } @@ -2448,9 +2137,7 @@ check_depth: } task = list_first_entry(&dev->execute_task_list, struct se_task, t_execute_list); - list_del(&task->t_execute_list); - atomic_set(&task->task_execute_queue, 0); - atomic_dec(&dev->execute_tasks); + __transport_remove_task_from_execute_queue(task, dev); spin_unlock_irq(&dev->execute_task_lock); atomic_dec(&dev->depth_left); @@ -2458,15 +2145,13 @@ check_depth: cmd = task->task_se_cmd; spin_lock_irqsave(&cmd->t_state_lock, flags); - atomic_set(&task->task_active, 1); - atomic_set(&task->task_sent, 1); + task->task_flags |= (TF_ACTIVE | TF_SENT); atomic_inc(&cmd->t_task_cdbs_sent); if (atomic_read(&cmd->t_task_cdbs_sent) == cmd->t_task_list_num) - atomic_set(&cmd->transport_sent, 1); + atomic_set(&cmd->t_transport_sent, 1); - transport_start_task_timer(task); spin_unlock_irqrestore(&cmd->t_state_lock, flags); /* * The struct se_cmd->transport_emulate_cdb() function pointer is used @@ -2477,10 +2162,13 @@ check_depth: error = cmd->transport_emulate_cdb(cmd); if (error != 0) { cmd->transport_error_status = error; - atomic_set(&task->task_active, 0); - atomic_set(&cmd->transport_sent, 0); + spin_lock_irqsave(&cmd->t_state_lock, flags); + task->task_flags &= ~TF_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + atomic_set(&cmd->t_transport_sent, 0); transport_stop_tasks_for_cmd(cmd); - transport_generic_request_failure(cmd, dev, 0, 1); + atomic_inc(&dev->depth_left); + transport_generic_request_failure(cmd, 0, 1); goto check_depth; } /* @@ -2513,10 +2201,13 @@ check_depth: if (error != 0) { cmd->transport_error_status = error; - atomic_set(&task->task_active, 0); - atomic_set(&cmd->transport_sent, 0); + spin_lock_irqsave(&cmd->t_state_lock, flags); + task->task_flags &= ~TF_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + atomic_set(&cmd->t_transport_sent, 0); transport_stop_tasks_for_cmd(cmd); - transport_generic_request_failure(cmd, dev, 0, 1); + atomic_inc(&dev->depth_left); + transport_generic_request_failure(cmd, 0, 1); } } @@ -2538,8 +2229,6 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); } -static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); - static inline u32 transport_get_sectors_6( unsigned char *cdb, struct se_cmd *cmd, @@ -2752,13 +2441,16 @@ out: static int 
transport_get_sense_data(struct se_cmd *cmd) { unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; - struct se_device *dev; + struct se_device *dev = cmd->se_dev; struct se_task *task = NULL, *task_tmp; unsigned long flags; u32 offset = 0; WARN_ON(!cmd->se_lun); + if (!dev) + return 0; + spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2767,14 +2459,9 @@ static int transport_get_sense_data(struct se_cmd *cmd) list_for_each_entry_safe(task, task_tmp, &cmd->t_task_list, t_list) { - if (!task->task_sense) continue; - dev = task->se_dev; - if (!dev) - continue; - if (!dev->transport->get_sense_buffer) { pr_err("dev->transport->get_sense_buffer" " is NULL\n"); @@ -2783,9 +2470,9 @@ static int transport_get_sense_data(struct se_cmd *cmd) sense_buffer = dev->transport->get_sense_buffer(task); if (!sense_buffer) { - pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" + pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate" " sense buffer for task with sense\n", - cmd->se_tfo->get_task_tag(cmd), task->task_no); + cmd->se_tfo->get_task_tag(cmd), task); continue; } spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2814,7 +2501,6 @@ static int transport_get_sense_data(struct se_cmd *cmd) static int transport_handle_reservation_conflict(struct se_cmd *cmd) { - cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; @@ -2915,8 +2601,6 @@ static int transport_generic_cmd_sequencer( * Check for an existing UNIT ATTENTION condition */ if (core_scsi3_ua_check(cmd, cdb) < 0) { - cmd->transport_wait_for_tasks = - &transport_nop_wait_for_tasks; cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; return -EINVAL; @@ -2926,7 +2610,6 @@ static int transport_generic_cmd_sequencer( */ ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); if (ret != 0) { - cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; /* * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; * The ALUA additional sense code qualifier (ASCQ) is determined @@ -2965,7 +2648,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_6; cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; @@ -2974,7 +2656,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_10; cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; @@ -2983,7 +2664,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_12; cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; @@ -2992,7 +2672,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_16; cmd->t_task_lba = transport_lba_64(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; @@ -3001,7 +2680,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto 
out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_6; cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; @@ -3010,7 +2688,6 @@ if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_10; cmd->t_task_lba = transport_lba_32(cdb); cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -3020,7 +2697,6 @@ if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_12; cmd->t_task_lba = transport_lba_32(cdb); cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -3030,7 +2706,6 @@ if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_16; cmd->t_task_lba = transport_lba_64(cdb); cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -3043,18 +2718,14 @@ if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_10; cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; - passthrough = (dev->transport->transport_type == - TRANSPORT_PLUGIN_PHBA_PDEV); - /* - * Skip the remaining assignments for TCM/PSCSI passthrough - */ - if (passthrough) - break; + + if (dev->transport->transport_type == + TRANSPORT_PLUGIN_PHBA_PDEV) + goto out_unsupported_cdb; /* - * Setup BIDI XOR callback to be run during transport_generic_complete_ok() + * Setup BIDI XOR callback to be run after I/O completion. */ cmd->transport_complete_callback = &transport_xor_callback; cmd->t_tasks_fua = (cdb[1] & 0x8); @@ -3078,19 +2749,14 @@ static int transport_generic_cmd_sequencer( * Use WRITE_32 and READ_32 opcodes for the emulated * XDWRITE_READ_32 logic. */ - cmd->transport_split_cdb = &split_cdb_XX_32; cmd->t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; - /* - * Skip the remaining assignments for TCM/PSCSI passthrough - */ if (passthrough) - break; - + goto out_unsupported_cdb; /* - * Setup BIDI XOR callback to be run during - * transport_generic_complete_ok() + * Setup BIDI XOR callback to be run after I/O + * completion. */ cmd->transport_complete_callback = &transport_xor_callback; cmd->t_tasks_fua = (cdb[10] & 0x8); @@ -3430,7 +3096,6 @@ static int transport_generic_cmd_sequencer( pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" " 0x%02x, sending CHECK_CONDITION.\n", cmd->se_tfo->get_fabric_name(), cdb[0]); - cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; goto out_unsupported_cdb; } @@ -3488,8 +3153,7 @@ out_invalid_cdb_field: } /* - * Called from transport_generic_complete_ok() and - * transport_generic_request_failure() to determine which dormant/delayed + * Called from I/O completion to determine which dormant/delayed * and ordered cmds need to have their tasks added to the execution queue.
*/ static void transport_complete_task_attr(struct se_cmd *cmd) @@ -3557,12 +3221,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd) wake_up_interruptible(&dev->dev_queue_obj.thread_wq); } -static int transport_complete_qf(struct se_cmd *cmd) +static void transport_complete_qf(struct se_cmd *cmd) { int ret = 0; - if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) - return cmd->se_tfo->queue_status(cmd); + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + transport_complete_task_attr(cmd); + + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { + ret = cmd->se_tfo->queue_status(cmd); + if (ret) + goto out; + } switch (cmd->data_direction) { case DMA_FROM_DEVICE: @@ -3572,7 +3242,7 @@ static int transport_complete_qf(struct se_cmd *cmd) if (cmd->t_bidi_data_sg) { ret = cmd->se_tfo->queue_data_in(cmd); if (ret < 0) - return ret; + break; } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: @@ -3582,17 +3252,20 @@ static int transport_complete_qf(struct se_cmd *cmd) break; } - return ret; +out: + if (ret < 0) { + transport_handle_queue_full(cmd, cmd->se_dev); + return; + } + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); } static void transport_handle_queue_full( struct se_cmd *cmd, - struct se_device *dev, - int (*qf_callback)(struct se_cmd *)) + struct se_device *dev) { spin_lock_irq(&dev->qf_cmd_lock); - cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; - cmd->transport_qf_callback = qf_callback; list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); atomic_inc(&dev->dev_qf_count); smp_mb__after_atomic_inc(); @@ -3601,9 +3274,11 @@ static void transport_handle_queue_full( schedule_work(&cmd->se_dev->qf_work_queue); } -static void transport_generic_complete_ok(struct se_cmd *cmd) +static void target_complete_ok_work(struct work_struct *work) { + struct se_cmd *cmd = container_of(work, struct se_cmd, work); int reason = 0, ret; + /* * Check if we need to move delayed/dormant tasks from cmds on the * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task @@ -3618,14 +3293,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) schedule_work(&cmd->se_dev->qf_work_queue); - if (cmd->transport_qf_callback) { - ret = cmd->transport_qf_callback(cmd); - if (ret < 0) - goto queue_full; - - cmd->transport_qf_callback = NULL; - goto done; - } /* * Check if we need to retrieve a sense buffer from * the struct se_cmd in question. 
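The hunks above and below complete the series' move of se_cmd completion off the device queue thread and onto the target_completion_wq allocated in init_se_kmem_caches(). As a minimal sketch of the embedded-work_struct pattern involved: only alloc_workqueue(), INIT_WORK(), queue_work(), and container_of() below are the stock kernel APIs this merge uses; the my_* names are hypothetical stand-ins, not TCM symbols.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_cmd {
	int scsi_status;
	struct work_struct work;	/* embedded, like se_cmd->work */
};

static struct workqueue_struct *my_wq;	/* like target_completion_wq */

/* Runs in process context off my_wq: free to sleep, take mutexes, etc. */
static void my_complete_ok_work(struct work_struct *work)
{
	/* Recover the containing command from the embedded work item */
	struct my_cmd *cmd = container_of(work, struct my_cmd, work);

	kfree(cmd);
}

/* Backend completion path; may be called from hard/soft IRQ context */
static void my_complete(struct my_cmd *cmd)
{
	INIT_WORK(&cmd->work, my_complete_ok_work);
	queue_work(my_wq, &cmd->work);
}

static int __init my_setup(void)
{
	/* WQ_MEM_RECLAIM: I/O completion must progress under memory pressure */
	my_wq = alloc_workqueue("my_completion", WQ_MEM_RECLAIM, 0);
	return my_wq ? 0 : -ENOMEM;
}

Deferring to a WQ_MEM_RECLAIM workqueue rather than invoking the handler directly lets backend drivers signal completion from interrupt context while the heavier fabric callbacks run where they are allowed to sleep.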
@@ -3701,7 +3368,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) break; } -done: transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; @@ -3709,34 +3375,35 @@ done: queue_full: pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," " data_direction: %d\n", cmd, cmd->data_direction); - transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); + cmd->t_state = TRANSPORT_COMPLETE_QF_OK; + transport_handle_queue_full(cmd, cmd->se_dev); } static void transport_free_dev_tasks(struct se_cmd *cmd) { struct se_task *task, *task_tmp; unsigned long flags; + LIST_HEAD(dispose_list); spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, &cmd->t_task_list, t_list) { - if (atomic_read(&task->task_active)) - continue; + if (!(task->task_flags & TF_ACTIVE)) + list_move_tail(&task->t_list, &dispose_list); + } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + while (!list_empty(&dispose_list)) { + task = list_first_entry(&dispose_list, struct se_task, t_list); - kfree(task->task_sg_bidi); - kfree(task->task_sg); + if (task->task_sg != cmd->t_data_sg && + task->task_sg != cmd->t_bidi_data_sg) + kfree(task->task_sg); list_del(&task->t_list); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (task->se_dev) - task->se_dev->transport->free_task(task); - else - pr_err("task[%u] - task->se_dev is NULL\n", - task->task_no); - spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->se_dev->transport->free_task(task); } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static inline void transport_free_sgl(struct scatterlist *sgl, int nents) @@ -3764,89 +3431,43 @@ static inline void transport_free_pages(struct se_cmd *cmd) cmd->t_bidi_data_nents = 0; } -static inline void transport_release_tasks(struct se_cmd *cmd) -{ - transport_free_dev_tasks(cmd); -} - -static inline int transport_dec_and_check(struct se_cmd *cmd) +/** + * transport_put_cmd - release a reference to a command + * @cmd: command to release + * + * This routine releases our reference to the command and frees it if possible. 
@@ -3764,89 +3431,43 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 	cmd->t_bidi_data_nents = 0;
 }
 
-static inline void transport_release_tasks(struct se_cmd *cmd)
-{
-	transport_free_dev_tasks(cmd);
-}
-
-static inline int transport_dec_and_check(struct se_cmd *cmd)
+/**
+ * transport_put_cmd - release a reference to a command
+ * @cmd: command to release
+ *
+ * This routine releases our reference to the command and frees it if possible.
+ */
+static void transport_put_cmd(struct se_cmd *cmd)
 {
 	unsigned long flags;
+	int free_tasks = 0;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (atomic_read(&cmd->t_fe_count)) {
-		if (!atomic_dec_and_test(&cmd->t_fe_count)) {
-			spin_unlock_irqrestore(&cmd->t_state_lock,
-					flags);
-			return 1;
-		}
+		if (!atomic_dec_and_test(&cmd->t_fe_count))
+			goto out_busy;
 	}
 
 	if (atomic_read(&cmd->t_se_count)) {
-		if (!atomic_dec_and_test(&cmd->t_se_count)) {
-			spin_unlock_irqrestore(&cmd->t_state_lock,
-					flags);
-			return 1;
-		}
+		if (!atomic_dec_and_test(&cmd->t_se_count))
+			goto out_busy;
 	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	return 0;
-}
-
-static void transport_release_fe_cmd(struct se_cmd *cmd)
-{
-	unsigned long flags;
-
-	if (transport_dec_and_check(cmd))
-		return;
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!atomic_read(&cmd->transport_dev_active)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		goto free_pages;
-	}
-	atomic_set(&cmd->transport_dev_active, 0);
-	transport_all_task_dev_remove_state(cmd);
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	transport_release_tasks(cmd);
-free_pages:
-	transport_free_pages(cmd);
-	transport_free_se_cmd(cmd);
-	cmd->se_tfo->release_cmd(cmd);
-}
-
-static int
-transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
-{
-	unsigned long flags;
-
-	if (transport_dec_and_check(cmd)) {
-		if (session_reinstatement) {
-			spin_lock_irqsave(&cmd->t_state_lock, flags);
-			transport_all_task_dev_remove_state(cmd);
-			spin_unlock_irqrestore(&cmd->t_state_lock,
-					flags);
-		}
-		return 1;
-	}
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!atomic_read(&cmd->transport_dev_active)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		goto free_pages;
+	if (atomic_read(&cmd->transport_dev_active)) {
+		atomic_set(&cmd->transport_dev_active, 0);
+		transport_all_task_dev_remove_state(cmd);
+		free_tasks = 1;
 	}
-	atomic_set(&cmd->transport_dev_active, 0);
-	transport_all_task_dev_remove_state(cmd);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	transport_release_tasks(cmd);
+	if (free_tasks != 0)
+		transport_free_dev_tasks(cmd);
 
-free_pages:
 	transport_free_pages(cmd);
 	transport_release_cmd(cmd);
-	return 0;
+	return;
+out_busy:
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
 /*
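[Editorial aside] transport_put_cmd() folds the old transport_dec_and_check()/transport_generic_remove() pair into one release path: each of the two reference counters is decremented only if it was ever taken, and any counter that does not reach zero means another context still owns the command. A compact C11-atomics sketch of that dec-and-test logic; a standalone model, not the kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd {
	atomic_int fe_count;   /* models cmd->t_fe_count */
	atomic_int se_count;   /* models cmd->t_se_count */
};

/* Returns true when the caller held the last reference and may free. */
static bool put_cmd(struct cmd *c)
{
	/* decrement only if the counter was taken; busy if it stays > 0 */
	if (atomic_load(&c->fe_count) &&
	    atomic_fetch_sub(&c->fe_count, 1) != 1)
		return false;          /* models goto out_busy */
	if (atomic_load(&c->se_count) &&
	    atomic_fetch_sub(&c->se_count, 1) != 1)
		return false;
	return true;                   /* last reference: free resources */
}

int main(void)
{
	struct cmd c = { 2, 1 };

	printf("%d\n", put_cmd(&c));   /* 0: fe_count still held */
	printf("%d\n", put_cmd(&c));   /* 1: last reference dropped */
	return 0;
}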
@@ -3888,62 +3509,6 @@ int transport_generic_map_mem_to_cmd(
 }
 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
 
-static int transport_new_cmd_obj(struct se_cmd *cmd)
-{
-	struct se_device *dev = cmd->se_dev;
-	int set_counts = 1, rc, task_cdbs;
-
-	/*
-	 * Setup any BIDI READ tasks and memory from
-	 * cmd->t_mem_bidi_list so the READ struct se_tasks
-	 * are queued first for the non pSCSI passthrough case.
-	 */
-	if (cmd->t_bidi_data_sg &&
-	    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
-		rc = transport_allocate_tasks(cmd,
-			cmd->t_task_lba,
-			DMA_FROM_DEVICE,
-			cmd->t_bidi_data_sg,
-			cmd->t_bidi_data_nents);
-		if (rc <= 0) {
-			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-			cmd->scsi_sense_reason =
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-			return -EINVAL;
-		}
-		atomic_inc(&cmd->t_fe_count);
-		atomic_inc(&cmd->t_se_count);
-		set_counts = 0;
-	}
-	/*
-	 * Setup the tasks and memory from cmd->t_mem_list
-	 * Note for BIDI transfers this will contain the WRITE payload
-	 */
-	task_cdbs = transport_allocate_tasks(cmd,
-		cmd->t_task_lba,
-		cmd->data_direction,
-		cmd->t_data_sg,
-		cmd->t_data_nents);
-	if (task_cdbs <= 0) {
-		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-		cmd->scsi_sense_reason =
-			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return -EINVAL;
-	}
-
-	if (set_counts) {
-		atomic_inc(&cmd->t_fe_count);
-		atomic_inc(&cmd->t_se_count);
-	}
-
-	cmd->t_task_list_num = task_cdbs;
-
-	atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
-	atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
-	atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
-	return 0;
-}
-
 void *transport_kmap_first_data_page(struct se_cmd *cmd)
 {
 	struct scatterlist *sg = cmd->t_data_sg;
@@ -4054,15 +3619,13 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 		/*
 		 * For the padded tasks, use the extra SGL vector allocated
 		 * in transport_allocate_data_tasks() for the sg_prev_nents
-		 * offset into sg_chain() above.. The last task of a
-		 * multi-task list, or a single task will not have
-		 * task->task_sg_padded set..
+		 * offset into sg_chain() above.
+		 *
+		 * We do not need the padding for the last task (or a single
+		 * task), but in that case we will never use the sg_prev_nents
+		 * value below which would be incorrect.
 		 */
-		if (task->task_padded_sg)
-			sg_prev_nents = (task->task_sg_nents + 1);
-		else
-			sg_prev_nents = task->task_sg_nents;
-
+		sg_prev_nents = (task->task_sg_nents + 1);
 		sg_prev = task->task_sg;
 	}
 	/*
@@ -4092,30 +3655,60 @@ EXPORT_SYMBOL(transport_do_task_sg_chain);
 /*
  * Break up cmd into chunks transport can handle
  */
-static int transport_allocate_data_tasks(
-	struct se_cmd *cmd,
-	unsigned long long lba,
+static int
+transport_allocate_data_tasks(struct se_cmd *cmd,
 	enum dma_data_direction data_direction,
-	struct scatterlist *sgl,
-	unsigned int sgl_nents)
+	struct scatterlist *cmd_sg, unsigned int sgl_nents)
 {
-	unsigned char *cdb = NULL;
-	struct se_task *task;
 	struct se_device *dev = cmd->se_dev;
-	unsigned long flags;
-	int task_count, i, ret;
-	sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
-	u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
-	struct scatterlist *sg;
-	struct scatterlist *cmd_sg;
+	int task_count, i;
+	unsigned long long lba;
+	sector_t sectors, dev_max_sectors;
+	u32 sector_size;
+
+	if (transport_cmd_get_valid_sectors(cmd) < 0)
+		return -EINVAL;
+
+	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
 
 	WARN_ON(cmd->data_length % sector_size);
+
+	lba = cmd->t_task_lba;
 	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
 	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
-
-	cmd_sg = sgl;
+
+	/*
+	 * If we need just a single task reuse the SG list in the command
+	 * and avoid a lot of work.
+	 */
+	if (task_count == 1) {
+		struct se_task *task;
+		unsigned long flags;
+
+		task = transport_generic_get_task(cmd, data_direction);
+		if (!task)
+			return -ENOMEM;
+
+		task->task_sg = cmd_sg;
+		task->task_sg_nents = sgl_nents;
+
+		task->task_lba = lba;
+		task->task_sectors = sectors;
+		task->task_size = task->task_sectors * sector_size;
+
+		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		list_add_tail(&task->t_list, &cmd->t_task_list);
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+		return task_count;
+	}
+
 	for (i = 0; i < task_count; i++) {
+		struct se_task *task;
 		unsigned int task_size, task_sg_nents_padded;
+		struct scatterlist *sg;
+		unsigned long flags;
 		int count;
 
 		task = transport_generic_get_task(cmd, data_direction);
@@ -4126,14 +3719,6 @@ static int transport_allocate_data_tasks(
 		task->task_sectors = min(sectors, dev_max_sectors);
 		task->task_size = task->task_sectors * sector_size;
 
-		cdb = dev->transport->get_cdb(task);
-		BUG_ON(!cdb);
-
-		memcpy(cdb, cmd->t_task_cdb,
-			scsi_command_size(cmd->t_task_cdb));
-
-		/* Update new cdb with updated lba/sectors */
-		cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
 		/*
 		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
 		 * in order to calculate the number per task SGL entries
@@ -4149,7 +3734,6 @@ static int transport_allocate_data_tasks(
 		 */
 		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
 			task_sg_nents_padded = (task->task_sg_nents + 1);
-			task->task_padded_sg = 1;
 		} else
 			task_sg_nents_padded = task->task_sg_nents;
 
@@ -4181,20 +3765,6 @@ static int transport_allocate_data_tasks(
 		list_add_tail(&task->t_list, &cmd->t_task_list);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	}
-	/*
-	 * Now perform the memory map of task->task_sg[] into backend
-	 * subsystem memory..
-	 */
-	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		if (atomic_read(&task->task_sent))
-			continue;
-		if (!dev->transport->map_data_SG)
-			continue;
-
-		ret = dev->transport->map_data_SG(task);
-		if (ret < 0)
-			return 0;
-	}
 
 	return task_count;
 }
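[Editorial aside] The splitting logic above cuts cmd->data_length into tasks of at most dev_max_sectors sectors each, and the new task_count == 1 fast path hands the command's own scatterlist to the single task instead of allocating a per-task copy. A standalone sketch of the same arithmetic and fast path; block size and the per-task limit are made-up values.

#include <stdio.h>

#define BLOCK_SIZE   512u
#define MAX_SECTORS  1024u   /* per-task limit, models dev max_sectors */

int main(void)
{
	unsigned long long lba = 0;
	unsigned int data_length = 3 * MAX_SECTORS * BLOCK_SIZE / 2;
	/* models DIV_ROUND_UP(cmd->data_length, sector_size) */
	unsigned int sectors = (data_length + BLOCK_SIZE - 1) / BLOCK_SIZE;
	/* models DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors) */
	unsigned int task_count = (sectors + MAX_SECTORS - 1) / MAX_SECTORS;

	if (task_count == 1) {
		/* fast path: task->task_sg = cmd_sg, no extra allocation */
		printf("single task: lba %llu, %u sectors, shared SG list\n",
		       lba, sectors);
		return 0;
	}
	for (unsigned int i = 0; i < task_count; i++) {
		unsigned int chunk = sectors < MAX_SECTORS ? sectors
							   : MAX_SECTORS;

		printf("task %u: lba %llu, %u sectors, %u bytes\n",
		       i, lba, chunk, chunk * BLOCK_SIZE);
		lba += chunk;
		sectors -= chunk;
	}
	return 0;
}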
@@ -4202,30 +3772,14 @@ static int transport_allocate_data_tasks(
 static int
 transport_allocate_control_task(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_dev;
-	unsigned char *cdb;
 	struct se_task *task;
 	unsigned long flags;
-	int ret = 0;
 
 	task = transport_generic_get_task(cmd, cmd->data_direction);
 	if (!task)
 		return -ENOMEM;
 
-	cdb = dev->transport->get_cdb(task);
-	BUG_ON(!cdb);
-	memcpy(cdb, cmd->t_task_cdb,
-		scsi_command_size(cmd->t_task_cdb));
-
-	task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
-			GFP_KERNEL);
-	if (!task->task_sg) {
-		cmd->se_dev->transport->free_task(task);
-		return -ENOMEM;
-	}
-
-	memcpy(task->task_sg, cmd->t_data_sg,
-		sizeof(struct scatterlist) * cmd->t_data_nents);
+	task->task_sg = cmd->t_data_sg;
 	task->task_size = cmd->data_length;
 	task->task_sg_nents = cmd->t_data_nents;
 
@@ -4233,53 +3787,20 @@ transport_allocate_control_task(struct se_cmd *cmd)
 	list_add_tail(&task->t_list, &cmd->t_task_list);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
-		if (dev->transport->map_control_SG)
-			ret = dev->transport->map_control_SG(task);
-	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
-		if (dev->transport->cdb_none)
-			ret = dev->transport->cdb_none(task);
-	} else {
-		pr_err("target: Unknown control cmd type!\n");
-		BUG();
-	}
-	/* Success! Return number of tasks allocated */
-	if (ret == 0)
-		return 1;
-	return ret;
-}
-
-static u32 transport_allocate_tasks(
-	struct se_cmd *cmd,
-	unsigned long long lba,
-	enum dma_data_direction data_direction,
-	struct scatterlist *sgl,
-	unsigned int sgl_nents)
-{
-	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
-		if (transport_cmd_get_valid_sectors(cmd) < 0)
-			return -EINVAL;
-
-		return transport_allocate_data_tasks(cmd, lba, data_direction,
-						     sgl, sgl_nents);
-	} else
-		return transport_allocate_control_task(cmd);
-
+	return 1;
 }
-
-/* transport_generic_new_cmd(): Called from transport_processing_thread()
- *
- * Allocate storage transport resources from a set of values predefined
- * by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
- * Any non zero return here is treated as an "out of resource' op here.
+/*
+ * Allocate any required resources to execute the command, and either place
+ * it on the execution queue if possible. For writes we might not have the
+ * payload yet, thus notify the fabric via a call to ->write_pending instead.
  */
- /*
- * Generate struct se_task(s) and/or their payloads for this CDB.
- */
 int transport_generic_new_cmd(struct se_cmd *cmd)
 {
+	struct se_device *dev = cmd->se_dev;
+	int task_cdbs, task_cdbs_bidi = 0;
+	int set_counts = 1;
 	int ret = 0;
 
 	/*
@@ -4293,16 +3814,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 		if (ret < 0)
 			return ret;
 	}
+
 	/*
-	 * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
-	 * control or data CDB types, and perform the map to backend subsystem
-	 * code from SGL memory allocated here by transport_generic_get_mem(), or
-	 * via pre-existing SGL memory setup explictly by fabric module code with
-	 * transport_generic_map_mem_to_cmd().
+	 * For BIDI command set up the read tasks first.
 	 */
-	ret = transport_new_cmd_obj(cmd);
-	if (ret < 0)
-		return ret;
+	if (cmd->t_bidi_data_sg &&
+	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
+
+		task_cdbs_bidi = transport_allocate_data_tasks(cmd,
+				DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
+				cmd->t_bidi_data_nents);
+		if (task_cdbs_bidi <= 0)
+			goto out_fail;
+
+		atomic_inc(&cmd->t_fe_count);
+		atomic_inc(&cmd->t_se_count);
+		set_counts = 0;
+	}
+
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		task_cdbs = transport_allocate_data_tasks(cmd,
+				cmd->data_direction, cmd->t_data_sg,
+				cmd->t_data_nents);
+	} else {
+		task_cdbs = transport_allocate_control_task(cmd);
+	}
+
+	if (task_cdbs <= 0)
+		goto out_fail;
+
+	if (set_counts) {
+		atomic_inc(&cmd->t_fe_count);
+		atomic_inc(&cmd->t_se_count);
+	}
+
+	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
+	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
+	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
+
 	/*
 	 * For WRITEs, let the fabric know its buffer is ready..
 	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
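[Editorial aside] With transport_new_cmd_obj() gone, transport_generic_new_cmd() itself now totals the BIDI read tasks and the main task list into t_task_list_num and seeds the completion counters from that sum. A trivial standalone illustration of the accounting; the values are invented.

#include <stdio.h>

int main(void)
{
	int task_cdbs = 2;        /* tasks for the main (WRITE) payload */
	int task_cdbs_bidi = 1;   /* tasks for the BIDI READ payload */
	int t_task_list_num = task_cdbs + task_cdbs_bidi;
	int t_task_cdbs_left = t_task_list_num;

	/* each completing task decrements the counter... */
	while (t_task_cdbs_left > 0)
		t_task_cdbs_left--;

	/* ...and the command completes once it reaches zero */
	printf("all %d tasks done, completing command\n", t_task_list_num);
	return 0;
}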
@@ -4320,6 +3870,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 	 */
 	transport_execute_tasks(cmd);
 	return 0;
+
+out_fail:
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return -EINVAL;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
 
@@ -4333,15 +3888,15 @@ void transport_generic_process_write(struct se_cmd *cmd)
 }
 EXPORT_SYMBOL(transport_generic_process_write);
 
-static int transport_write_pending_qf(struct se_cmd *cmd)
+static void transport_write_pending_qf(struct se_cmd *cmd)
 {
-	return cmd->se_tfo->write_pending(cmd);
+	if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) {
+		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
+			cmd);
+		transport_handle_queue_full(cmd, cmd->se_dev);
+	}
 }
 
-/* transport_generic_write_pending():
- *
- *
- */
 static int transport_generic_write_pending(struct se_cmd *cmd)
 {
 	unsigned long flags;
@@ -4351,17 +3906,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 	cmd->t_state = TRANSPORT_WRITE_PENDING;
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	if (cmd->transport_qf_callback) {
-		ret = cmd->transport_qf_callback(cmd);
-		if (ret == -EAGAIN)
-			goto queue_full;
-		else if (ret < 0)
-			return ret;
-
-		cmd->transport_qf_callback = NULL;
-		return 0;
-	}
-
 	/*
 	 * Clear the se_cmd for WRITE_PENDING status in order to set
 	 * cmd->t_transport_active=0 so that transport_generic_handle_data
@@ -4386,61 +3930,52 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 queue_full:
 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
 	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-	transport_handle_queue_full(cmd, cmd->se_dev,
-			transport_write_pending_qf);
+	transport_handle_queue_full(cmd, cmd->se_dev);
 	return ret;
 }
 
+/**
+ * transport_release_cmd - free a command
+ * @cmd: command to free
+ *
+ * This routine unconditionally frees a command, and reference counting
+ * or list removal must be done in the caller.
+ */
 void transport_release_cmd(struct se_cmd *cmd)
 {
 	BUG_ON(!cmd->se_tfo);
 
-	transport_free_se_cmd(cmd);
+	if (cmd->se_tmr_req)
+		core_tmr_release_req(cmd->se_tmr_req);
+	if (cmd->t_task_cdb != cmd->__t_task_cdb)
+		kfree(cmd->t_task_cdb);
 	cmd->se_tfo->release_cmd(cmd);
 }
 EXPORT_SYMBOL(transport_release_cmd);
 
-/* transport_generic_free_cmd():
- *
- *	Called from processing frontend to release storage engine resources
- */
-void transport_generic_free_cmd(
-	struct se_cmd *cmd,
-	int wait_for_tasks,
-	int session_reinstatement)
+void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
-	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+		if (wait_for_tasks && cmd->se_tmr_req)
+			transport_wait_for_tasks(cmd);
+
 		transport_release_cmd(cmd);
-	else {
+	} else {
+		if (wait_for_tasks)
+			transport_wait_for_tasks(cmd);
+
 		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
 
-		if (cmd->se_lun) {
-#if 0
-			pr_debug("cmd: %p ITT: 0x%08x contains"
-				" cmd->se_lun\n", cmd,
-				cmd->se_tfo->get_task_tag(cmd));
-#endif
+		if (cmd->se_lun)
 			transport_lun_remove_cmd(cmd);
-		}
-
-		if (wait_for_tasks && cmd->transport_wait_for_tasks)
-			cmd->transport_wait_for_tasks(cmd, 0, 0);
 
 		transport_free_dev_tasks(cmd);
 
-		transport_generic_remove(cmd, session_reinstatement);
+		transport_put_cmd(cmd);
 	}
 }
 EXPORT_SYMBOL(transport_generic_free_cmd);
 
-static void transport_nop_wait_for_tasks(
-	struct se_cmd *cmd,
-	int remove_cmd,
-	int session_reinstatement)
-{
-	return;
-}
-
 /* transport_lun_wait_for_tasks():
  *
  * Called from ConfigFS context to stop the passed struct se_cmd to allow
@@ -4479,7 +4014,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
 			cmd->se_tfo->get_task_tag(cmd));
 	}
-	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
+	transport_remove_cmd_from_queue(cmd);
 
 	return 0;
 }
@@ -4610,22 +4145,30 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
 	return 0;
 }
 
-/* transport_generic_wait_for_tasks():
+/**
+ * transport_wait_for_tasks - wait for completion to occur
+ * @cmd: command to wait
  *
- * Called from frontend or passthrough context to wait for storage engine
- * to pause and/or release frontend generated struct se_cmd.
+ * Called from frontend fabric context to wait for storage engine
+ * to pause and/or release frontend generated struct se_cmd.
  */
-static void transport_generic_wait_for_tasks(
-	struct se_cmd *cmd,
-	int remove_cmd,
-	int session_reinstatement)
+void transport_wait_for_tasks(struct se_cmd *cmd)
 {
 	unsigned long flags;
 
-	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
-		return;
-
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return;
+	}
+	/*
+	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
+	 * has been set in transport_set_supported_SAM_opcode().
+	 */
+	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return;
+	}
 	/*
 	 * If we are already stopped due to an external event (ie: LUN shutdown)
 	 * sleep until the connection can have the passed struct se_cmd back.
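[Editorial aside] Note the locking change in the exported transport_wait_for_tasks() above: the SCF_SE_LUN_CMD and SCF_SUPPORTED_SAM_OPCODE guards are now evaluated under t_state_lock, so every early-out path must drop the lock before returning. A userspace model of that unlock-on-every-exit pattern; the flag values are invented and a pthread mutex stands in for the spinlock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define SCF_SE_LUN_CMD           (1 << 0)   /* invented bit values */
#define SCF_SUPPORTED_SAM_OPCODE (1 << 1)

struct cmd {
	unsigned int flags;
	bool tmr_req;
	pthread_mutex_t lock;
};

static void wait_for_tasks(struct cmd *c)
{
	pthread_mutex_lock(&c->lock);
	if (!(c->flags & SCF_SE_LUN_CMD) && !c->tmr_req) {
		pthread_mutex_unlock(&c->lock);   /* unlock on every exit */
		return;
	}
	if (!(c->flags & SCF_SUPPORTED_SAM_OPCODE) && !c->tmr_req) {
		pthread_mutex_unlock(&c->lock);
		return;
	}
	printf("would wait for outstanding tasks here\n");
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct cmd c = { SCF_SE_LUN_CMD | SCF_SUPPORTED_SAM_OPCODE,
			 false, PTHREAD_MUTEX_INITIALIZER };

	wait_for_tasks(&c);
	return 0;
}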
@@ -4665,16 +4208,17 @@ static void transport_generic_wait_for_tasks(
 		atomic_set(&cmd->transport_lun_stop, 0);
 	}
 	if (!atomic_read(&cmd->t_transport_active) ||
-	     atomic_read(&cmd->t_transport_aborted))
-		goto remove;
+	     atomic_read(&cmd->t_transport_aborted)) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return;
+	}
 
 	atomic_set(&cmd->t_transport_stop, 1);
 
 	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
-		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
-		" = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
-		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
-		cmd->deferred_t_state);
+		" i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
+		cmd, cmd->se_tfo->get_task_tag(cmd),
+		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
 
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
@@ -4689,13 +4233,10 @@ static void transport_generic_wait_for_tasks(
 	pr_debug("wait_for_tasks: Stopped wait_for_compltion("
 		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
 		cmd->se_tfo->get_task_tag(cmd));
-remove:
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-	if (!remove_cmd)
-		return;
 
-	transport_generic_free_cmd(cmd, 0, session_reinstatement);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
+EXPORT_SYMBOL(transport_wait_for_tasks);
 
 static int transport_get_sense_codes(
 	struct se_cmd *cmd,
@@ -4920,6 +4461,15 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 
 void transport_send_task_abort(struct se_cmd *cmd)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
 	/*
 	 * If there are still expected incoming fabric WRITEs, we wait
 	 * until until they have completed before sending a TASK_ABORTED
@@ -4984,184 +4534,10 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
 
 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
 	cmd->se_tfo->queue_tm_rsp(cmd);
 
-	transport_cmd_check_stop(cmd, 2, 0);
+	transport_cmd_check_stop_to_fabric(cmd);
 	return 0;
 }
 
-/*
- * Called with spin_lock_irq(&dev->execute_task_lock); held
- *
- */
-static struct se_task *
-transport_get_task_from_state_list(struct se_device *dev)
-{
-	struct se_task *task;
-
-	if (list_empty(&dev->state_task_list))
-		return NULL;
-
-	list_for_each_entry(task, &dev->state_task_list, t_state_list)
-		break;
-
-	list_del(&task->t_state_list);
-	atomic_set(&task->task_state_active, 0);
-
-	return task;
-}
-
-static void transport_processing_shutdown(struct se_device *dev)
-{
-	struct se_cmd *cmd;
-	struct se_task *task;
-	unsigned long flags;
-	/*
-	 * Empty the struct se_device's struct se_task state list.
-	 */
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	while ((task = transport_get_task_from_state_list(dev))) {
-		if (!task->task_se_cmd) {
-			pr_err("task->task_se_cmd is NULL!\n");
-			continue;
-		}
-		cmd = task->task_se_cmd;
-
-		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
-
-		pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
-			" i_state: %d, t_state/def_t_state:"
-			" %d/%d cdb: 0x%02x\n", cmd, task,
-			cmd->se_tfo->get_task_tag(cmd),
-			cmd->se_tfo->get_cmd_state(cmd),
-			cmd->t_state, cmd->deferred_t_state,
-			cmd->t_task_cdb[0]);
-		pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
-			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
-			" t_transport_stop: %d t_transport_sent: %d\n",
-			cmd->se_tfo->get_task_tag(cmd),
-			cmd->t_task_list_num,
-			atomic_read(&cmd->t_task_cdbs_left),
-			atomic_read(&cmd->t_task_cdbs_sent),
-			atomic_read(&cmd->t_transport_active),
-			atomic_read(&cmd->t_transport_stop),
-			atomic_read(&cmd->t_transport_sent));
-
-		if (atomic_read(&task->task_active)) {
-			atomic_set(&task->task_stop, 1);
-			spin_unlock_irqrestore(
-				&cmd->t_state_lock, flags);
-
-			pr_debug("Waiting for task: %p to shutdown for dev:"
-				" %p\n", task, dev);
-			wait_for_completion(&task->task_stop_comp);
-			pr_debug("Completed task: %p shutdown for dev: %p\n",
-				task, dev);
-
-			spin_lock_irqsave(&cmd->t_state_lock, flags);
-			atomic_dec(&cmd->t_task_cdbs_left);
-
-			atomic_set(&task->task_active, 0);
-			atomic_set(&task->task_stop, 0);
-		} else {
-			if (atomic_read(&task->task_execute_queue) != 0)
-				transport_remove_task_from_execute_queue(task, dev);
-		}
-		__transport_stop_task_timer(task, &flags);
-
-		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
-			spin_unlock_irqrestore(
-				&cmd->t_state_lock, flags);
-
-			pr_debug("Skipping task: %p, dev: %p for"
-				" t_task_cdbs_ex_left: %d\n", task, dev,
-				atomic_read(&cmd->t_task_cdbs_ex_left));
-
-			spin_lock_irqsave(&dev->execute_task_lock, flags);
-			continue;
-		}
-
-		if (atomic_read(&cmd->t_transport_active)) {
-			pr_debug("got t_transport_active = 1 for task: %p, dev:"
-				" %p\n", task, dev);
-
-			if (atomic_read(&cmd->t_fe_count)) {
-				spin_unlock_irqrestore(
-					&cmd->t_state_lock, flags);
-				transport_send_check_condition_and_sense(
-					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
-					0);
-				transport_remove_cmd_from_queue(cmd,
-					&cmd->se_dev->dev_queue_obj);
-
-				transport_lun_remove_cmd(cmd);
-				transport_cmd_check_stop(cmd, 1, 0);
-			} else {
-				spin_unlock_irqrestore(
-					&cmd->t_state_lock, flags);
-
-				transport_remove_cmd_from_queue(cmd,
-					&cmd->se_dev->dev_queue_obj);
-
-				transport_lun_remove_cmd(cmd);
-
-				if (transport_cmd_check_stop(cmd, 1, 0))
-					transport_generic_remove(cmd, 0);
-			}
-
-			spin_lock_irqsave(&dev->execute_task_lock, flags);
-			continue;
-		}
-		pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
-			task, dev);
-
-		if (atomic_read(&cmd->t_fe_count)) {
-			spin_unlock_irqrestore(
-				&cmd->t_state_lock, flags);
-			transport_send_check_condition_and_sense(cmd,
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
-			transport_remove_cmd_from_queue(cmd,
-				&cmd->se_dev->dev_queue_obj);
-
-			transport_lun_remove_cmd(cmd);
-			transport_cmd_check_stop(cmd, 1, 0);
-		} else {
-			spin_unlock_irqrestore(
-				&cmd->t_state_lock, flags);
-
-			transport_remove_cmd_from_queue(cmd,
-				&cmd->se_dev->dev_queue_obj);
-			transport_lun_remove_cmd(cmd);
-
-			if (transport_cmd_check_stop(cmd, 1, 0))
-				transport_generic_remove(cmd, 0);
-		}
-
-		spin_lock_irqsave(&dev->execute_task_lock, flags);
-	}
-
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-	/*
-	 * Empty the struct se_device's struct se_cmd list.
-	 */
-	while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
-
-		pr_debug("From Device Queue: cmd: %p t_state: %d\n",
-			cmd, cmd->t_state);
-
-		if (atomic_read(&cmd->t_fe_count)) {
-			transport_send_check_condition_and_sense(cmd,
-				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
-
-			transport_lun_remove_cmd(cmd);
-			transport_cmd_check_stop(cmd, 1, 0);
-		} else {
-			transport_lun_remove_cmd(cmd);
-			if (transport_cmd_check_stop(cmd, 1, 0))
-				transport_generic_remove(cmd, 0);
-		}
-	}
-}
-
 /* transport_processing_thread():
  *
  *
  */
 static int transport_processing_thread(void *param)
@@ -5181,14 +4557,6 @@ static int transport_processing_thread(void *param)
 		if (ret < 0)
 			goto out;
 
-		spin_lock_irq(&dev->dev_status_lock);
-		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
-			spin_unlock_irq(&dev->dev_status_lock);
-			transport_processing_shutdown(dev);
-			continue;
-		}
-		spin_unlock_irq(&dev->dev_status_lock);
-
 get_cmd:
 		__transport_execute_tasks(dev);
 
@@ -5197,6 +4565,9 @@ get_cmd:
 			continue;
 
 		switch (cmd->t_state) {
+		case TRANSPORT_NEW_CMD:
+			BUG();
+			break;
 		case TRANSPORT_NEW_CMD_MAP:
 			if (!cmd->se_tfo->new_cmd_map) {
 				pr_err("cmd->se_tfo->new_cmd_map is"
@@ -5206,19 +4577,17 @@ get_cmd:
 			ret = cmd->se_tfo->new_cmd_map(cmd);
 			if (ret < 0) {
 				cmd->transport_error_status = ret;
-				transport_generic_request_failure(cmd, NULL,
+				transport_generic_request_failure(cmd, 0,
 					(cmd->data_direction != DMA_TO_DEVICE));
 				break;
 			}
-			/* Fall through */
-		case TRANSPORT_NEW_CMD:
 			ret = transport_generic_new_cmd(cmd);
 			if (ret == -EAGAIN)
 				break;
 			else if (ret < 0) {
 				cmd->transport_error_status = ret;
-				transport_generic_request_failure(cmd, NULL,
+				transport_generic_request_failure(cmd, 0,
 					(cmd->data_direction != DMA_TO_DEVICE));
 			}
@@ -5226,33 +4595,22 @@ get_cmd:
 		case TRANSPORT_PROCESS_WRITE:
 			transport_generic_process_write(cmd);
 			break;
-		case TRANSPORT_COMPLETE_OK:
-			transport_stop_all_task_timers(cmd);
-			transport_generic_complete_ok(cmd);
-			break;
-		case TRANSPORT_REMOVE:
-			transport_generic_remove(cmd, 0);
-			break;
 		case TRANSPORT_FREE_CMD_INTR:
-			transport_generic_free_cmd(cmd, 0, 0);
+			transport_generic_free_cmd(cmd, 0);
 			break;
 		case TRANSPORT_PROCESS_TMR:
 			transport_generic_do_tmr(cmd);
 			break;
-		case TRANSPORT_COMPLETE_FAILURE:
-			transport_generic_request_failure(cmd, NULL, 1, 1);
-			break;
-		case TRANSPORT_COMPLETE_TIMEOUT:
-			transport_stop_all_task_timers(cmd);
-			transport_generic_request_timeout(cmd);
-			break;
 		case TRANSPORT_COMPLETE_QF_WP:
-			transport_generic_write_pending(cmd);
+			transport_write_pending_qf(cmd);
+			break;
+		case TRANSPORT_COMPLETE_QF_OK:
+			transport_complete_qf(cmd);
 			break;
 		default:
-			pr_err("Unknown t_state: %d deferred_t_state:"
-				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
-				" %u\n", cmd->t_state, cmd->deferred_t_state,
+			pr_err("Unknown t_state: %d for ITT: 0x%08x "
+				"i_state: %d on SE LUN: %u\n",
+				cmd->t_state,
 				cmd->se_tfo->get_task_tag(cmd),
 				cmd->se_tfo->get_cmd_state(cmd),
 				cmd->se_lun->unpacked_lun);
@@ -5263,7 +4621,8 @@ get_cmd:
 	}
 
 out:
-	transport_release_all_cmds(dev);
+	WARN_ON(!list_empty(&dev->state_task_list));
+	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
 	dev->process_thread = NULL;
 	return 0;
 }
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 31e3c652527e..50a480db7a66 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -24,7 +24,6 @@
  *
  ******************************************************************************/
 
-#include <linux/version.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <scsi/scsi.h>
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 80fbcde00cb6..6195026cc7b0 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -19,7 +19,6 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <generated/utsrelease.h>
 #include <linux/utsname.h>
 #include <linux/init.h>
@@ -115,7 +114,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
 
 void ft_check_stop_free(struct se_cmd *se_cmd)
 {
-	transport_generic_free_cmd(se_cmd, 0, 0);
+	transport_generic_free_cmd(se_cmd, 0);
 }
 
 /*
@@ -268,9 +267,8 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
 
 	if (IS_ERR(fp)) {
 		/* XXX need to find cmd if queued */
-		cmd->se_cmd.t_state = TRANSPORT_REMOVE;
 		cmd->seq = NULL;
-		transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+		transport_generic_free_cmd(&cmd->se_cmd, 0);
 		return;
 	}
@@ -288,7 +286,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
 			__func__, fh->fh_r_ctl);
 		ft_invl_hw_context(cmd);
 		fc_frame_free(fp);
-		transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+		transport_generic_free_cmd(&cmd->se_cmd, 0);
 		break;
 	}
 }
@@ -397,7 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
 	}
 
 	pr_debug("alloc tm cmd fn %d\n", tm_func);
-	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
+	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
 	if (!tmr) {
 		pr_debug("alloc failed\n");
 		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
@@ -421,7 +419,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
 		sess = cmd->sess;
 		transport_send_check_condition_and_sense(&cmd->se_cmd,
 			cmd->se_cmd.scsi_sense_reason, 0);
-		transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+		transport_generic_free_cmd(&cmd->se_cmd, 0);
 		ft_sess_put(sess);
 		return;
 	}
@@ -628,7 +626,7 @@ static void ft_send_work(struct work_struct *work)
 	if (ret == -ENOMEM) {
 		transport_send_check_condition_and_sense(se_cmd,
 			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
-		transport_generic_free_cmd(se_cmd, 0, 0);
+		transport_generic_free_cmd(se_cmd, 0);
 		return;
 	}
 	if (ret == -EINVAL) {
@@ -637,10 +635,10 @@ static void ft_send_work(struct work_struct *work)
 		else
 			transport_send_check_condition_and_sense(se_cmd,
 				se_cmd->scsi_sense_reason, 0);
-		transport_generic_free_cmd(se_cmd, 0, 0);
+		transport_generic_free_cmd(se_cmd, 0);
 		return;
 	}
-	transport_generic_handle_cdb(se_cmd);
+	transport_handle_cdb_direct(se_cmd);
 	return;
 
err:
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 8fa39b74f22c..5f770412ca40 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -23,7 +23,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <generated/utsrelease.h>
 #include <linux/utsname.h>
 #include <linux/init.h>
@@ -32,6 +31,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/configfs.h>
+#include <linux/kernel.h>
 #include <linux/ctype.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
@@ -71,10 +71,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
 {
 	const char *cp;
 	char c;
-	u32 nibble;
 	u32 byte = 0;
 	u32 pos = 0;
 	u32 err;
+	int val;
 
 	*wwn = 0;
 	for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) {
@@ -95,13 +95,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
 			return cp - name;
 		}
 		err = 3;
-		if (isdigit(c))
-			nibble = c - '0';
-		else if (isxdigit(c) && (islower(c) || !strict))
-			nibble = tolower(c) - 'a' + 10;
-		else
+		val = hex_to_bin(c);
+		if (val < 0 || (strict && isupper(c)))
 			goto fail;
-		*wwn = (*wwn << 4) | nibble;
+		*wwn = (*wwn << 4) | val;
 	}
 	err = 4;
 fail:
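[Editorial aside] The ft_parse_wwn() change above swaps three open-coded nibble tests for one hex_to_bin() call, which returns 0-15 for a valid hex digit and a negative value otherwise; strict mode then only has to reject uppercase afterwards. A simplified standalone parser in the same spirit follows. It skips the byte/position bookkeeping of the real function, and carries a local hex_to_bin() mirroring the kernel's behaviour so it builds outside the kernel tree.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

static int hex_to_bin(char ch)     /* mirrors lib/hexdump.c behaviour */
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = tolower((unsigned char)ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

static int parse_wwn(const char *name, uint64_t *wwn, int strict)
{
	*wwn = 0;
	for (const char *cp = name; *cp; cp++) {
		if (*cp == ':')
			continue;               /* separators allowed */
		int val = hex_to_bin(*cp);
		if (val < 0 || (strict && isupper((unsigned char)*cp)))
			return -1;              /* models the fail: path */
		*wwn = (*wwn << 4) | (unsigned)val;
	}
	return 0;
}

int main(void)
{
	uint64_t wwn;

	if (parse_wwn("20:00:00:e0:8b:01:02:03", &wwn, 1) == 0)
		printf("wwn: 0x%016llx\n", (unsigned long long)wwn);
	return 0;
}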
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index d35ea5a3d56c..1369b1cb103d 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -28,7 +28,6 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <generated/utsrelease.h>
 #include <linux/utsname.h>
 #include <linux/init.h>
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index dbb5eaeee399..326921385aff 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -19,7 +19,6 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
-#include <linux/version.h>
 #include <generated/utsrelease.h>
 #include <linux/utsname.h>
 #include <linux/init.h>