Diffstat (limited to 'drivers/target')
27 files changed, 703 insertions, 328 deletions
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig index 72171ea3dd53..92641d39126a 100644 --- a/drivers/target/Kconfig +++ b/drivers/target/Kconfig @@ -47,5 +47,6 @@ source "drivers/target/loopback/Kconfig" source "drivers/target/tcm_fc/Kconfig" source "drivers/target/iscsi/Kconfig" source "drivers/target/sbp/Kconfig" +source "drivers/target/tcm_remote/Kconfig" endif diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 45634747377e..431b84abfb94 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile @@ -30,3 +30,4 @@ obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ obj-$(CONFIG_TCM_FC) += tcm_fc/ obj-$(CONFIG_ISCSI_TARGET) += iscsi/ obj-$(CONFIG_SBP_TARGET) += sbp/ +obj-$(CONFIG_REMOTE_TARGET) += tcm_remote/ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index baf4da7bb3b4..834cce50f9b0 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -26,6 +26,7 @@ #include <target/target_core_base.h> #include <target/target_core_fabric.h> +#include <target/target_core_backend.h> #include <target/iscsi/iscsi_target_core.h> #include "iscsi_target_parameters.h" #include "iscsi_target_seq_pdu_list.h" @@ -1190,9 +1191,10 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ __target_init_cmd(&cmd->se_cmd, &iscsi_ops, - conn->sess->se_sess, be32_to_cpu(hdr->data_length), - cmd->data_direction, sam_task_attr, - cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun)); + conn->sess->se_sess, be32_to_cpu(hdr->data_length), + cmd->data_direction, sam_task_attr, + cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun), + conn->cmd_cnt); pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, @@ -2055,7 +2057,8 @@ iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, __target_init_cmd(&cmd->se_cmd, &iscsi_ops, conn->sess->se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG, cmd->sense_buffer + 2, - scsilun_to_int(&hdr->lun)); + scsilun_to_int(&hdr->lun), + conn->cmd_cnt); target_get_sess_cmd(&cmd->se_cmd, true); @@ -4218,9 +4221,12 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn) list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { struct se_cmd *se_cmd = &cmd->se_cmd; - if (se_cmd->se_tfo != NULL) { - spin_lock_irq(&se_cmd->t_state_lock); - if (se_cmd->transport_state & CMD_T_ABORTED) { + if (!se_cmd->se_tfo) + continue; + + spin_lock_irq(&se_cmd->t_state_lock); + if (se_cmd->transport_state & CMD_T_ABORTED) { + if (!(se_cmd->transport_state & CMD_T_TAS)) /* * LIO's abort path owns the cleanup for this, * so put it back on the list and let @@ -4228,11 +4234,20 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn) */ list_move_tail(&cmd->i_conn_node, &conn->conn_cmd_list); - } else { - se_cmd->transport_state |= CMD_T_FABRIC_STOP; - } + } else { + se_cmd->transport_state |= CMD_T_FABRIC_STOP; + } + + if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { + /* + * We never submitted the cmd to LIO core, so we have + * to tell LIO to perform the completion process. 
+ */ spin_unlock_irq(&se_cmd->t_state_lock); + target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED); + continue; } + spin_unlock_irq(&se_cmd->t_state_lock); } spin_unlock_bh(&conn->cmd_lock); @@ -4243,6 +4258,16 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn) iscsit_free_cmd(cmd, true); } + + /* + * Wait on commands that were cleaned up via the aborted_task path. + * LLDs that implement iscsit_wait_conn will already have waited for + * commands. + */ + if (!conn->conn_transport->iscsit_wait_conn) { + target_stop_cmd_counter(conn->cmd_cnt); + target_wait_for_cmds(conn->cmd_cnt); + } } static void iscsit_stop_timers_for_cmds( @@ -4517,6 +4542,9 @@ int iscsit_close_session(struct iscsit_session *sess, bool can_sleep) iscsit_stop_time2retain_timer(sess); spin_unlock_bh(&se_tpg->session_lock); + if (sess->sess_ops->ErrorRecoveryLevel == 2) + iscsit_free_connection_recovery_entries(sess); + /* * transport_deregister_session_configfs() will clear the * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context @@ -4540,9 +4568,6 @@ int iscsit_close_session(struct iscsit_session *sess, bool can_sleep) transport_deregister_session(sess->se_sess); - if (sess->sess_ops->ErrorRecoveryLevel == 2) - iscsit_free_connection_recovery_entries(sess); - iscsit_free_all_ooo_cmdsns(sess); spin_lock_bh(&se_tpg->session_lock); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 27e448c2d066..274bdd7845ca 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1147,8 +1147,14 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np) goto free_conn_cpumask; } + conn->cmd_cnt = target_alloc_cmd_counter(); + if (!conn->cmd_cnt) + goto free_conn_allowed_cpumask; + return conn; +free_conn_allowed_cpumask: + free_cpumask_var(conn->allowed_cpumask); free_conn_cpumask: free_cpumask_var(conn->conn_cpumask); free_conn_ops: @@ -1162,6 +1168,7 @@ free_conn: void iscsit_free_conn(struct iscsit_conn *conn) { + target_free_cmd_counter(conn->cmd_cnt); free_cpumask_var(conn->allowed_cpumask); free_cpumask_var(conn->conn_cpumask); kfree(conn->conn_ops); diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 2317fb077db0..5b90c22ee3dc 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -726,8 +726,8 @@ static int iscsi_add_notunderstood_response( } INIT_LIST_HEAD(&extra_response->er_list); - strlcpy(extra_response->key, key, sizeof(extra_response->key)); - strlcpy(extra_response->value, NOTUNDERSTOOD, + strscpy(extra_response->key, key, sizeof(extra_response->key)); + strscpy(extra_response->value, NOTUNDERSTOOD, sizeof(extra_response->value)); list_add_tail(&extra_response->er_list, @@ -1262,18 +1262,20 @@ static struct iscsi_param *iscsi_check_key( return param; if (!(param->phase & phase)) { - pr_err("Key \"%s\" may not be negotiated during ", - param->name); + char *phase_name; + switch (phase) { case PHASE_SECURITY: - pr_debug("Security phase.\n"); + phase_name = "Security"; break; case PHASE_OPERATIONAL: - pr_debug("Operational phase.\n"); + phase_name = "Operational"; break; default: - pr_debug("Unknown phase.\n"); + phase_name = "Unknown"; } + pr_err("Key \"%s\" may not be negotiated during %s phase.\n", + param->name, phase_name); return NULL; } diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 
26dc8ed3045b..dc1ac5a0f806 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1321,7 +1321,7 @@ void iscsit_collect_login_stats( if (conn->param_list) intrname = iscsi_find_param_from_key(INITIATORNAME, conn->param_list); - strlcpy(ls->last_intr_fail_name, + strscpy(ls->last_intr_fail_name, (intrname ? intrname->value : "Unknown"), sizeof(ls->last_intr_fail_name)); @@ -1360,7 +1360,7 @@ void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *sess) return; spin_lock_bh(&tiqn->sess_err_stats.lock); - strlcpy(tiqn->sess_err_stats.last_sess_fail_rem_name, + strscpy(tiqn->sess_err_stats.last_sess_fail_rem_name, sess->sess_ops->InitiatorName, sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name)); tiqn->sess_err_stats.last_sess_failure_type = diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 139031ccb700..4ec99a55ac30 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -83,15 +83,8 @@ static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host) static int tcm_loop_driver_probe(struct device *); static void tcm_loop_driver_remove(struct device *); -static int pseudo_lld_bus_match(struct device *dev, - struct device_driver *dev_driver) -{ - return 1; -} - static struct bus_type tcm_loop_lld_bus = { .name = "tcm_loop_bus", - .match = pseudo_lld_bus_match, .probe = tcm_loop_driver_probe, .remove = tcm_loop_driver_remove, }; @@ -298,7 +291,7 @@ static int tcm_loop_target_reset(struct scsi_cmnd *sc) return FAILED; } -static struct scsi_host_template tcm_loop_driver_template = { +static const struct scsi_host_template tcm_loop_driver_template = { .show_info = tcm_loop_show_info, .proc_name = "tcm_loopback", .name = "TCM_Loopback", @@ -480,30 +473,6 @@ static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg) return 1; } -static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg) -{ - return 0; -} - -/* - * Allow I_T Nexus full READ-WRITE access without explict Initiator Node ACLs for - * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest - */ -static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg) -{ - return 0; -} - -/* - * Because TCM_Loop does not use explict ACLs and MappedLUNs, this will - * never be called for TCM_Loop by target_core_fabric_configfs.c code. 
- * It has been added here as a nop for target_fabric_tf_ops_check() - */ -static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg) -{ - return 0; -} - static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg) { struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, @@ -511,21 +480,11 @@ static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg) return tl_tpg->tl_fabric_prot_type; } -static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg) -{ - return 1; -} - static u32 tcm_loop_sess_get_index(struct se_session *se_sess) { return 1; } -static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl) -{ - return; -} - static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) { struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, @@ -1124,18 +1083,11 @@ static const struct target_core_fabric_ops loop_ops = { .tpg_get_wwn = tcm_loop_get_endpoint_wwn, .tpg_get_tag = tcm_loop_get_tag, .tpg_check_demo_mode = tcm_loop_check_demo_mode, - .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache, - .tpg_check_demo_mode_write_protect = - tcm_loop_check_demo_mode_write_protect, - .tpg_check_prod_mode_write_protect = - tcm_loop_check_prod_mode_write_protect, .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only, - .tpg_get_inst_index = tcm_loop_get_inst_index, .check_stop_free = tcm_loop_check_stop_free, .release_cmd = tcm_loop_release_cmd, .sess_get_index = tcm_loop_sess_get_index, .write_pending = tcm_loop_write_pending, - .set_default_node_attributes = tcm_loop_set_default_node_attributes, .get_cmd_state = tcm_loop_get_cmd_state, .queue_data_in = tcm_loop_queue_data_in, .queue_status = tcm_loop_queue_status, diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 504670994fb4..2a761bc09193 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -1673,11 +1673,6 @@ static int sbp_check_true(struct se_portal_group *se_tpg) return 1; } -static int sbp_check_false(struct se_portal_group *se_tpg) -{ - return 0; -} - static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg) { struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); @@ -1692,11 +1687,6 @@ static u16 sbp_get_tag(struct se_portal_group *se_tpg) return tpg->tport_tpgt; } -static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg) -{ - return 1; -} - static void sbp_release_cmd(struct se_cmd *se_cmd) { struct sbp_target_request *req = container_of(se_cmd, @@ -1705,11 +1695,6 @@ static void sbp_release_cmd(struct se_cmd *se_cmd) sbp_free_request(req); } -static u32 sbp_sess_get_index(struct se_session *se_sess) -{ - return 0; -} - static int sbp_write_pending(struct se_cmd *se_cmd) { struct sbp_target_request *req = container_of(se_cmd, @@ -1733,16 +1718,6 @@ static int sbp_write_pending(struct se_cmd *se_cmd) return 0; } -static void sbp_set_default_node_attrs(struct se_node_acl *nacl) -{ - return; -} - -static int sbp_get_cmd_state(struct se_cmd *se_cmd) -{ - return 0; -} - static int sbp_queue_data_in(struct se_cmd *se_cmd) { struct sbp_target_request *req = container_of(se_cmd, @@ -2281,14 +2256,8 @@ static const struct target_core_fabric_ops sbp_ops = { .tpg_get_tag = sbp_get_tag, .tpg_check_demo_mode = sbp_check_true, .tpg_check_demo_mode_cache = sbp_check_true, - .tpg_check_demo_mode_write_protect = sbp_check_false, - .tpg_check_prod_mode_write_protect = sbp_check_false, - .tpg_get_inst_index = sbp_tpg_get_inst_index, .release_cmd = sbp_release_cmd, - 
.sess_get_index = sbp_sess_get_index, .write_pending = sbp_write_pending, - .set_default_node_attributes = sbp_set_default_node_attrs, - .get_cmd_state = sbp_get_cmd_state, .queue_data_in = sbp_queue_data_in, .queue_status = sbp_queue_status, .queue_tm_rsp = sbp_queue_tm_rsp, diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index c8470e7c0e10..3372856319f7 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -225,7 +225,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) /* * Set RELATIVE TARGET PORT IDENTIFIER */ - put_unaligned_be16(lun->lun_rtpi, &buf[off]); + put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]); off += 2; rd_len += 4; } @@ -399,7 +399,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) spin_lock(&dev->se_port_lock); list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) { - if (lun->lun_rtpi != rtpi) + if (lun->lun_tpg->tpg_rtpi != rtpi) continue; // XXX: racy unlock diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 611b0424e305..936e5ff1b209 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -335,6 +335,29 @@ EXPORT_SYMBOL(target_undepend_item); /*############################################################################## // Start functions called by external Target Fabrics Modules //############################################################################*/ +static int target_disable_feature(struct se_portal_group *se_tpg) +{ + return 0; +} + +static u32 target_default_get_inst_index(struct se_portal_group *se_tpg) +{ + return 1; +} + +static u32 target_default_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static void target_set_default_node_attributes(struct se_node_acl *se_acl) +{ +} + +static int target_default_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) { @@ -362,46 +385,14 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) pr_err("Missing tfo->tpg_get_tag()\n"); return -EINVAL; } - if (!tfo->tpg_check_demo_mode) { - pr_err("Missing tfo->tpg_check_demo_mode()\n"); - return -EINVAL; - } - if (!tfo->tpg_check_demo_mode_cache) { - pr_err("Missing tfo->tpg_check_demo_mode_cache()\n"); - return -EINVAL; - } - if (!tfo->tpg_check_demo_mode_write_protect) { - pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n"); - return -EINVAL; - } - if (!tfo->tpg_check_prod_mode_write_protect) { - pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n"); - return -EINVAL; - } - if (!tfo->tpg_get_inst_index) { - pr_err("Missing tfo->tpg_get_inst_index()\n"); - return -EINVAL; - } if (!tfo->release_cmd) { pr_err("Missing tfo->release_cmd()\n"); return -EINVAL; } - if (!tfo->sess_get_index) { - pr_err("Missing tfo->sess_get_index()\n"); - return -EINVAL; - } if (!tfo->write_pending) { pr_err("Missing tfo->write_pending()\n"); return -EINVAL; } - if (!tfo->set_default_node_attributes) { - pr_err("Missing tfo->set_default_node_attributes()\n"); - return -EINVAL; - } - if (!tfo->get_cmd_state) { - pr_err("Missing tfo->get_cmd_state()\n"); - return -EINVAL; - } if (!tfo->queue_data_in) { pr_err("Missing tfo->queue_data_in()\n"); return -EINVAL; @@ -447,8 +438,36 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) return 0; } +static void target_set_default_ops(struct target_core_fabric_ops *tfo) +{ + if 
(!tfo->tpg_check_demo_mode) + tfo->tpg_check_demo_mode = target_disable_feature; + + if (!tfo->tpg_check_demo_mode_cache) + tfo->tpg_check_demo_mode_cache = target_disable_feature; + + if (!tfo->tpg_check_demo_mode_write_protect) + tfo->tpg_check_demo_mode_write_protect = target_disable_feature; + + if (!tfo->tpg_check_prod_mode_write_protect) + tfo->tpg_check_prod_mode_write_protect = target_disable_feature; + + if (!tfo->tpg_get_inst_index) + tfo->tpg_get_inst_index = target_default_get_inst_index; + + if (!tfo->sess_get_index) + tfo->sess_get_index = target_default_sess_get_index; + + if (!tfo->set_default_node_attributes) + tfo->set_default_node_attributes = target_set_default_node_attributes; + + if (!tfo->get_cmd_state) + tfo->get_cmd_state = target_default_get_cmd_state; +} + int target_register_template(const struct target_core_fabric_ops *fo) { + struct target_core_fabric_ops *tfo; struct target_fabric_configfs *tf; int ret; @@ -461,10 +480,18 @@ int target_register_template(const struct target_core_fabric_ops *fo) pr_err("%s: could not allocate memory!\n", __func__); return -ENOMEM; } + tfo = kzalloc(sizeof(struct target_core_fabric_ops), GFP_KERNEL); + if (!tfo) { + kfree(tf); + pr_err("%s: could not allocate memory!\n", __func__); + return -ENOMEM; + } + memcpy(tfo, fo, sizeof(*tfo)); + target_set_default_ops(tfo); INIT_LIST_HEAD(&tf->tf_list); atomic_set(&tf->tf_access_cnt, 0); - tf->tf_ops = fo; + tf->tf_ops = tfo; target_fabric_setup_cits(tf); mutex_lock(&g_tf_lock); @@ -492,6 +519,7 @@ void target_unregister_template(const struct target_core_fabric_ops *fo) */ rcu_barrier(); kfree(t->tf_tpg_base_cit.ct_attrs); + kfree(t->tf_ops); kfree(t); return; } @@ -621,7 +649,7 @@ static void dev_set_t10_wwn_model_alias(struct se_device *dev) * here without potentially breaking existing setups, so continue to * truncate one byte shorter than what can be carried in INQUIRY. 
*/ - strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN); + strscpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN); } static ssize_t emulate_model_alias_store(struct config_item *item, @@ -647,7 +675,7 @@ static ssize_t emulate_model_alias_store(struct config_item *item, if (flag) { dev_set_t10_wwn_model_alias(dev); } else { - strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod, + strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod, sizeof(dev->t10_wwn.model)); } da->emulate_model_alias = flag; @@ -1398,7 +1426,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1); - strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor)); + strscpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor)); pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:" " %s\n", dev->t10_wwn.vendor); @@ -1454,7 +1482,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); - strlcpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model)); + strscpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model)); pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n", dev->t10_wwn.model); @@ -1510,7 +1538,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1); - strlcpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision)); + strscpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision)); pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n", dev->t10_wwn.revision); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index f6e58410ec3f..b7ac60f4a219 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -223,7 +223,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( tpg->se_tpg_tfo->fabric_name); continue; } - if (lun->lun_rtpi != rtpi) + if (lun->lun_tpg->tpg_rtpi != rtpi) continue; kref_get(&deve->pr_kref); @@ -479,47 +479,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) mutex_unlock(&tpg->acl_node_mutex); } -int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev) -{ - struct se_lun *tmp; - - spin_lock(&dev->se_port_lock); - if (dev->export_count == 0x0000ffff) { - pr_warn("Reached dev->dev_port_count ==" - " 0x0000ffff\n"); - spin_unlock(&dev->se_port_lock); - return -ENOSPC; - } -again: - /* - * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device - * Here is the table from spc4r17 section 7.7.3.8. - * - * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field - * - * Code Description - * 0h Reserved - * 1h Relative port 1, historically known as port A - * 2h Relative port 2, historically known as port B - * 3h to FFFFh Relative port 3 through 65 535 - */ - lun->lun_rtpi = dev->dev_rpti_counter++; - if (!lun->lun_rtpi) - goto again; - - list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) { - /* - * Make sure RELATIVE TARGET PORT IDENTIFIER is unique - * for 16-bit wrap.. 
- */ - if (lun->lun_rtpi == tmp->lun_rtpi) - goto again; - } - spin_unlock(&dev->se_port_lock); - - return 0; -} - static void se_release_vpd_for_dev(struct se_device *dev) { struct t10_vpd *vpd, *vpd_tmp; @@ -782,6 +741,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) spin_lock_init(&dev->t10_alua.lba_map_lock); INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work); + mutex_init(&dev->lun_reset_mutex); dev->t10_wwn.t10_dev = dev; /* @@ -829,10 +789,10 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) xcopy_lun->lun_tpg = &xcopy_pt_tpg; /* Preload the default INQUIRY const values */ - strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor)); - strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod, + strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor)); + strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod, sizeof(dev->t10_wwn.model)); - strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev, + strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev, sizeof(dev->t10_wwn.revision)); return dev; diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 67b18a67317a..b7c637644cd4 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -836,17 +836,49 @@ static ssize_t target_fabric_tpg_base_enable_store(struct config_item *item, if (se_tpg->enabled == op) return count; - - ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, op); + if (op) + ret = target_tpg_enable(se_tpg); + else + ret = target_tpg_disable(se_tpg); if (ret) return ret; + return count; +} +static ssize_t target_fabric_tpg_base_rtpi_show(struct config_item *item, char *page) +{ + struct se_portal_group *se_tpg = to_tpg(item); - se_tpg->enabled = op; + return sysfs_emit(page, "%#x\n", se_tpg->tpg_rtpi); +} + +static ssize_t target_fabric_tpg_base_rtpi_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_portal_group *se_tpg = to_tpg(item); + u16 val; + int ret; + + ret = kstrtou16(page, 0, &val); + if (ret < 0) + return ret; + if (val == 0) + return -EINVAL; + + if (se_tpg->enabled) { + pr_info("%s_TPG[%hu] - Can not change RTPI on enabled TPG", + se_tpg->se_tpg_tfo->fabric_name, + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); + return -EINVAL; + } + + se_tpg->tpg_rtpi = val; + se_tpg->rtpi_manual = true; return count; } CONFIGFS_ATTR(target_fabric_tpg_base_, enable); +CONFIGFS_ATTR(target_fabric_tpg_base_, rtpi); static int target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf) @@ -863,8 +895,8 @@ target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf) if (tf->tf_ops->fabric_enable_tpg) nr_attrs++; - if (nr_attrs == 0) - goto done; + /* + 1 for target_fabric_tpg_base_attr_rtpi */ + nr_attrs++; /* + 1 for final NULL in the array */ attrs = kcalloc(nr_attrs + 1, sizeof(*attrs), GFP_KERNEL); @@ -876,9 +908,10 @@ target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf) attrs[i] = tf->tf_ops->tfc_tpg_base_attrs[i]; if (tf->tf_ops->fabric_enable_tpg) - attrs[i] = &target_fabric_tpg_base_attr_enable; + attrs[i++] = &target_fabric_tpg_base_attr_enable; + + attrs[i++] = &target_fabric_tpg_base_attr_rtpi; -done: cit->ct_item_ops = &target_fabric_tpg_base_item_ops; cit->ct_attrs = attrs; cit->ct_owner = tf->tf_ops->module; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 38a6d08f75b3..408be26d2e9b 100644 --- 
a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -59,7 +59,6 @@ struct target_fabric_configfs { extern struct t10_alua_lu_gp *default_lu_gp; /* target_core_device.c */ -int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev); struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); void target_pr_kref_release(struct kref *); void core_free_device_list_for_node(struct se_node_acl *, @@ -132,13 +131,14 @@ void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *); struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg, const char *initiatorname); void core_tpg_del_initiator_node_acl(struct se_node_acl *acl); +int target_tpg_enable(struct se_portal_group *se_tpg); +int target_tpg_disable(struct se_portal_group *se_tpg); /* target_core_transport.c */ int init_se_kmem_caches(void); void release_se_kmem_caches(void); u32 scsi_get_new_index(scsi_index_t); void transport_subsystem_check_init(void); -void transport_uninit_session(struct se_session *); unsigned char *transport_dump_cmd_direction(struct se_cmd *); void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_info(struct se_device *, struct se_lun *, diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 7a3f07979a02..49d9167bb263 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -663,7 +663,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( } pr_reg->pr_res_mapped_lun = mapped_lun; pr_reg->pr_aptpl_target_lun = lun->unpacked_lun; - pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; + pr_reg->tg_pt_sep_rtpi = lun->lun_tpg->tpg_rtpi; pr_reg->pr_res_key = sa_res_key; pr_reg->pr_reg_all_tg_pt = all_tg_pt; pr_reg->pr_reg_aptpl = aptpl; @@ -967,7 +967,7 @@ static int __core_scsi3_check_aptpl_registration( rcu_read_unlock(); pr_reg->pr_reg_nacl = nacl; - pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; + pr_reg->tg_pt_sep_rtpi = lun->lun_tpg->tpg_rtpi; list_del(&pr_reg->pr_reg_aptpl_list); spin_unlock(&pr_tmpl->aptpl_reg_lock); /* @@ -1567,7 +1567,7 @@ core_scsi3_decode_spec_i_port( */ if (tmp_tpg->proto_id != proto_ident) continue; - dest_rtpi = tmp_lun->lun_rtpi; + dest_rtpi = tmp_lun->lun_tpg->tpg_rtpi; iport_ptr = NULL; i_str = target_parse_pr_out_transport_id(tmp_tpg, @@ -3225,7 +3225,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, spin_lock(&dev->se_port_lock); list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) { - if (tmp_lun->lun_rtpi != rtpi) + if (tmp_lun->lun_tpg->tpg_rtpi != rtpi) continue; dest_se_tpg = tmp_lun->lun_tpg; dest_tf_ops = dest_se_tpg->se_tpg_tfo; diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 053bd2eea0e6..50290abc07bc 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -226,7 +226,6 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) struct t10_alua_lu_gp_member *lu_gp_mem; struct t10_alua_tg_pt_gp *tg_pt_gp; unsigned char *prod = &dev->t10_wwn.model[0]; - u32 prod_len; u32 off = 0; u16 len = 0, id_len; @@ -267,10 +266,6 @@ check_t10_vend_desc: * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4 */ id_len = 8; /* For Vendor field */ - prod_len = 4; /* For VPD Header */ - prod_len += 8; /* For Vendor field */ - prod_len += strlen(prod); - prod_len++; /* For : */ if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) id_len += sprintf(&buf[off+12], "%s:%s", prod, @@ -317,7 +312,7 @@ 
check_t10_vend_desc: /* Skip over Obsolete field in RTPI payload * in Table 472 */ off += 2; - put_unaligned_be16(lun->lun_rtpi, &buf[off]); + put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]); off += 2; len += 8; /* Header size + Designation descriptor */ /* diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index f85ee5b0fd80..c42cbde8a31b 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -455,7 +455,7 @@ static ssize_t target_stat_port_indx_show(struct config_item *item, char *page) rcu_read_lock(); dev = rcu_dereference(lun->lun_se_dev); if (dev) - ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi); + ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_tpg->tpg_rtpi); rcu_read_unlock(); return ret; } @@ -561,7 +561,7 @@ static ssize_t target_stat_tgt_port_indx_show(struct config_item *item, rcu_read_lock(); dev = rcu_dereference(lun->lun_se_dev); if (dev) - ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_rtpi); + ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_tpg->tpg_rtpi); rcu_read_unlock(); return ret; } @@ -579,7 +579,7 @@ static ssize_t target_stat_tgt_port_name_show(struct config_item *item, if (dev) ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", tpg->se_tpg_tfo->fabric_name, - lun->lun_rtpi); + lun->lun_tpg->tpg_rtpi); rcu_read_unlock(); return ret; } diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 2b95b4550a63..4718db628222 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -188,14 +188,23 @@ static void core_tmr_drain_tmr_list( * LUN_RESET tmr.. */ spin_lock_irqsave(&dev->se_tmr_lock, flags); - if (tmr) - list_del_init(&tmr->tmr_list); list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { + if (tmr_p == tmr) + continue; + cmd = tmr_p->task_cmd; if (!cmd) { pr_err("Unable to locate struct se_cmd for TMR\n"); continue; } + + /* + * We only execute one LUN_RESET at a time so we can't wait + * on them below. + */ + if (tmr_p->function == TMR_LUN_RESET) + continue; + /* * If this function was called with a valid pr_res_key * parameter (eg: for PROUT PREEMPT_AND_ABORT service action @@ -379,14 +388,25 @@ int core_tmr_lun_reset( tmr_nacl->initiatorname); } } + + + /* + * We only allow one reset or preempt and abort to execute at a time + * to prevent one call from claiming all the cmds causing a second + * call from returning while cmds it should have waited on are still + * running. + */ + mutex_lock(&dev->lun_reset_mutex); + pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", (preempt_and_abort_list) ? 
"Preempt" : "TMR", dev->transport->name, tas); - core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas, preempt_and_abort_list); + mutex_unlock(&dev->lun_reset_mutex); + /* * Clear any legacy SPC-2 reservation when called during * LOGICAL UNIT RESET diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 736847c933e5..c0e429e5ef31 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -31,6 +31,7 @@ #include "target_core_ua.h" extern struct se_device *g_lun0_dev; +static DEFINE_XARRAY_ALLOC(tpg_xa); /* __core_tpg_get_initiator_node_acl(): * @@ -328,7 +329,7 @@ static void target_shutdown_sessions(struct se_node_acl *acl) restart: spin_lock_irqsave(&acl->nacl_sess_lock, flags); list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) { - if (atomic_read(&sess->stopped)) + if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped)) continue; list_del_init(&sess->sess_acl_list); @@ -439,6 +440,68 @@ static void core_tpg_lun_ref_release(struct percpu_ref *ref) complete(&lun->lun_shutdown_comp); } +static int target_tpg_register_rtpi(struct se_portal_group *se_tpg) +{ + u32 val; + int ret; + + if (se_tpg->rtpi_manual) { + ret = xa_insert(&tpg_xa, se_tpg->tpg_rtpi, se_tpg, GFP_KERNEL); + if (ret) { + pr_info("%s_TPG[%hu] - Can not set RTPI %#x, it is already busy", + se_tpg->se_tpg_tfo->fabric_name, + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg), + se_tpg->tpg_rtpi); + return -EINVAL; + } + } else { + ret = xa_alloc(&tpg_xa, &val, se_tpg, + XA_LIMIT(1, USHRT_MAX), GFP_KERNEL); + if (!ret) + se_tpg->tpg_rtpi = val; + } + + return ret; +} + +static void target_tpg_deregister_rtpi(struct se_portal_group *se_tpg) +{ + if (se_tpg->tpg_rtpi && se_tpg->enabled) + xa_erase(&tpg_xa, se_tpg->tpg_rtpi); +} + +int target_tpg_enable(struct se_portal_group *se_tpg) +{ + int ret; + + ret = target_tpg_register_rtpi(se_tpg); + if (ret) + return ret; + + ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true); + if (ret) { + target_tpg_deregister_rtpi(se_tpg); + return ret; + } + + se_tpg->enabled = true; + + return 0; +} + +int target_tpg_disable(struct se_portal_group *se_tpg) +{ + int ret; + + target_tpg_deregister_rtpi(se_tpg); + + ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false); + if (!ret) + se_tpg->enabled = false; + + return ret; +} + /* Does not change se_wwn->priv. 
*/ int core_tpg_register( struct se_wwn *se_wwn, @@ -535,6 +598,8 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head); } + target_tpg_deregister_rtpi(se_tpg); + return 0; } EXPORT_SYMBOL(core_tpg_deregister); @@ -578,10 +643,6 @@ int core_tpg_add_lun( if (ret < 0) goto out; - ret = core_alloc_rtpi(lun, dev); - if (ret) - goto out_kill_ref; - if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); @@ -605,8 +666,6 @@ int core_tpg_add_lun( return 0; -out_kill_ref: - percpu_ref_exit(&lun->lun_ref); out: return ret; } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 5926316252eb..86adff2a86ed 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -220,12 +220,52 @@ void transport_subsystem_check_init(void) sub_api_initialized = 1; } -static void target_release_sess_cmd_refcnt(struct percpu_ref *ref) +static void target_release_cmd_refcnt(struct percpu_ref *ref) { - struct se_session *sess = container_of(ref, typeof(*sess), cmd_count); + struct target_cmd_counter *cmd_cnt = container_of(ref, + typeof(*cmd_cnt), + refcnt); + wake_up(&cmd_cnt->refcnt_wq); +} + +struct target_cmd_counter *target_alloc_cmd_counter(void) +{ + struct target_cmd_counter *cmd_cnt; + int rc; + + cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL); + if (!cmd_cnt) + return NULL; + + init_completion(&cmd_cnt->stop_done); + init_waitqueue_head(&cmd_cnt->refcnt_wq); + atomic_set(&cmd_cnt->stopped, 0); - wake_up(&sess->cmd_count_wq); + rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0, + GFP_KERNEL); + if (rc) + goto free_cmd_cnt; + + return cmd_cnt; + +free_cmd_cnt: + kfree(cmd_cnt); + return NULL; } +EXPORT_SYMBOL_GPL(target_alloc_cmd_counter); + +void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt) +{ + /* + * Drivers like loop do not call target_stop_session during session + * shutdown so we have to drop the ref taken at init time here. + */ + if (!atomic_read(&cmd_cnt->stopped)) + percpu_ref_put(&cmd_cnt->refcnt); + + percpu_ref_exit(&cmd_cnt->refcnt); +} +EXPORT_SYMBOL_GPL(target_free_cmd_counter); /** * transport_init_session - initialize a session object @@ -233,32 +273,14 @@ static void target_release_sess_cmd_refcnt(struct percpu_ref *ref) * * The caller must have zero-initialized @se_sess before calling this function. */ -int transport_init_session(struct se_session *se_sess) +void transport_init_session(struct se_session *se_sess) { INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); spin_lock_init(&se_sess->sess_cmd_lock); - init_waitqueue_head(&se_sess->cmd_count_wq); - init_completion(&se_sess->stop_done); - atomic_set(&se_sess->stopped, 0); - return percpu_ref_init(&se_sess->cmd_count, - target_release_sess_cmd_refcnt, 0, GFP_KERNEL); } EXPORT_SYMBOL(transport_init_session); -void transport_uninit_session(struct se_session *se_sess) -{ - /* - * Drivers like iscsi and loop do not call target_stop_session - * during session shutdown so we have to drop the ref taken at init - * time here. - */ - if (!atomic_read(&se_sess->stopped)) - percpu_ref_put(&se_sess->cmd_count); - - percpu_ref_exit(&se_sess->cmd_count); -} - /** * transport_alloc_session - allocate a session object and initialize it * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. 
@@ -266,7 +288,6 @@ void transport_uninit_session(struct se_session *se_sess) struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) { struct se_session *se_sess; - int ret; se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); if (!se_sess) { @@ -274,11 +295,7 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) " se_sess_cache\n"); return ERR_PTR(-ENOMEM); } - ret = transport_init_session(se_sess); - if (ret < 0) { - kmem_cache_free(se_sess_cache, se_sess); - return ERR_PTR(ret); - } + transport_init_session(se_sess); se_sess->sup_prot_ops = sup_prot_ops; return se_sess; @@ -444,8 +461,13 @@ target_setup_session(struct se_portal_group *tpg, int (*callback)(struct se_portal_group *, struct se_session *, void *)) { + struct target_cmd_counter *cmd_cnt; struct se_session *sess; + int rc; + cmd_cnt = target_alloc_cmd_counter(); + if (!cmd_cnt) + return ERR_PTR(-ENOMEM); /* * If the fabric driver is using percpu-ida based pre allocation * of I/O descriptor tags, go ahead and perform that setup now.. @@ -455,29 +477,36 @@ target_setup_session(struct se_portal_group *tpg, else sess = transport_alloc_session(prot_op); - if (IS_ERR(sess)) - return sess; + if (IS_ERR(sess)) { + rc = PTR_ERR(sess); + goto free_cnt; + } + sess->cmd_cnt = cmd_cnt; sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg, (unsigned char *)initiatorname); if (!sess->se_node_acl) { - transport_free_session(sess); - return ERR_PTR(-EACCES); + rc = -EACCES; + goto free_sess; } /* * Go ahead and perform any remaining fabric setup that is * required before transport_register_session(). */ if (callback != NULL) { - int rc = callback(tpg, sess, private); - if (rc) { - transport_free_session(sess); - return ERR_PTR(rc); - } + rc = callback(tpg, sess, private); + if (rc) + goto free_sess; } transport_register_session(tpg, sess->se_node_acl, sess, private); return sess; + +free_sess: + transport_free_session(sess); +free_cnt: + target_free_cmd_counter(cmd_cnt); + return ERR_PTR(rc); } EXPORT_SYMBOL(target_setup_session); @@ -602,7 +631,8 @@ void transport_free_session(struct se_session *se_sess) sbitmap_queue_free(&se_sess->sess_tag_pool); kvfree(se_sess->sess_cmd_map); } - transport_uninit_session(se_sess); + if (se_sess->cmd_cnt) + target_free_cmd_counter(se_sess->cmd_cnt); kmem_cache_free(se_sess_cache, se_sess); } EXPORT_SYMBOL(transport_free_session); @@ -1412,14 +1442,12 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) * * Preserves the value of @cmd->tag. 
*/ -void __target_init_cmd( - struct se_cmd *cmd, - const struct target_core_fabric_ops *tfo, - struct se_session *se_sess, - u32 data_length, - int data_direction, - int task_attr, - unsigned char *sense_buffer, u64 unpacked_lun) +void __target_init_cmd(struct se_cmd *cmd, + const struct target_core_fabric_ops *tfo, + struct se_session *se_sess, u32 data_length, + int data_direction, int task_attr, + unsigned char *sense_buffer, u64 unpacked_lun, + struct target_cmd_counter *cmd_cnt) { INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_qf_node); @@ -1439,6 +1467,7 @@ void __target_init_cmd( cmd->sam_task_attr = task_attr; cmd->sense_buffer = sense_buffer; cmd->orig_fe_lun = unpacked_lun; + cmd->cmd_cnt = cmd_cnt; if (!(cmd->se_cmd_flags & SCF_USE_CPUID)) cmd->cpuid = raw_smp_processor_id(); @@ -1658,7 +1687,8 @@ int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, * target_core_fabric_ops->queue_status() callback */ __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length, - data_dir, task_attr, sense, unpacked_lun); + data_dir, task_attr, sense, unpacked_lun, + se_sess->cmd_cnt); /* * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is @@ -1953,7 +1983,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, BUG_ON(!se_tpg); __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, - 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun); + 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun, + se_sess->cmd_cnt); /* * FIXME: Currently expect caller to handle se_cmd->se_tmr_req * allocation failure. @@ -2957,7 +2988,6 @@ EXPORT_SYMBOL(transport_generic_free_cmd); */ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) { - struct se_session *se_sess = se_cmd->se_sess; int ret = 0; /* @@ -2970,9 +3000,14 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) se_cmd->se_cmd_flags |= SCF_ACK_KREF; } - if (!percpu_ref_tryget_live(&se_sess->cmd_count)) - ret = -ESHUTDOWN; - + /* + * Users like xcopy do not use counters since they never do a stop + * and wait. + */ + if (se_cmd->cmd_cnt) { + if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt)) + ret = -ESHUTDOWN; + } if (ret && ack_kref) target_put_sess_cmd(se_cmd); @@ -2993,7 +3028,7 @@ static void target_free_cmd_mem(struct se_cmd *cmd) static void target_release_cmd_kref(struct kref *kref) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); - struct se_session *se_sess = se_cmd->se_sess; + struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt; struct completion *free_compl = se_cmd->free_compl; struct completion *abrt_compl = se_cmd->abrt_compl; @@ -3004,7 +3039,8 @@ static void target_release_cmd_kref(struct kref *kref) if (abrt_compl) complete(abrt_compl); - percpu_ref_put(&se_sess->cmd_count); + if (cmd_cnt) + percpu_ref_put(&cmd_cnt->refcnt); } /** @@ -3123,46 +3159,67 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd) } EXPORT_SYMBOL(target_show_cmd); -static void target_stop_session_confirm(struct percpu_ref *ref) +static void target_stop_cmd_counter_confirm(struct percpu_ref *ref) +{ + struct target_cmd_counter *cmd_cnt = container_of(ref, + struct target_cmd_counter, + refcnt); + complete_all(&cmd_cnt->stop_done); +} + +/** + * target_stop_cmd_counter - Stop new IO from being added to the counter. 
+ * @cmd_cnt: counter to stop + */ +void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt) { - struct se_session *se_sess = container_of(ref, struct se_session, - cmd_count); - complete_all(&se_sess->stop_done); + pr_debug("Stopping command counter.\n"); + if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1)) + percpu_ref_kill_and_confirm(&cmd_cnt->refcnt, + target_stop_cmd_counter_confirm); } +EXPORT_SYMBOL_GPL(target_stop_cmd_counter); /** * target_stop_session - Stop new IO from being queued on the session. - * @se_sess: session to stop + * @se_sess: session to stop */ void target_stop_session(struct se_session *se_sess) { - pr_debug("Stopping session queue.\n"); - if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0) - percpu_ref_kill_and_confirm(&se_sess->cmd_count, - target_stop_session_confirm); + target_stop_cmd_counter(se_sess->cmd_cnt); } EXPORT_SYMBOL(target_stop_session); /** - * target_wait_for_sess_cmds - Wait for outstanding commands - * @se_sess: session to wait for active I/O + * target_wait_for_cmds - Wait for outstanding cmds. + * @cmd_cnt: counter to wait for active I/O for. */ -void target_wait_for_sess_cmds(struct se_session *se_sess) +void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt) { int ret; - WARN_ON_ONCE(!atomic_read(&se_sess->stopped)); + WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped)); do { pr_debug("Waiting for running cmds to complete.\n"); - ret = wait_event_timeout(se_sess->cmd_count_wq, - percpu_ref_is_zero(&se_sess->cmd_count), - 180 * HZ); + ret = wait_event_timeout(cmd_cnt->refcnt_wq, + percpu_ref_is_zero(&cmd_cnt->refcnt), + 180 * HZ); } while (ret <= 0); - wait_for_completion(&se_sess->stop_done); + wait_for_completion(&cmd_cnt->stop_done); pr_debug("Waiting for cmds done.\n"); } +EXPORT_SYMBOL_GPL(target_wait_for_cmds); + +/** + * target_wait_for_sess_cmds - Wait for outstanding commands + * @se_sess: session to wait for active I/O + */ +void target_wait_for_sess_cmds(struct se_session *se_sess) +{ + target_wait_for_cmds(se_sess->cmd_cnt); +} EXPORT_SYMBOL(target_wait_for_sess_cmds); /* diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 49eaee022ef1..91ed015b588c 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -461,8 +461,6 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = { int target_xcopy_setup_pt(void) { - int ret; - xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0); if (!xcopy_wq) { pr_err("Unable to allocate xcopy_wq\n"); @@ -479,9 +477,7 @@ int target_xcopy_setup_pt(void) INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list); INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list); memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); - ret = transport_init_session(&xcopy_pt_sess); - if (ret < 0) - goto destroy_wq; + transport_init_session(&xcopy_pt_sess); xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; @@ -490,19 +486,12 @@ int target_xcopy_setup_pt(void) xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl; return 0; - -destroy_wq: - destroy_workqueue(xcopy_wq); - xcopy_wq = NULL; - return ret; } void target_xcopy_release_pt(void) { - if (xcopy_wq) { + if (xcopy_wq) destroy_workqueue(xcopy_wq); - transport_uninit_session(&xcopy_pt_sess); - } } /* @@ -602,8 +591,8 @@ static int target_xcopy_read_source( (unsigned long long)src_lba, transfer_length_block, src_bytes); __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, src_bytes, - DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0); - + DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0, 
+ NULL); rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0], remote_port); if (rc < 0) { @@ -647,8 +636,8 @@ static int target_xcopy_write_destination( (unsigned long long)dst_lba, transfer_length_block, dst_bytes); __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, dst_bytes, - DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0); - + DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0, + NULL); rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0], remote_port); if (rc < 0) { diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index 2ff716d8cbdd..00e5573c6296 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -146,7 +146,6 @@ void ft_release_cmd(struct se_cmd *); int ft_queue_status(struct se_cmd *); int ft_queue_data_in(struct se_cmd *); int ft_write_pending(struct se_cmd *); -int ft_get_cmd_state(struct se_cmd *); void ft_queue_tm_resp(struct se_cmd *); void ft_aborted_task(struct se_cmd *); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 410b723f9d79..21783cd71c15 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -223,11 +223,6 @@ int ft_write_pending(struct se_cmd *se_cmd) return 0; } -int ft_get_cmd_state(struct se_cmd *se_cmd) -{ - return 0; -} - /* * FC sequence response handler for follow-on sequences (data) and aborts. */ diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 1a38c98f681b..6ac3fc1a7d39 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -398,15 +398,6 @@ static u16 ft_get_tag(struct se_portal_group *se_tpg) return ft_tpg(se_tpg)->index; } -static int ft_check_false(struct se_portal_group *se_tpg) -{ - return 0; -} - -static void ft_set_default_node_attr(struct se_node_acl *se_nacl) -{ -} - static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg) { return ft_tpg(se_tpg)->index; @@ -418,10 +409,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = { .node_acl_size = sizeof(struct ft_node_acl), .tpg_get_wwn = ft_get_fabric_wwn, .tpg_get_tag = ft_get_tag, - .tpg_check_demo_mode = ft_check_false, - .tpg_check_demo_mode_cache = ft_check_false, - .tpg_check_demo_mode_write_protect = ft_check_false, - .tpg_check_prod_mode_write_protect = ft_check_false, .tpg_get_inst_index = ft_tpg_get_inst_index, .check_stop_free = ft_check_stop_free, .release_cmd = ft_release_cmd, @@ -429,8 +416,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = { .sess_get_index = ft_sess_get_index, .sess_get_initiator_sid = NULL, .write_pending = ft_write_pending, - .set_default_node_attributes = ft_set_default_node_attr, - .get_cmd_state = ft_get_cmd_state, .queue_data_in = ft_queue_data_in, .queue_status = ft_queue_status, .queue_tm_rsp = ft_queue_tm_resp, diff --git a/drivers/target/tcm_remote/Kconfig b/drivers/target/tcm_remote/Kconfig new file mode 100644 index 000000000000..e6bebb5fe6f1 --- /dev/null +++ b/drivers/target/tcm_remote/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +config REMOTE_TARGET + tristate "TCM Virtual Remote target" + depends on SCSI + help + Say Y here to enable the TCM Virtual Remote fabric + That fabric is a dummy fabric to tell TCM about configuration + of TPG/ACL/LUN on peer nodes in a cluster. 
diff --git a/drivers/target/tcm_remote/Makefile b/drivers/target/tcm_remote/Makefile new file mode 100644 index 000000000000..5818ffd0b0fa --- /dev/null +++ b/drivers/target/tcm_remote/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_REMOTE_TARGET) += tcm_remote.o diff --git a/drivers/target/tcm_remote/tcm_remote.c b/drivers/target/tcm_remote/tcm_remote.c new file mode 100644 index 000000000000..cb8db2558056 --- /dev/null +++ b/drivers/target/tcm_remote/tcm_remote.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/configfs.h> +#include <scsi/scsi.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_cmnd.h> + +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> + +#include "tcm_remote.h" + +static inline struct tcm_remote_tpg *remote_tpg(struct se_portal_group *se_tpg) +{ + return container_of(se_tpg, struct tcm_remote_tpg, remote_se_tpg); +} + +static char *tcm_remote_get_endpoint_wwn(struct se_portal_group *se_tpg) +{ + /* + * Return the passed NAA identifier for the Target Port + */ + return &remote_tpg(se_tpg)->remote_hba->remote_wwn_address[0]; +} + +static u16 tcm_remote_get_tag(struct se_portal_group *se_tpg) +{ + /* + * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83 + * to represent the SCSI Target Port. + */ + return remote_tpg(se_tpg)->remote_tpgt; +} + +static int tcm_remote_dummy_cmd_fn(struct se_cmd *se_cmd) +{ + return 0; +} + +static void tcm_remote_dummy_cmd_void_fn(struct se_cmd *se_cmd) +{ + +} + +static char *tcm_remote_dump_proto_id(struct tcm_remote_hba *remote_hba) +{ + switch (remote_hba->remote_proto_id) { + case SCSI_PROTOCOL_SAS: + return "SAS"; + case SCSI_PROTOCOL_SRP: + return "SRP"; + case SCSI_PROTOCOL_FCP: + return "FCP"; + case SCSI_PROTOCOL_ISCSI: + return "iSCSI"; + default: + break; + } + + return "Unknown"; +} + +static int tcm_remote_port_link( + struct se_portal_group *se_tpg, + struct se_lun *lun) +{ + pr_debug("TCM_Remote_ConfigFS: Port Link LUN %lld Successful\n", + lun->unpacked_lun); + return 0; +} + +static void tcm_remote_port_unlink( + struct se_portal_group *se_tpg, + struct se_lun *lun) +{ + pr_debug("TCM_Remote_ConfigFS: Port Unlink LUN %lld Successful\n", + lun->unpacked_lun); +} + +static struct se_portal_group *tcm_remote_make_tpg( + struct se_wwn *wwn, + const char *name) +{ + struct tcm_remote_hba *remote_hba = container_of(wwn, + struct tcm_remote_hba, remote_hba_wwn); + struct tcm_remote_tpg *remote_tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) { + pr_err("Unable to locate \"tpgt_#\" directory group\n"); + return ERR_PTR(-EINVAL); + } + if (kstrtoul(name + 5, 10, &tpgt)) + return ERR_PTR(-EINVAL); + + if (tpgt >= TL_TPGS_PER_HBA) { + pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n", + tpgt, TL_TPGS_PER_HBA); + return ERR_PTR(-EINVAL); + } + remote_tpg = &remote_hba->remote_hba_tpgs[tpgt]; + remote_tpg->remote_hba = remote_hba; + remote_tpg->remote_tpgt = tpgt; + /* + * Register the remote_tpg as a emulated TCM Target Endpoint + */ + ret = core_tpg_register(wwn, &remote_tpg->remote_se_tpg, + remote_hba->remote_proto_id); + if (ret < 0) + return ERR_PTR(-ENOMEM); + + pr_debug("TCM_Remote_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n", + tcm_remote_dump_proto_id(remote_hba), + 
config_item_name(&wwn->wwn_group.cg_item), tpgt); + return &remote_tpg->remote_se_tpg; +} + +static void tcm_remote_drop_tpg(struct se_portal_group *se_tpg) +{ + struct se_wwn *wwn = se_tpg->se_tpg_wwn; + struct tcm_remote_tpg *remote_tpg = container_of(se_tpg, + struct tcm_remote_tpg, remote_se_tpg); + struct tcm_remote_hba *remote_hba; + unsigned short tpgt; + + remote_hba = remote_tpg->remote_hba; + tpgt = remote_tpg->remote_tpgt; + + /* + * Deregister the remote_tpg as a emulated TCM Target Endpoint + */ + core_tpg_deregister(se_tpg); + + remote_tpg->remote_hba = NULL; + remote_tpg->remote_tpgt = 0; + + pr_debug("TCM_Remote_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n", + tcm_remote_dump_proto_id(remote_hba), + config_item_name(&wwn->wwn_group.cg_item), tpgt); +} + +static struct se_wwn *tcm_remote_make_wwn( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_remote_hba *remote_hba; + char *ptr; + int ret, off = 0; + + remote_hba = kzalloc(sizeof(*remote_hba), GFP_KERNEL); + if (!remote_hba) + return ERR_PTR(-ENOMEM); + + /* + * Determine the emulated Protocol Identifier and Target Port Name + * based on the incoming configfs directory name. + */ + ptr = strstr(name, "naa."); + if (ptr) { + remote_hba->remote_proto_id = SCSI_PROTOCOL_SAS; + goto check_len; + } + ptr = strstr(name, "fc."); + if (ptr) { + remote_hba->remote_proto_id = SCSI_PROTOCOL_FCP; + off = 3; /* Skip over "fc." */ + goto check_len; + } + ptr = strstr(name, "0x"); + if (ptr) { + remote_hba->remote_proto_id = SCSI_PROTOCOL_SRP; + off = 2; /* Skip over "0x" */ + goto check_len; + } + ptr = strstr(name, "iqn."); + if (!ptr) { + pr_err("Unable to locate prefix for emulated Target Port: %s\n", + name); + ret = -EINVAL; + goto out; + } + remote_hba->remote_proto_id = SCSI_PROTOCOL_ISCSI; + +check_len: + if (strlen(name) >= TL_WWN_ADDR_LEN) { + pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n", + name, tcm_remote_dump_proto_id(remote_hba), TL_WWN_ADDR_LEN); + ret = -EINVAL; + goto out; + } + snprintf(&remote_hba->remote_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]); + + pr_debug("TCM_Remote_ConfigFS: Allocated emulated Target %s Address: %s\n", + tcm_remote_dump_proto_id(remote_hba), name); + return &remote_hba->remote_hba_wwn; +out: + kfree(remote_hba); + return ERR_PTR(ret); +} + +static void tcm_remote_drop_wwn(struct se_wwn *wwn) +{ + struct tcm_remote_hba *remote_hba = container_of(wwn, + struct tcm_remote_hba, remote_hba_wwn); + + pr_debug("TCM_Remote_ConfigFS: Deallocating emulated Target %s Address: %s\n", + tcm_remote_dump_proto_id(remote_hba), + remote_hba->remote_wwn_address); + kfree(remote_hba); +} + +static ssize_t tcm_remote_wwn_version_show(struct config_item *item, char *page) +{ + return sprintf(page, "TCM Remote Fabric module %s\n", TCM_REMOTE_VERSION); +} + +CONFIGFS_ATTR_RO(tcm_remote_wwn_, version); + +static struct configfs_attribute *tcm_remote_wwn_attrs[] = { + &tcm_remote_wwn_attr_version, + NULL, +}; + +static const struct target_core_fabric_ops remote_ops = { + .module = THIS_MODULE, + .fabric_name = "remote", + .tpg_get_wwn = tcm_remote_get_endpoint_wwn, + .tpg_get_tag = tcm_remote_get_tag, + .check_stop_free = tcm_remote_dummy_cmd_fn, + .release_cmd = tcm_remote_dummy_cmd_void_fn, + .write_pending = tcm_remote_dummy_cmd_fn, + .queue_data_in = tcm_remote_dummy_cmd_fn, + .queue_status = tcm_remote_dummy_cmd_fn, + .queue_tm_rsp = tcm_remote_dummy_cmd_void_fn, + .aborted_task = tcm_remote_dummy_cmd_void_fn, + 
.fabric_make_wwn = tcm_remote_make_wwn, + .fabric_drop_wwn = tcm_remote_drop_wwn, + .fabric_make_tpg = tcm_remote_make_tpg, + .fabric_drop_tpg = tcm_remote_drop_tpg, + .fabric_post_link = tcm_remote_port_link, + .fabric_pre_unlink = tcm_remote_port_unlink, + .tfc_wwn_attrs = tcm_remote_wwn_attrs, +}; + +static int __init tcm_remote_fabric_init(void) +{ + return target_register_template(&remote_ops); +} + +static void __exit tcm_remote_fabric_exit(void) +{ + target_unregister_template(&remote_ops); +} + +MODULE_DESCRIPTION("TCM virtual remote target"); +MODULE_AUTHOR("Dmitry Bogdanov <d.bogdanov@yadro.com>"); +MODULE_LICENSE("GPL"); +module_init(tcm_remote_fabric_init); +module_exit(tcm_remote_fabric_exit); diff --git a/drivers/target/tcm_remote/tcm_remote.h b/drivers/target/tcm_remote/tcm_remote.h new file mode 100644 index 000000000000..913d1a6eb3a2 --- /dev/null +++ b/drivers/target/tcm_remote/tcm_remote.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/types.h> +#include <linux/device.h> + +#define TCM_REMOTE_VERSION "v0.1" +#define TL_WWN_ADDR_LEN 256 +#define TL_TPGS_PER_HBA 32 + +struct tcm_remote_tpg { + unsigned short remote_tpgt; + struct se_portal_group remote_se_tpg; + struct tcm_remote_hba *remote_hba; +}; + +struct tcm_remote_hba { + u8 remote_proto_id; + unsigned char remote_wwn_address[TL_WWN_ADDR_LEN]; + struct tcm_remote_tpg remote_hba_tpgs[TL_TPGS_PER_HBA]; + struct se_wwn remote_hba_wwn; +}; |
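
Rough usage sketch (not part of the diff above): the per-object command counter introduced by this series (target_alloc_cmd_counter(), target_stop_cmd_counter(), target_wait_for_cmds(), target_free_cmd_counter()) is allocated when a fabric connection or session is set up and drained when it is torn down, mirroring the iscsi_target_login.c and iscsi_target.c hunks. Everything named my_* below is hypothetical, and the includes assume the counter API is declared alongside the other fabric symbols in the usual target headers.

#include <linux/errno.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical per-connection state in an out-of-tree fabric driver. */
struct my_conn {
	struct target_cmd_counter *cmd_cnt;	/* tracks outstanding se_cmds */
};

static int my_conn_setup(struct my_conn *conn)
{
	/* Allocate the counter before any command can be queued on it. */
	conn->cmd_cnt = target_alloc_cmd_counter();
	if (!conn->cmd_cnt)
		return -ENOMEM;
	return 0;
}

static void my_conn_teardown(struct my_conn *conn)
{
	/*
	 * Kill the percpu ref so new commands fail target_get_sess_cmd(),
	 * wait for commands already charged to this counter to complete,
	 * then release the counter itself.
	 */
	target_stop_cmd_counter(conn->cmd_cnt);
	target_wait_for_cmds(conn->cmd_cnt);
	target_free_cmd_counter(conn->cmd_cnt);
}

In this sketch the counter would also be passed to __target_init_cmd() (or reached via se_sess->cmd_cnt for drivers using target_setup_session()), which is how each se_cmd takes and later drops a reference on it.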