author     Linus Torvalds <torvalds@linux-foundation.org>  2021-11-05 16:42:02 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-11-05 16:42:02 +0100
commit     fe91c4725aeed35023ba4f7a1e1adfebb6878c23 (patch)
tree       7f0e5cbbbe3a1e24d3e3b66ae290625d48ec4b2f /drivers/target
parent     Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/hid... (diff)
parent     scsi: lpfc: Update lpfc version to 14.0.0.3 (diff)
download   linux-fe91c4725aeed35023ba4f7a1e1adfebb6878c23.tar.xz
           linux-fe91c4725aeed35023ba4f7a1e1adfebb6878c23.zip
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This consists of the usual driver updates (ufs, smartpqi, lpfc,
target, megaraid_sas, hisi_sas, qla2xxx) and minor updates and bug
fixes.
Notable core changes are: the removal of scsi->tag, which caused some
churn in obsolete drivers; a sweep through all drivers to call
scsi_done() directly instead of scsi->done(), which removes a pointer
indirection from the hot path; and a move to register core sysfs files
earlier, which means they're available to KOBJ_ADD processing and
necessitates switching all drivers to using attribute groups"
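
The scsi_done() conversion mentioned above is mechanical across drivers. As a rough
illustration only — "foodrv" and its completion helper are invented here, not taken
from any in-tree driver — the before/after shape looks like this:

    #include <scsi/scsi_cmnd.h>

    /* Hypothetical LLD completion helper ("foodrv" is made up). */
    static void foodrv_complete_cmd(struct scsi_cmnd *sc, int result)
    {
        sc->result = result;
        /* old style, removed by this series: sc->scsi_done(sc); */
        scsi_done(sc);  /* direct call, no per-command function pointer */
    }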
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (279 commits)
scsi: lpfc: Update lpfc version to 14.0.0.3
scsi: lpfc: Allow fabric node recovery if recovery is in progress before devloss
scsi: lpfc: Fix link down processing to address NULL pointer dereference
scsi: lpfc: Allow PLOGI retry if previous PLOGI was aborted
scsi: lpfc: Fix use-after-free in lpfc_unreg_rpi() routine
scsi: lpfc: Correct sysfs reporting of loop support after SFP status change
scsi: lpfc: Wait for successful restart of SLI3 adapter during host sg_reset
scsi: lpfc: Revert LOG_TRACE_EVENT back to LOG_INIT prior to driver_resource_setup()
scsi: ufs: ufshcd-pltfrm: Fix memory leak due to probe defer
scsi: ufs: mediatek: Avoid sched_clock() misuse
scsi: mpt3sas: Make mpt3sas_dev_attrs static
scsi: scsi_transport_sas: Add 22.5 Gbps link rate definitions
scsi: target: core: Stop using bdevname()
scsi: aha1542: Use memcpy_{from,to}_bvec()
scsi: sr: Add error handling support for add_disk()
scsi: sd: Add error handling support for add_disk()
scsi: target: Perform ALUA group changes in one step
scsi: target: Replace lun_tg_pt_gp_lock with rcu in I/O path
scsi: target: Fix alua_tg_pt_gps_count tracking
scsi: target: Fix ordered tag handling
...
Diffstat (limited to 'drivers/target')
-rw-r--r--   drivers/target/iscsi/cxgbit/cxgbit_cm.c        |  8
-rw-r--r--   drivers/target/iscsi/cxgbit/cxgbit_main.c      | 17
-rw-r--r--   drivers/target/iscsi/cxgbit/cxgbit_target.c    | 28
-rw-r--r--   drivers/target/iscsi/iscsi_target_configfs.c   | 91
-rw-r--r--   drivers/target/loopback/tcm_loop.c             |  4
-rw-r--r--   drivers/target/sbp/sbp_target.c                | 30
-rw-r--r--   drivers/target/target_core_alua.c              | 83
-rw-r--r--   drivers/target/target_core_configfs.c          |  1
-rw-r--r--   drivers/target/target_core_device.c            |  2
-rw-r--r--   drivers/target/target_core_fabric_configfs.c   | 78
-rw-r--r--   drivers/target/target_core_iblock.c            |  4
-rw-r--r--   drivers/target/target_core_internal.h          |  1
-rw-r--r--   drivers/target/target_core_transport.c         | 94
-rw-r--r--   drivers/target/target_core_user.c              |  7
-rw-r--r--   drivers/target/target_core_xcopy.c             | 14
15 files changed, 279 insertions(+), 183 deletions(-)
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 518ded214e74..da31a308a064 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -836,11 +836,13 @@ static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
     csk->rcv_win = CXGBIT_10G_RCV_WIN;
     if (scale)
         csk->rcv_win *= scale;
+    csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10);

 #define CXGBIT_10G_SND_WIN (256 * 1024)
     csk->snd_win = CXGBIT_10G_SND_WIN;
     if (scale)
         csk->snd_win *= scale;
+    csk->snd_win = min(csk->snd_win, 512U * 1024);

     pr_debug("%s snd_win %d rcv_win %d\n",
              __func__, csk->snd_win, csk->rcv_win);
@@ -1065,7 +1067,7 @@ int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
     if (!skb)
         return -1;

-    credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
+    credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(3) |
                   RX_CREDITS_V(csk->rx_credits);

     cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
@@ -1197,7 +1199,6 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
     if (tcph->ece && tcph->cwr)
         opt2 |= CCTRL_ECN_V(1);

-    opt2 |= RX_COALESCE_V(3);
     opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
     opt2 |= T5_ISS_F;
@@ -1646,9 +1647,6 @@ cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
     csk->rcv_nxt = rcv_isn;

-    if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
-        csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));
-
     csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
     cxgbit_set_emss(csk, tcp_opt);
     dst_confirm(csk->dst);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index bd37f2afadea..c6678dc8dd41 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -33,11 +33,18 @@ static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
     struct cxgb4_lld_info *lldi = &cdev->lldi;
     u32 mdsl;

-#define ULP2_MAX_PKT_LEN 16224
-#define ISCSI_PDU_NONPAYLOAD_LEN 312
-    mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
-                 ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
-    mdsl = min_t(u32, mdsl, 8192);
+#define CXGBIT_T5_MAX_PDU_LEN 16224
+#define CXGBIT_PDU_NONPAYLOAD_LEN 312 /* 48(BHS) + 256(AHS) + 8(Digest) */
+    if (is_t5(lldi->adapter_type)) {
+        mdsl = min_t(u32, lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN,
+                     CXGBIT_T5_MAX_PDU_LEN - CXGBIT_PDU_NONPAYLOAD_LEN);
+    } else {
+        mdsl = lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN;
+        mdsl = min(mdsl, 16384U);
+    }
+
+    mdsl = round_down(mdsl, 4);
+    mdsl = min_t(u32, mdsl, 4 * PAGE_SIZE);
     mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);

     cdev->mdsl = mdsl;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index 282297ffc404..d314ee120a48 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -189,8 +189,8 @@ cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
     wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
                   FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);

-    req->tunnel_to_proxy = htonl((wr_ulp_mode) | force |
-                                 FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1));
+    req->tunnel_to_proxy = htonl(wr_ulp_mode | force |
+                                 FW_OFLD_TX_DATA_WR_SHOVE_F);
 }

 static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
@@ -1531,7 +1531,7 @@ out:
     return ret;
 }

-static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
 {
     struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
     struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
@@ -1557,6 +1557,24 @@ static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
     return ret;
 }

+static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+    struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+    int ret;
+
+    ret = cxgbit_process_lro_skb(csk, skb);
+    if (ret)
+        return ret;
+
+    csk->rx_credits += lro_cb->pdu_totallen;
+    if (csk->rx_credits >= csk->rcv_win) {
+        csk->rx_credits = 0;
+        cxgbit_rx_data_ack(csk);
+    }
+
+    return 0;
+}
+
 static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
 {
     struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
@@ -1564,9 +1582,9 @@ static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
     if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
         if (is_t5(lldi->adapter_type))
-            ret = cxgbit_rx_lro_skb(csk, skb);
+            ret = cxgbit_t5_rx_lro_skb(csk, skb);
         else
-            ret = cxgbit_process_lro_skb(csk, skb);
+            ret = cxgbit_rx_lro_skb(csk, skb);
     }

     __kfree_skb(skb);
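
The new cxgbit_rx_lro_skb() above accumulates the bytes consumed per LRO skb and
returns credits to the hardware once a full receive window has been consumed. The
same accumulate-then-ack idea, stripped of the driver types — all names below are
invented for illustration, this is not the driver's API:

    /* Credit-based receive flow control in miniature (invented names). */
    struct rx_flow {
        unsigned int credits;   /* bytes consumed but not yet acked */
        unsigned int window;    /* receive window advertised to the peer */
    };

    static void rx_consume(struct rx_flow *f, unsigned int bytes,
                           void (*send_ack)(unsigned int credits))
    {
        f->credits += bytes;
        if (f->credits >= f->window) {
            send_ack(f->credits);   /* hand buffer space back to the peer */
            f->credits = 0;
        }
    }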
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index f4a24fa5058e..2a9de24a8bbe 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1005,74 +1005,15 @@ static struct configfs_attribute *lio_target_tpg_param_attrs[] = {

 /* Start items for lio_target_tpg_cit */

-static ssize_t lio_target_tpg_enable_show(struct config_item *item, char *page)
-{
-    struct se_portal_group *se_tpg = to_tpg(item);
-    struct iscsi_portal_group *tpg = container_of(se_tpg,
-            struct iscsi_portal_group, tpg_se_tpg);
-    ssize_t len;
-
-    spin_lock(&tpg->tpg_state_lock);
-    len = sprintf(page, "%d\n",
-            (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
-    spin_unlock(&tpg->tpg_state_lock);
-
-    return len;
-}
-
-static ssize_t lio_target_tpg_enable_store(struct config_item *item,
-        const char *page, size_t count)
-{
-    struct se_portal_group *se_tpg = to_tpg(item);
-    struct iscsi_portal_group *tpg = container_of(se_tpg,
-            struct iscsi_portal_group, tpg_se_tpg);
-    u32 op;
-    int ret;
-
-    ret = kstrtou32(page, 0, &op);
-    if (ret)
-        return ret;
-    if ((op != 1) && (op != 0)) {
-        pr_err("Illegal value for tpg_enable: %u\n", op);
-        return -EINVAL;
-    }
-
-    ret = iscsit_get_tpg(tpg);
-    if (ret < 0)
-        return -EINVAL;
-
-    if (op) {
-        ret = iscsit_tpg_enable_portal_group(tpg);
-        if (ret < 0)
-            goto out;
-    } else {
-        /*
-         * iscsit_tpg_disable_portal_group() assumes force=1
-         */
-        ret = iscsit_tpg_disable_portal_group(tpg, 1);
-        if (ret < 0)
-            goto out;
-    }
-
-    iscsit_put_tpg(tpg);
-    return count;
-out:
-    iscsit_put_tpg(tpg);
-    return -EINVAL;
-}
-
 static ssize_t lio_target_tpg_dynamic_sessions_show(struct config_item *item,
         char *page)
 {
     return target_show_dynamic_sessions(to_tpg(item), page);
 }

-CONFIGFS_ATTR(lio_target_tpg_, enable);
 CONFIGFS_ATTR_RO(lio_target_tpg_, dynamic_sessions);

 static struct configfs_attribute *lio_target_tpg_attrs[] = {
-    &lio_target_tpg_attr_enable,
     &lio_target_tpg_attr_dynamic_sessions,
     NULL,
 };
@@ -1129,6 +1070,37 @@ free_out:
     return NULL;
 }

+static int lio_target_tiqn_enabletpg(struct se_portal_group *se_tpg,
+                                     bool enable)
+{
+    struct iscsi_portal_group *tpg = container_of(se_tpg,
+            struct iscsi_portal_group, tpg_se_tpg);
+    int ret;
+
+    ret = iscsit_get_tpg(tpg);
+    if (ret < 0)
+        return -EINVAL;
+
+    if (enable) {
+        ret = iscsit_tpg_enable_portal_group(tpg);
+        if (ret < 0)
+            goto out;
+    } else {
+        /*
+         * iscsit_tpg_disable_portal_group() assumes force=1
+         */
+        ret = iscsit_tpg_disable_portal_group(tpg, 1);
+        if (ret < 0)
+            goto out;
+    }
+
+    iscsit_put_tpg(tpg);
+    return 0;
+out:
+    iscsit_put_tpg(tpg);
+    return -EINVAL;
+}
+
 static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
 {
     struct iscsi_portal_group *tpg;
@@ -1556,6 +1528,7 @@ const struct target_core_fabric_ops iscsi_ops = {
     .fabric_drop_wwn       = lio_target_call_coredeltiqn,
     .add_wwn_groups        = lio_target_add_wwn_groups,
     .fabric_make_tpg       = lio_target_tiqn_addtpg,
+    .fabric_enable_tpg     = lio_target_tiqn_enabletpg,
     .fabric_drop_tpg       = lio_target_tiqn_deltpg,
     .fabric_make_np        = lio_target_call_addnptotpg,
     .fabric_drop_np        = lio_target_call_delnpfromtpg,
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 52db28d868d5..4407b56aa6d1 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -71,7 +71,7 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
     if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
         kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
     else
-        sc->scsi_done(sc);
+        scsi_done(sc);
 }

 static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
@@ -165,7 +165,7 @@ static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
     return;

 out_done:
-    sc->scsi_done(sc);
+    scsi_done(sc);
 }

 /*
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index b9f9fb5d7e63..504670994fb4 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -2125,32 +2125,13 @@ static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
     return count;
 }

-static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
+static int sbp_enable_tpg(struct se_portal_group *se_tpg, bool enable)
 {
-    struct se_portal_group *se_tpg = to_tpg(item);
     struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
     struct sbp_tport *tport = tpg->tport;
-    return sprintf(page, "%d\n", tport->enable);
-}
-
-static ssize_t sbp_tpg_enable_store(struct config_item *item,
-        const char *page, size_t count)
-{
-    struct se_portal_group *se_tpg = to_tpg(item);
-    struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
-    struct sbp_tport *tport = tpg->tport;
-    unsigned long val;
     int ret;

-    if (kstrtoul(page, 0, &val) < 0)
-        return -EINVAL;
-    if ((val != 0) && (val != 1))
-        return -EINVAL;
-
-    if (tport->enable == val)
-        return count;
-
-    if (val) {
+    if (enable) {
         if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
             pr_err("Cannot enable a target with no LUNs!\n");
             return -EINVAL;
@@ -2165,7 +2146,7 @@ static ssize_t sbp_tpg_enable_store(struct config_item *item,
         spin_unlock_bh(&se_tpg->session_lock);
     }

-    tport->enable = val;
+    tport->enable = enable;

     ret = sbp_update_unit_directory(tport);
     if (ret < 0) {
@@ -2173,15 +2154,13 @@ static ssize_t sbp_tpg_enable_store(struct config_item *item,
         return ret;
     }

-    return count;
+    return 0;
 }

 CONFIGFS_ATTR(sbp_tpg_, directory_id);
-CONFIGFS_ATTR(sbp_tpg_, enable);

 static struct configfs_attribute *sbp_tpg_base_attrs[] = {
     &sbp_tpg_attr_directory_id,
-    &sbp_tpg_attr_enable,
     NULL,
 };
@@ -2319,6 +2298,7 @@ static const struct target_core_fabric_ops sbp_ops = {
     .fabric_make_wwn       = sbp_make_tport,
     .fabric_drop_wwn       = sbp_drop_tport,
     .fabric_make_tpg       = sbp_make_tpg,
+    .fabric_enable_tpg     = sbp_enable_tpg,
     .fabric_drop_tpg       = sbp_drop_tpg,
     .fabric_post_link      = sbp_post_link_lun,
     .fabric_pre_unlink     = sbp_pre_unlink_lun,
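
Both fabrics above follow the same conversion: the fabric-local "enable" configfs
attribute goes away, replaced by the new ->fabric_enable_tpg() callback that the
core invokes from a generic attribute (see the target_core_fabric_configfs.c hunk
later in this diff). A minimal sketch of such a callback for a hypothetical fabric —
foo_tpg, foo_hw_start() and foo_hw_stop() are invented names:

    static int foo_enable_tpg(struct se_portal_group *se_tpg, bool enable)
    {
        struct foo_tpg *tpg = container_of(se_tpg, struct foo_tpg, se_tpg);

        /* The core only calls this on an actual state change. */
        return enable ? foo_hw_start(tpg) : foo_hw_stop(tpg);
    }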
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index cb1de1ecaaa6..b56ef8af66e7 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -247,11 +247,11 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
          * this CDB was received upon to determine this value individually
          * for ALUA target port group.
          */
-        spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
-        tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
+        rcu_read_lock();
+        tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp);
         if (tg_pt_gp)
             buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
-        spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
+        rcu_read_unlock();
     }
     transport_kunmap_data_sg(cmd);
@@ -292,24 +292,24 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
      * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
      * for the local tg_pt_gp.
      */
-    spin_lock(&l_lun->lun_tg_pt_gp_lock);
-    l_tg_pt_gp = l_lun->lun_tg_pt_gp;
+    rcu_read_lock();
+    l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
     if (!l_tg_pt_gp) {
-        spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+        rcu_read_unlock();
         pr_err("Unable to access l_lun->tg_pt_gp\n");
         rc = TCM_UNSUPPORTED_SCSI_OPCODE;
         goto out;
     }

     if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
-        spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+        rcu_read_unlock();
         pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                 " while TPGS_EXPLICIT_ALUA is disabled\n");
         rc = TCM_UNSUPPORTED_SCSI_OPCODE;
         goto out;
     }
     valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
-    spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+    rcu_read_unlock();

     ptr = &buf[4]; /* Skip over RESERVED area in header */
@@ -662,17 +662,17 @@ target_alua_state_check(struct se_cmd *cmd)
                " target port\n");
         return TCM_ALUA_OFFLINE;
     }
-
-    if (!lun->lun_tg_pt_gp)
+    rcu_read_lock();
+    tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
+    if (!tg_pt_gp) {
+        rcu_read_unlock();
         return 0;
+    }

-    spin_lock(&lun->lun_tg_pt_gp_lock);
-    tg_pt_gp = lun->lun_tg_pt_gp;
     out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
     nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
     tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
-
-    spin_unlock(&lun->lun_tg_pt_gp_lock);
+    rcu_read_unlock();
     /*
      * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
      * statement so the compiler knows explicitly to check this case first.
@@ -1219,10 +1219,10 @@ static int core_alua_set_tg_pt_secondary_state(
     struct t10_alua_tg_pt_gp *tg_pt_gp;
     int trans_delay_msecs;

-    spin_lock(&lun->lun_tg_pt_gp_lock);
-    tg_pt_gp = lun->lun_tg_pt_gp;
+    rcu_read_lock();
+    tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
     if (!tg_pt_gp) {
-        spin_unlock(&lun->lun_tg_pt_gp_lock);
+        rcu_read_unlock();
         pr_err("Unable to complete secondary state"
                 " transition\n");
         return -EINVAL;
@@ -1246,7 +1246,7 @@ static int core_alua_set_tg_pt_secondary_state(
         "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
         tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

-    spin_unlock(&lun->lun_tg_pt_gp_lock);
+    rcu_read_unlock();
     /*
      * Do the optional transition delay after we set the secondary
      * ALUA access state.
      */
@@ -1674,7 +1674,6 @@ int core_alua_set_tg_pt_gp_id(
         pr_err("Maximum ALUA alua_tg_pt_gps_count:"
             " 0x0000ffff reached\n");
         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-        kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
         return -ENOSPC;
     }
 again:
@@ -1755,13 +1754,14 @@ void core_alua_free_tg_pt_gp(
             __target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
         } else
-            lun->lun_tg_pt_gp = NULL;
+            rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
         spin_unlock(&lun->lun_tg_pt_gp_lock);

         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
     }
     spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

+    synchronize_rcu();
     kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
 }
@@ -1806,7 +1806,7 @@ static void __target_attach_tg_pt_gp(struct se_lun *lun,
     assert_spin_locked(&lun->lun_tg_pt_gp_lock);

     spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-    lun->lun_tg_pt_gp = tg_pt_gp;
+    rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp);
     list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
     tg_pt_gp->tg_pt_gp_members++;
     spin_lock(&lun->lun_deve_lock);
@@ -1823,6 +1823,7 @@ void target_attach_tg_pt_gp(struct se_lun *lun,
     spin_lock(&lun->lun_tg_pt_gp_lock);
     __target_attach_tg_pt_gp(lun, tg_pt_gp);
     spin_unlock(&lun->lun_tg_pt_gp_lock);
+    synchronize_rcu();
 }

 static void __target_detach_tg_pt_gp(struct se_lun *lun,
@@ -1834,8 +1835,6 @@ static void __target_detach_tg_pt_gp(struct se_lun *lun,
     list_del_init(&lun->lun_tg_pt_gp_link);
     tg_pt_gp->tg_pt_gp_members--;
     spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
-
-    lun->lun_tg_pt_gp = NULL;
 }

 void target_detach_tg_pt_gp(struct se_lun *lun)
@@ -1843,10 +1842,25 @@ void target_detach_tg_pt_gp(struct se_lun *lun)
     struct t10_alua_tg_pt_gp *tg_pt_gp;

     spin_lock(&lun->lun_tg_pt_gp_lock);
-    tg_pt_gp = lun->lun_tg_pt_gp;
-    if (tg_pt_gp)
+    tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
+                lockdep_is_held(&lun->lun_tg_pt_gp_lock));
+    if (tg_pt_gp) {
         __target_detach_tg_pt_gp(lun, tg_pt_gp);
+        rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
+    }
     spin_unlock(&lun->lun_tg_pt_gp_lock);
+    synchronize_rcu();
+}
+
+static void target_swap_tg_pt_gp(struct se_lun *lun,
+                                 struct t10_alua_tg_pt_gp *old_tg_pt_gp,
+                                 struct t10_alua_tg_pt_gp *new_tg_pt_gp)
+{
+    assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
+    if (old_tg_pt_gp)
+        __target_detach_tg_pt_gp(lun, old_tg_pt_gp);
+    __target_attach_tg_pt_gp(lun, new_tg_pt_gp);
 }

 ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
@@ -1855,8 +1869,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
     struct t10_alua_tg_pt_gp *tg_pt_gp;
     ssize_t len = 0;

-    spin_lock(&lun->lun_tg_pt_gp_lock);
-    tg_pt_gp = lun->lun_tg_pt_gp;
+    rcu_read_lock();
+    tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
     if (tg_pt_gp) {
         tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
         len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
@@ -1872,7 +1886,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
             "Offline" : "None",
             core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
     }
-    spin_unlock(&lun->lun_tg_pt_gp_lock);
+    rcu_read_unlock();

     return len;
 }
@@ -1919,7 +1933,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
     }

     spin_lock(&lun->lun_tg_pt_gp_lock);
-    tg_pt_gp = lun->lun_tg_pt_gp;
+    tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
+                lockdep_is_held(&lun->lun_tg_pt_gp_lock));
     if (tg_pt_gp) {
         /*
          * Clearing an existing tg_pt_gp association, and replacing
@@ -1937,18 +1952,16 @@ ssize_t core_alua_store_tg_pt_gp_info(
                     &tg_pt_gp->tg_pt_gp_group.cg_item),
                 tg_pt_gp->tg_pt_gp_id);

-            __target_detach_tg_pt_gp(lun, tg_pt_gp);
-            __target_attach_tg_pt_gp(lun,
+            target_swap_tg_pt_gp(lun, tg_pt_gp,
                     dev->t10_alua.default_tg_pt_gp);
             spin_unlock(&lun->lun_tg_pt_gp_lock);

-            return count;
+            goto sync_rcu;
         }
-        __target_detach_tg_pt_gp(lun, tg_pt_gp);
         move = 1;
     }

-    __target_attach_tg_pt_gp(lun, tg_pt_gp_new);
+    target_swap_tg_pt_gp(lun, tg_pt_gp, tg_pt_gp_new);
     spin_unlock(&lun->lun_tg_pt_gp_lock);
     pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
         " Target Port Group: alua/%s, ID: %hu\n", (move) ?
@@ -1959,6 +1972,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
         tg_pt_gp_new->tg_pt_gp_id);

     core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+sync_rcu:
+    synchronize_rcu();
     return count;
 }
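
The ALUA rework above is the standard RCU-protected-pointer pattern: readers go
lockless, writers stay serialized by the existing spinlock, and a grace period
separates unpublishing a group from freeing it. Reduced to a skeleton — the names
below are invented, only the pattern matches the change:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct grp { int state; };

    static struct grp __rcu *cur_grp;
    static DEFINE_SPINLOCK(grp_lock);

    static int read_state(void)             /* lockless reader */
    {
        struct grp *g;
        int state = -1;

        rcu_read_lock();
        g = rcu_dereference(cur_grp);
        if (g)
            state = g->state;               /* no sleeping in this section */
        rcu_read_unlock();
        return state;
    }

    static void replace_grp(struct grp *new)    /* serialized writer */
    {
        struct grp *old;

        spin_lock(&grp_lock);
        old = rcu_dereference_protected(cur_grp, lockdep_is_held(&grp_lock));
        rcu_assign_pointer(cur_grp, new);
        spin_unlock(&grp_lock);

        synchronize_rcu();                  /* wait out readers still on "old" */
        kfree(old);
    }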
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 023bd4516a68..4c86697fe4ec 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -490,6 +490,7 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
              * fabric driver unload of TFO->module to proceed.
              */
             rcu_barrier();
+            kfree(t->tf_tpg_base_cit.ct_attrs);
             kfree(t);
             return;
         }
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8cb1fa0c0585..44bb380e7390 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -772,6 +772,8 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
     INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
     spin_lock_init(&dev->t10_alua.lba_map_lock);

+    INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
+
     dev->t10_wwn.t10_dev = dev;
     /*
      * Use OpenFabrics IEEE Company ID: 00 14 05
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index fc7edc04ee09..0b65de9f2df1 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -815,8 +815,76 @@ static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
     .release        = target_fabric_tpg_release,
 };

-TF_CIT_SETUP_DRV(tpg_base, &target_fabric_tpg_base_item_ops, NULL);
+static ssize_t target_fabric_tpg_base_enable_show(struct config_item *item,
+                                                  char *page)
+{
+    return sysfs_emit(page, "%d\n", to_tpg(item)->enabled);
+}
+
+static ssize_t target_fabric_tpg_base_enable_store(struct config_item *item,
+                                                   const char *page,
+                                                   size_t count)
+{
+    struct se_portal_group *se_tpg = to_tpg(item);
+    int ret;
+    bool op;
+
+    ret = strtobool(page, &op);
+    if (ret)
+        return ret;
+
+    if (se_tpg->enabled == op)
+        return count;
+
+    ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, op);
+    if (ret)
+        return ret;
+
+    se_tpg->enabled = op;
+
+    return count;
+}
+
+CONFIGFS_ATTR(target_fabric_tpg_base_, enable);
+
+static int
+target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf)
+{
+    struct config_item_type *cit = &tf->tf_tpg_base_cit;
+    struct configfs_attribute **attrs = NULL;
+    size_t nr_attrs = 0;
+    int i = 0;
+
+    if (tf->tf_ops->tfc_tpg_base_attrs)
+        while (tf->tf_ops->tfc_tpg_base_attrs[nr_attrs] != NULL)
+            nr_attrs++;
+
+    if (tf->tf_ops->fabric_enable_tpg)
+        nr_attrs++;
+
+    if (nr_attrs == 0)
+        goto done;
+
+    /* + 1 for final NULL in the array */
+    attrs = kcalloc(nr_attrs + 1, sizeof(*attrs), GFP_KERNEL);
+    if (!attrs)
+        return -ENOMEM;
+
+    if (tf->tf_ops->tfc_tpg_base_attrs)
+        for (; tf->tf_ops->tfc_tpg_base_attrs[i] != NULL; i++)
+            attrs[i] = tf->tf_ops->tfc_tpg_base_attrs[i];
+
+    if (tf->tf_ops->fabric_enable_tpg)
+        attrs[i] = &target_fabric_tpg_base_attr_enable;
+
+done:
+    cit->ct_item_ops = &target_fabric_tpg_base_item_ops;
+    cit->ct_attrs = attrs;
+    cit->ct_owner = tf->tf_ops->module;
+    pr_debug("Setup generic tpg_base\n");
+
+    return 0;
+}

 /* End of tfc_tpg_base_cit */

 /* Start of tfc_tpg_cit */
@@ -1028,12 +1096,18 @@ TF_CIT_SETUP_DRV(discovery, NULL, NULL);

 int target_fabric_setup_cits(struct target_fabric_configfs *tf)
 {
+    int ret;
+
     target_fabric_setup_discovery_cit(tf);
     target_fabric_setup_wwn_cit(tf);
     target_fabric_setup_wwn_fabric_stats_cit(tf);
     target_fabric_setup_wwn_param_cit(tf);
     target_fabric_setup_tpg_cit(tf);
-    target_fabric_setup_tpg_base_cit(tf);
+
+    ret = target_fabric_setup_tpg_base_cit(tf);
+    if (ret)
+        return ret;
+
     target_fabric_setup_tpg_port_cit(tf);
     target_fabric_setup_tpg_port_stat_cit(tf);
     target_fabric_setup_tpg_lun_cit(tf);
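
Note how the kfree() added in target_unregister_template() pairs with the kcalloc()
in target_fabric_setup_tpg_base_cit() above: the core now owns a merged,
NULL-terminated attribute array. The merge idiom in isolation — a sketch with
invented names, not the core's helper:

    #include <linux/slab.h>

    /* Merge a driver-supplied NULL-terminated pointer array with one extra
     * core-owned entry; caller frees the result (invented names). */
    static void **merge_null_terminated(void **drv, void *extra)
    {
        size_t n = 0, i;
        void **out;

        while (drv && drv[n])
            n++;
        out = kcalloc(n + (extra ? 1 : 0) + 1, sizeof(*out), GFP_KERNEL);
        if (!out)
            return NULL;
        for (i = 0; i < n; i++)
            out[i] = drv[i];
        if (extra)
            out[i] = extra; /* kcalloc() already NULL-terminated the array */
        return out;
    }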
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index b1ef041cacd8..bf8ae4825a06 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -636,12 +636,10 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 {
     struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
     struct block_device *bd = ib_dev->ibd_bd;
-    char buf[BDEVNAME_SIZE];
     ssize_t bl = 0;

     if (bd)
-        bl += sprintf(b + bl, "iBlock device: %s",
-                bdevname(bd, buf));
+        bl += sprintf(b + bl, "iBlock device: %pg", bd);
     if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
         bl += sprintf(b + bl, " UDEV PATH: %s",
                 ib_dev->ibd_udev_path);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index a343bcfa2180..a889a6237d9c 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -151,6 +151,7 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
 void transport_clear_lun_ref(struct se_lun *);
 sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
 void target_qf_do_work(struct work_struct *work);
+void target_do_delayed_work(struct work_struct *work);
 bool target_check_wce(struct se_device *dev);
 bool target_check_fua(struct se_device *dev);
 void __target_execute_cmd(struct se_cmd *, bool);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 14c6f2bb1b01..4a2e749eb182 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1511,10 +1511,10 @@ target_cmd_parse_cdb(struct se_cmd *cmd)

     ret = dev->transport->parse_cdb(cmd);
     if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
-        pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
-                    cmd->se_tfo->fabric_name,
-                    cmd->se_sess->se_node_acl->initiatorname,
-                    cmd->t_task_cdb[0]);
+        pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
+                     cmd->se_tfo->fabric_name,
+                     cmd->se_sess->se_node_acl->initiatorname,
+                     cmd->t_task_cdb[0]);
     if (ret)
         return ret;
@@ -2173,32 +2173,39 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
      */
     switch (cmd->sam_task_attr) {
     case TCM_HEAD_TAG:
+        atomic_inc_mb(&dev->non_ordered);
         pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
              cmd->t_task_cdb[0]);
         return false;
     case TCM_ORDERED_TAG:
-        atomic_inc_mb(&dev->dev_ordered_sync);
+        atomic_inc_mb(&dev->delayed_cmd_count);

         pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
              cmd->t_task_cdb[0]);
-
-        /*
-         * Execute an ORDERED command if no other older commands
-         * exist that need to be completed first.
-         */
-        if (!atomic_read(&dev->simple_cmds))
-            return false;
         break;
     default:
         /*
          * For SIMPLE and UNTAGGED Task Attribute commands
          */
-        atomic_inc_mb(&dev->simple_cmds);
+        atomic_inc_mb(&dev->non_ordered);
+
+        if (atomic_read(&dev->delayed_cmd_count) == 0)
+            return false;
         break;
     }

-    if (atomic_read(&dev->dev_ordered_sync) == 0)
-        return false;
+    if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
+        atomic_inc_mb(&dev->delayed_cmd_count);
+        /*
+         * We will account for this when we dequeue from the delayed
+         * list.
+         */
+        atomic_dec_mb(&dev->non_ordered);
+    }
+
+    spin_lock_irq(&cmd->t_state_lock);
+    cmd->transport_state &= ~CMD_T_SENT;
+    spin_unlock_irq(&cmd->t_state_lock);

     spin_lock(&dev->delayed_cmd_lock);
     list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
@@ -2206,6 +2213,12 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
     pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
         cmd->t_task_cdb[0], cmd->sam_task_attr);
+    /*
+     * We may have no non ordered cmds when this function started or we
+     * could have raced with the last simple/head cmd completing, so kick
+     * the delayed handler here.
+     */
+    schedule_work(&dev->delayed_cmd_work);
     return true;
 }
@@ -2228,12 +2241,8 @@ void target_execute_cmd(struct se_cmd *cmd)
     if (target_write_prot_action(cmd))
         return;

-    if (target_handle_task_attr(cmd)) {
-        spin_lock_irq(&cmd->t_state_lock);
-        cmd->transport_state &= ~CMD_T_SENT;
-        spin_unlock_irq(&cmd->t_state_lock);
+    if (target_handle_task_attr(cmd))
         return;
-    }

     __target_execute_cmd(cmd, true);
 }
@@ -2243,29 +2252,48 @@ EXPORT_SYMBOL(target_execute_cmd);
  * Process all commands up to the last received ORDERED task attribute which
  * requires another blocking boundary
  */
-static void target_restart_delayed_cmds(struct se_device *dev)
+void target_do_delayed_work(struct work_struct *work)
 {
-    for (;;) {
+    struct se_device *dev = container_of(work, struct se_device,
+                         delayed_cmd_work);
+
+    spin_lock(&dev->delayed_cmd_lock);
+    while (!dev->ordered_sync_in_progress) {
         struct se_cmd *cmd;

-        spin_lock(&dev->delayed_cmd_lock);
-        if (list_empty(&dev->delayed_cmd_list)) {
-            spin_unlock(&dev->delayed_cmd_lock);
+        if (list_empty(&dev->delayed_cmd_list))
             break;
-        }

         cmd = list_entry(dev->delayed_cmd_list.next,
                  struct se_cmd, se_delayed_node);
+
+        if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
+            /*
+             * Check if we started with:
+             * [ordered] [simple] [ordered]
+             * and we are now at the last ordered so we have to wait
+             * for the simple cmd.
+             */
+            if (atomic_read(&dev->non_ordered) > 0)
+                break;
+
+            dev->ordered_sync_in_progress = true;
+        }
+
         list_del(&cmd->se_delayed_node);
+        atomic_dec_mb(&dev->delayed_cmd_count);
         spin_unlock(&dev->delayed_cmd_lock);

+        if (cmd->sam_task_attr != TCM_ORDERED_TAG)
+            atomic_inc_mb(&dev->non_ordered);
+
         cmd->transport_state |= CMD_T_SENT;

         __target_execute_cmd(cmd, true);

-        if (cmd->sam_task_attr == TCM_ORDERED_TAG)
-            break;
+        spin_lock(&dev->delayed_cmd_lock);
     }
+    spin_unlock(&dev->delayed_cmd_lock);
 }

 /*
@@ -2283,14 +2311,17 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
         goto restart;

     if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
-        atomic_dec_mb(&dev->simple_cmds);
+        atomic_dec_mb(&dev->non_ordered);
         dev->dev_cur_ordered_id++;
     } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
+        atomic_dec_mb(&dev->non_ordered);
         dev->dev_cur_ordered_id++;
         pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
              dev->dev_cur_ordered_id);
     } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
-        atomic_dec_mb(&dev->dev_ordered_sync);
+        spin_lock(&dev->delayed_cmd_lock);
+        dev->ordered_sync_in_progress = false;
+        spin_unlock(&dev->delayed_cmd_lock);

         dev->dev_cur_ordered_id++;
         pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
@@ -2299,7 +2330,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
     cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;

 restart:
-    target_restart_delayed_cmds(dev);
+    if (atomic_read(&dev->delayed_cmd_count) > 0)
+        schedule_work(&dev->delayed_cmd_work);
 }

 static void transport_complete_qf(struct se_cmd *cmd)
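
The reworked ordered-tag path above replaces inline dispatch with a workqueue
drain: commands park on delayed_cmd_list and target_do_delayed_work() runs them
until it reaches an ORDERED command that still has older non-ordered commands in
flight. The two gating decisions in miniature — a deliberately simplified model
with invented names, not the kernel's structures:

    #include <stdbool.h>

    struct dev_counts {
        int  non_ordered;    /* SIMPLE/HEAD commands in flight */
        int  delayed;        /* commands parked on the delayed list */
        bool ordered_sync;   /* an ORDERED command is currently draining */
    };

    /* Submit side: must a SIMPLE command be parked behind ORDERED ones?
     * (HEAD_OF_QUEUE commands bypass the queue entirely in the real code.) */
    static bool simple_must_wait(const struct dev_counts *c)
    {
        return c->delayed > 0;
    }

    /* Drain side: may the ORDERED command at the head start now? */
    static bool ordered_may_start(const struct dev_counts *c)
    {
        return !c->ordered_sync && c->non_ordered == 0;
    }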
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9f552f48084c..7b2a89a67cdb 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -523,8 +523,8 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
     rcu_read_unlock();

     for (i = cnt; i < page_cnt; i++) {
-        /* try to get new page from the mm */
-        page = alloc_page(GFP_NOIO);
+        /* try to get new zeroed page from the mm */
+        page = alloc_page(GFP_NOIO | __GFP_ZERO);
         if (!page)
             break;

@@ -1255,7 +1255,6 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
 {
     int i = 0, cmd_cnt = 0;
     bool unqueued = false;
-    uint16_t *cmd_ids = NULL;
     struct tcmu_cmd *cmd;
     struct se_cmd *se_cmd;
     struct tcmu_tmr *tmr;
@@ -1292,7 +1291,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
     pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
          tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);

-    tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO);
+    tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO);
     if (!tmr)
         goto unlock;
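
The tcmu hunk above swaps an open-coded trailing-array allocation for
struct_size(), which saturates on arithmetic overflow so that kmalloc() fails
cleanly rather than under-allocating. The idiom, shown on an invented struct with
a similar shape (u16 IDs trailing a fixed header):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_tmr {
        u8  tmr_type;
        u32 tmr_cmd_cnt;
        u16 tmr_cmd_ids[];      /* flexible array member */
    };

    static struct example_tmr *example_alloc(u32 cnt, gfp_t gfp)
    {
        struct example_tmr *tmr;

        /* same bytes as sizeof(*tmr) + cnt * sizeof(u16), minus the
         * silent overflow */
        tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cnt), gfp);
        return tmr;
    }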
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index d4fe7cb2bd00..6bb20aa9c5bc 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -295,8 +295,7 @@ out:
     return -EINVAL;
 }

-static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
-                    unsigned char *p)
+static int target_xcopy_parse_segdesc_02(struct xcopy_op *xop, unsigned char *p)
 {
     unsigned char *desc = p;
     int dc = (desc[1] & 0x02);
@@ -332,9 +331,9 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
     return 0;
 }

-static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
-                struct xcopy_op *xop, unsigned char *p,
-                unsigned int sdll, sense_reason_t *sense_ret)
+static int target_xcopy_parse_segment_descriptors(struct xcopy_op *xop,
+                unsigned char *p, unsigned int sdll,
+                sense_reason_t *sense_ret)
 {
     unsigned char *desc = p;
     unsigned int start = 0;
@@ -362,7 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
          */
         switch (desc[0]) {
         case 0x02:
-            rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
+            rc = target_xcopy_parse_segdesc_02(xop, desc);
             if (rc < 0)
                 goto out;

@@ -840,8 +839,7 @@ static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
      */
     seg_desc = &p[16] + tdll;

-    rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
-                sdll, &ret);
+    rc = target_xcopy_parse_segment_descriptors(xop, seg_desc, sdll, &ret);
     if (rc <= 0)
         goto out;