author    Justin Tee <justin.tee@broadcom.com>    2024-01-31 19:51:08 +0100
committer Martin K. Petersen <martin.petersen@oracle.com>    2024-02-06 02:51:36 +0100
commit    9bb36777d0a2a22f11264c36f91a2682bfedb9d4 (patch)
tree      1b5773e1fe7da7a7ba5b5109d0f873da8cdcdf96 /drivers/scsi/lpfc/lpfc_attr.c
parent    scsi: lpfc: Change nlp state statistic counters into atomic_t (diff)
scsi: lpfc: Protect vport fc_nodes list with an explicit spin lock
In an attempt to reduce the number of unnecessary shost_lock acquisitions in the lpfc driver, replace shost_lock with an explicit fc_nodes_list_lock spinlock when accessing vport->fc_nodes lists. Although the vport memory region is owned by shost->hostdata, it is driver-private memory, and an explicit fc_nodes list lock for fc_nodes list mutations is more appropriate than locking the entire shost.

Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20240131185112.149731-14-justintee8345@gmail.com
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
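For readers unfamiliar with the pattern, the sketch below illustrates the general conversion this patch performs: a dedicated spinlock embedded next to the list it protects, initialized once, and taken with spin_lock_irqsave()/spin_unlock_irqrestore() around list walks instead of the Scsi_Host-wide host_lock. The struct and function names here (other than fc_nodes, fc_nodes_list_lock, nlp_listp, and cmd_qdepth, which appear in the diff) are illustrative only and are not the driver's actual definitions.

/*
 * Minimal sketch of the locking pattern used by this patch.
 * Struct layout and init function are illustrative; the real
 * definitions live in the lpfc driver headers.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_vport {
	spinlock_t       fc_nodes_list_lock;   /* protects fc_nodes */
	struct list_head fc_nodes;             /* list of nodelist entries */
};

struct example_nodelist {
	struct list_head nlp_listp;            /* linkage on vport->fc_nodes */
	unsigned int     cmd_qdepth;
};

/* One-time setup when the vport is created. */
static void example_vport_init(struct example_vport *vport)
{
	spin_lock_init(&vport->fc_nodes_list_lock);
	INIT_LIST_HEAD(&vport->fc_nodes);
}

/* List walks take the list's own lock rather than shost->host_lock. */
static void example_update_qdepth(struct example_vport *vport,
				  unsigned int qdepth)
{
	struct example_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
		ndlp->cmd_qdepth = qdepth;
	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
}

Because only fc_nodes list traversals and mutations need this lock, contention on the shared host_lock drops, and the irqsave/irqrestore variants keep the lock safe to take from contexts where interrupts may already be disabled.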
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_attr.c')
-rw-r--r--    drivers/scsi/lpfc/lpfc_attr.c    35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 142c90eb210f..023f4f2c62a6 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -344,6 +344,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_fc4_ctrl_stat *cstat;
uint64_t data1, data2, data3;
uint64_t totin, totout, tot;
+ unsigned long iflags;
char *statep;
int i;
int len = 0;
@@ -543,7 +544,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
@@ -617,7 +618,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto unlock_buf_done;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
if (!lport)
goto buffer_done;
@@ -681,7 +682,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
goto buffer_done;
unlock_buf_done:
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
buffer_done:
len = strnlen(buf, PAGE_SIZE);
@@ -3765,15 +3766,14 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
static void
lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
{
- struct Scsi_Host *shost;
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remoteport = NULL;
#endif
- shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
@@ -3788,7 +3788,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
vport->cfg_devloss_tmo);
#endif
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
}
/**
@@ -3974,8 +3974,8 @@ lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
static int
lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
return -EINVAL;
@@ -3983,14 +3983,13 @@ lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
if (val == vport->cfg_tgt_queue_depth)
return 0;
- spin_lock_irq(shost->host_lock);
vport->cfg_tgt_queue_depth = val;
/* Next loop thru nodelist and change cmd_qdepth */
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
-
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return 0;
}
@@ -5236,8 +5235,8 @@ lpfc_vport_param_show(max_scsicmpl_time);
static int
lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
{
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp, *next_ndlp;
+ unsigned long iflags;
if (val == vport->cfg_max_scsicmpl_time)
return 0;
@@ -5245,13 +5244,13 @@ lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
return -EINVAL;
vport->cfg_max_scsicmpl_time = val;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
continue;
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return 0;
}
lpfc_vport_param_store(max_scsicmpl_time);
@@ -6853,17 +6852,19 @@ lpfc_get_node_by_target(struct scsi_target *starget)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
/* Search for this, mapped, target ID */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
starget->id == ndlp->nlp_sid) {
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock,
+ iflags);
return ndlp;
}
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
return NULL;
}