path: root/drivers/scsi/lpfc/lpfc_nvme.h
author		James Smart <jsmart2021@gmail.com>	2019-01-28 20:14:22 +0100
committer	Martin K. Petersen <martin.petersen@oracle.com>	2019-02-06 04:24:22 +0100
commit		5e5b511d8bfaf765cb92a695cda336c936cb86dc (patch)
tree		317cb0e4afa34afee89e181a90e7066da05c069e /drivers/scsi/lpfc/lpfc_nvme.h
parent		scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu (diff)
scsi: lpfc: Partition XRI buffer list across Hardware Queues
Once the IO buffer allocations were made shared, there was a single XRI buffer list shared by all hardware queues. A single list isn't great for performance when shared across the per-cpu hardware queues.

Create a separate XRI IO buffer get/put list for each hardware queue. As SGLs and associated IO buffers are allocated and posted to the firmware, round-robin their assignment across all available hardware queues so that the distribution is equitable.

Modify the SCSI and NVME IO submit code paths to use the hardware queue logic for XRI allocation.

Add a debugfs interface to display hardware queue statistics.

Add a new empty_io_bufs counter to track when a CPU runs out of XRIs.

Replace common_ variables/names with io_ to make their meaning clearer.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
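To illustrate the scheme described above, here is a minimal user-space C sketch. The io_buf, hdw_queue, and hba types and the post_io_bufs/get_io_buf helpers are hypothetical names for illustration, not the lpfc driver's real structures; locking and SLI4/firmware posting details are omitted. Each hardware queue keeps its own buffer free list, newly posted buffers are spread round-robin across the queues, and an empty_io_bufs counter records when a queue runs dry.

/* Hypothetical sketch of per-hardware-queue buffer lists -- not lpfc code. */
#include <stdio.h>
#include <stdlib.h>

struct io_buf {
	struct io_buf *next;
	unsigned short hdwq;          /* hardware queue that owns this buffer */
};

struct hdw_queue {
	struct io_buf *free_list;     /* per-queue get/put list */
	unsigned long empty_io_bufs;  /* times this queue ran out of buffers */
};

struct hba {
	struct hdw_queue *hdwq;
	int nr_hdwq;
};

/* Post newly allocated buffers round-robin across all hardware queues. */
static void post_io_bufs(struct hba *h, struct io_buf *bufs, int nbufs)
{
	int i, q = 0;

	for (i = 0; i < nbufs; i++) {
		bufs[i].hdwq = (unsigned short)q;
		bufs[i].next = h->hdwq[q].free_list;
		h->hdwq[q].free_list = &bufs[i];
		q = (q + 1) % h->nr_hdwq;
	}
}

/* Take a buffer from one queue's private list; count the misses. */
static struct io_buf *get_io_buf(struct hba *h, int qidx)
{
	struct hdw_queue *hq = &h->hdwq[qidx];
	struct io_buf *b = hq->free_list;

	if (!b) {
		hq->empty_io_bufs++;
		return NULL;
	}
	hq->free_list = b->next;
	return b;
}

int main(void)
{
	struct hba h = { .nr_hdwq = 4 };
	struct io_buf bufs[8];
	struct io_buf *b;

	h.hdwq = calloc((size_t)h.nr_hdwq, sizeof(*h.hdwq));
	if (!h.hdwq)
		return 1;
	post_io_bufs(&h, bufs, 8);
	b = get_io_buf(&h, 2);
	printf("got buffer owned by hdwq %u\n", b ? b->hdwq : 0);
	free(h.hdwq);
	return 0;
}

Keeping a private list per hardware queue avoids the contention a single shared list sees when every per-cpu queue allocates and frees through it, which is the performance problem the commit message calls out.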
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_nvme.h')
-rw-r--r--	drivers/scsi/lpfc/lpfc_nvme.h	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index e3a554417e98..4a020b9c8fbf 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -84,6 +84,8 @@ struct lpfc_nvme_buf {
 	dma_addr_t dma_phys_sgl;
 	struct sli4_sge *dma_sgl;
 	struct lpfc_iocbq cur_iocbq;
+	uint16_t hdwq;
+	uint16_t cpu;
 
 	/* NVME specific fields */
 	struct nvmefc_fcp_req *nvmeCmd;
@@ -95,7 +97,6 @@ struct lpfc_nvme_buf {
 #define LPFC_SBUF_XBUSY		0x1	/* SLI4 hba reported XB on WCQE cmpl */
 #define LPFC_BUMP_QDEPTH	0x2	/* bumped queue depth counter */
 	uint16_t exch_busy;	/* SLI4 hba reported XB on complete WCQE */
-	uint16_t cpu;
 	uint16_t status;	/* From IOCB Word 7- ulpStatus */
 	uint32_t result;	/* From IOCB Word 4. */
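The hunks above add an hdwq field to struct lpfc_nvme_buf and relocate the existing cpu field next to it, so each IO buffer records which hardware queue it belongs to. As a rough, hypothetical sketch of how such fields might be populated at submit time (the modulo cpu-to-queue mapping below is an assumption for illustration, not the driver's actual policy):

/* Hypothetical illustration -- not the driver's actual submit path. */
#include <stdio.h>

struct nvme_buf_fields {          /* mirrors only the two fields in the hunk above */
	unsigned short hdwq;      /* hardware queue chosen for this IO */
	unsigned short cpu;       /* cpu that issued the IO */
};

/* Placeholder policy: fold the submitting cpu onto one of nr_hdwq queues. */
static unsigned short cpu_to_hdwq(unsigned int cpu, unsigned int nr_hdwq)
{
	return (unsigned short)(cpu % nr_hdwq);
}

int main(void)
{
	struct nvme_buf_fields buf;
	unsigned int cpu = 5, nr_hdwq = 4;

	buf.cpu = (unsigned short)cpu;
	buf.hdwq = cpu_to_hdwq(cpu, nr_hdwq);
	printf("cpu %u -> hdwq %u\n", buf.cpu, buf.hdwq);
	return 0;
}

Tagging each buffer with its owning queue is what lets the get/put operations in the commit stay local to one hardware queue's XRI list rather than touching shared state.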