From e21e0243e7b0f1c2a21d21f4d115f7b37175772a Mon Sep 17 00:00:00 2001
From: Mario Limonciello
Date: Fri, 28 May 2021 11:02:34 -0500
Subject: nvme-pci: look for StorageD3Enable on companion ACPI device instead

The documentation around the StorageD3Enable property hints that it
should be set on the PCI device itself. This is where newer AMD systems
set the property, and it is required for S0i3 support. So rather than
look for the PXSX/PEGP nodes under the root port, which are only present
on Intel systems, switch to the companion ACPI device for all systems.
David Box from Intel indicated this should work on Intel as well.

Link: https://lore.kernel.org/linux-nvme/YK6gmAWqaRmvpJXb@google.com/T/#m900552229fa455867ee29c33b854845fce80ba70
Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
Fixes: df4f9bc4fb9c ("nvme-pci: add support for ACPI StorageD3Enable property")
Suggested-by: Liang Prike
Acked-by: Raul E Rangel
Signed-off-by: Mario Limonciello
Reviewed-by: David E. Box
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/pci.c | 24 +-----------------------
 1 file changed, 1 insertion(+), 23 deletions(-)

(limited to 'drivers/nvme/host/pci.c')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a29b170701fc..3aa7245a505f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2831,10 +2831,7 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 #ifdef CONFIG_ACPI
 static bool nvme_acpi_storage_d3(struct pci_dev *dev)
 {
-	struct acpi_device *adev;
-	struct pci_dev *root;
-	acpi_handle handle;
-	acpi_status status;
+	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
 	u8 val;
 
 	/*
@@ -2842,28 +2839,9 @@ static bool nvme_acpi_storage_d3(struct pci_dev *dev)
 	 * must use D3 to support deep platform power savings during
 	 * suspend-to-idle.
 	 */
-	root = pcie_find_root_port(dev);
-	if (!root)
-		return false;
-
-	adev = ACPI_COMPANION(&root->dev);
+
 	if (!adev)
 		return false;
-
-	/*
-	 * The property is defined in the PXSX device for South complex ports
-	 * and in the PEGP device for North complex ports.
-	 */
-	status = acpi_get_handle(adev->handle, "PXSX", &handle);
-	if (ACPI_FAILURE(status)) {
-		status = acpi_get_handle(adev->handle, "PEGP", &handle);
-		if (ACPI_FAILURE(status))
-			return false;
-	}
-
-	if (acpi_bus_get_device(handle, &adev))
-		return false;
-
 	if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
 			&val))
 		return false;
--
cgit v1.2.3
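The simplified lookup is compact enough to sketch standalone: read a u8
_DSD property from the device's own ACPI companion instead of walking up
to the root port. Below is a minimal sketch of the same pattern for a
hypothetical driver; the function name is illustrative, while
ACPI_COMPANION(), acpi_fwnode_handle() and fwnode_property_read_u8() are
the interfaces used in the patch above.

#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/property.h>

/* Illustrative helper, same shape as the simplified
 * nvme_acpi_storage_d3() above. */
static bool example_wants_d3(struct pci_dev *pdev)
{
	/* The companion is the ACPI node of this PCI device itself,
	 * so the lookup does not depend on vendor-specific child
	 * nodes (PXSX/PEGP) under the root port. */
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	u8 val;

	if (!adev)
		return false;
	if (fwnode_property_read_u8(acpi_fwnode_handle(adev),
				    "StorageD3Enable", &val))
		return false;
	return val == 1;
}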
From 2744d7a0733503931b71c00d156119ced002f22c Mon Sep 17 00:00:00 2001
From: Mario Limonciello
Date: Wed, 9 Jun 2021 13:40:17 -0500
Subject: ACPI: Check StorageD3Enable _DSD property in ACPI code

Although first implemented for NVMe, this check may be usable by other
drivers as well. Microsoft's specification explicitly mentions that it
may be usable by SATA and AHCI devices. Google also indicates that they
have used this with SDHCI in a downstream kernel tree, for slots that a
user can plug a storage device into.

Link: https://docs.microsoft.com/en-us/windows-hardware/design/component-guidelines/power-management-for-storage-hardware-devices-intro
Suggested-by: Keith Busch
CC: Shyam-sundar S-k
CC: Alexander Deucher
CC: Rafael J. Wysocki
CC: Prike Liang
Signed-off-by: Mario Limonciello
Reviewed-by: Rafael J. Wysocki
Signed-off-by: Christoph Hellwig
---
 drivers/acpi/device_pm.c | 29 +++++++++++++++++++++++++++++
 drivers/nvme/host/pci.c  | 28 +---------------------------
 include/linux/acpi.h     |  5 +++++
 3 files changed, 35 insertions(+), 27 deletions(-)

(limited to 'drivers/nvme/host/pci.c')

diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index d260bc1f3e6e..d76ab50c71dc 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1340,4 +1340,33 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
 	return 1;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
+
+/**
+ * acpi_storage_d3 - Check if D3 should be used in the suspend path
+ * @dev: Device to check
+ *
+ * Return %true if the platform firmware wants @dev to be programmed
+ * into D3hot or D3cold (if supported) in the suspend path, or %false
+ * when there is no specific preference. On some platforms, if this
+ * hint is ignored, @dev may remain unresponsive after suspending the
+ * platform as a whole.
+ *
+ * Although the property has storage in the name, it is actually
+ * applied to the PCIe slot; if a non-storage device is plugged into
+ * the slot, the same platform restrictions will likely apply.
+ */
+bool acpi_storage_d3(struct device *dev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(dev);
+	u8 val;
+
+	if (!adev)
+		return false;
+	if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
+			&val))
+		return false;
+	return val == 1;
+}
+EXPORT_SYMBOL_GPL(acpi_storage_d3);
+
 #endif /* CONFIG_PM */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3aa7245a505f..8fbc4c87a0d8 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2828,32 +2828,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_ACPI
-static bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
-	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
-	u8 val;
-
-	/*
-	 * Look for _DSD property specifying that the storage device on the port
-	 * must use D3 to support deep platform power savings during
-	 * suspend-to-idle.
-	 */
-
-	if (!adev)
-		return false;
-	if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable",
-			&val))
-		return false;
-	return val == 1;
-}
-#else
-static inline bool nvme_acpi_storage_d3(struct pci_dev *dev)
-{
-	return false;
-}
-#endif /* CONFIG_ACPI */
-
 static void nvme_async_probe(void *data, async_cookie_t cookie)
 {
 	struct nvme_dev *dev = data;
@@ -2903,7 +2877,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	quirks |= check_vendor_combination_bug(pdev);
 
-	if (!noacpi && nvme_acpi_storage_d3(pdev)) {
+	if (!noacpi && acpi_storage_d3(&pdev->dev)) {
 		/*
 		 * Some systems use a bios work around to ask for D3 on
 		 * platforms that support kernel managed suspend.
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c60745f657e9..dd0dafd21e33 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1004,6 +1004,7 @@ int acpi_dev_resume(struct device *dev);
 int acpi_subsys_runtime_suspend(struct device *dev);
 int acpi_subsys_runtime_resume(struct device *dev);
 int acpi_dev_pm_attach(struct device *dev, bool power_on);
+bool acpi_storage_d3(struct device *dev);
 #else
 static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
 static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
@@ -1011,6 +1012,10 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
 {
 	return 0;
 }
+static inline bool acpi_storage_d3(struct device *dev)
+{
+	return false;
+}
 #endif

 #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
--
cgit v1.2.3
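With the helper exported from the ACPI core, the check collapses to a
single call for any bus driver, as the nvme_probe() hunk above shows. A
hedged sketch of a hypothetical non-NVMe caller follows; example_probe
is illustrative, while acpi_storage_d3() and dev_info() are the real
interfaces.

#include <linux/acpi.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	/* Honor the platform's StorageD3Enable hint: when set, the
	 * suspend path should put the device into D3 rather than a
	 * driver-specific low-power state. */
	if (acpi_storage_d3(&pdev->dev))
		dev_info(&pdev->dev, "platform prefers D3 on suspend\n");

	return 0;
}

The stub returning false in the !CONFIG_ACPI branch of acpi.h is what
lets callers like this compile unchanged on non-ACPI configurations.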
From a0aac973a26d1ac814b9e131e209eb39472a67ce Mon Sep 17 00:00:00 2001
From: JK Kim
Date: Thu, 17 Jun 2021 15:02:17 +0900
Subject: nvme-pci: fix var. type for increasing cq_head

nvmeq->cq_head is compared with nvmeq->q_depth, and when they match,
the head wraps to zero and cq_phase is toggled so that the next CQ
doorbell is handled correctly. However, nvmeq->q_depth is a u32 whose
maximum value is 0x10000 (when CAP.MQES is 0xffff and io_queue_depth
is 0x10000), while the temporary variable used for the comparison was
a u16. When the previous nvmeq->cq_head is 0xffff, incrementing the
temporary overflows it to zero, the comparison never succeeds, and
nvmeq->cq_phase is not updated. Fix this by changing the temporary
variable's type to u32.

Signed-off-by: JK Kim
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/pci.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/nvme/host/pci.c')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8fbc4c87a0d8..5a72bdf5ad03 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1032,7 +1032,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 
 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
 {
-	u16 tmp = nvmeq->cq_head + 1;
+	u32 tmp = nvmeq->cq_head + 1;
 
 	if (tmp == nvmeq->q_depth) {
 		nvmeq->cq_head = 0;
--
cgit v1.2.3
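The bug is plain C integer truncation and can be demonstrated outside
the kernel. A standalone userspace sketch follows, with uint16_t and
uint32_t standing in for the kernel's u16 and u32; the 0x10000 depth
corresponds to CAP.MQES == 0xffff.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t q_depth = 0x10000;	/* CAP.MQES 0xffff -> depth 0x10000 */
	uint16_t cq_head = 0xffff;	/* last valid head index */

	uint16_t tmp16 = cq_head + 1;	/* truncates to 0: never matches */
	uint32_t tmp32 = cq_head + 1;	/* 0x10000: matches q_depth */

	printf("u16: %#x == %#x ? %d\n", tmp16, q_depth, tmp16 == q_depth);
	printf("u32: %#x == %#x ? %d\n", tmp32, q_depth, tmp32 == q_depth);
	return 0;
}

With the u16 temporary the wrap/phase-toggle branch is unreachable at
the maximum queue depth, which is exactly the stuck-cq_phase symptom
the commit message describes.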
From cb1b10e7ac6c1438247ee3c7e4a2f2332a77ba07 Mon Sep 17 00:00:00 2001
From: Chaitanya Kulkarni
Date: Mon, 7 Jun 2021 12:54:54 -0700
Subject: nvme-pci: remove trailing lines for helpers

Remove the extra blank line at the end of these functions.

Signed-off-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/pci.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'drivers/nvme/host/pci.c')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a72bdf5ad03..138e7e7453dd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -559,7 +559,6 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
 		dma_addr = next_dma_addr;
 	}
-
 }
 
 static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
@@ -576,7 +575,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
 		dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
 		dma_addr = next_dma_addr;
 	}
-
 }
 
 static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
--
cgit v1.2.3

From 253a0b76a12a4cce14095b3d74004e67a6434d79 Mon Sep 17 00:00:00 2001
From: Chaitanya Kulkarni
Date: Wed, 9 Jun 2021 18:28:25 -0700
Subject: nvme-pci: use ctrl sgl check helper

Use the helper to check the NVMe controller's SGL support.

Signed-off-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/pci.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/nvme/host/pci.c')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 138e7e7453dd..12ffd58c27b1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -536,7 +536,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 
 	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
 
-	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
 		return false;
 	if (!iod->nvmeq->qid)
 		return false;
@@ -853,7 +853,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 							     &cmnd->rw, &bv);
 
 			if (iod->nvmeq->qid && sgl_threshold &&
-			    dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
+			    nvme_ctrl_sgl_supported(&dev->ctrl))
 				return nvme_setup_sgl_simple(dev, req,
 							     &cmnd->rw, &bv);
 		}
--
cgit v1.2.3
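The patch does not show the helper itself; judging from the open-coded
test it replaces, its shape is plausibly the following. This is a sketch
inferred from the removed lines, not necessarily the verbatim nvme.h
definition: the two low bits of the Identify Controller SGLS field
encode whether the controller supports SGLs at all (and with what
alignment constraint).

/* Sketch of the helper's likely shape, inferred from the removed
 * expression dev->ctrl.sgls & ((1 << 0) | (1 << 1)). */
static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}

Centralizing the bit test means any future change to the SGLS encoding
needs updating in one place instead of at every call site.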
From f66e2804d61aef690bb428d8de6a127f844bb240 Mon Sep 17 00:00:00 2001
From: Chaitanya Kulkarni
Date: Wed, 16 Jun 2021 15:15:53 -0700
Subject: nvme-pci: remove zeroout memset call for struct

Declare and initialize structure variables to zero so that the zeroing
memset() calls in host/pci.c can be removed.

Signed-off-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/pci.c | 24 ++++++++----------------
 1 file changed, 8 insertions(+), 16 deletions(-)

(limited to 'drivers/nvme/host/pci.c')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 12ffd58c27b1..d3c5086673bc 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -307,13 +307,12 @@ static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
 
 static void nvme_dbbuf_set(struct nvme_dev *dev)
 {
-	struct nvme_command c;
+	struct nvme_command c = { };
 	unsigned int i;
 
 	if (!dev->dbbuf_dbs)
 		return;
 
-	memset(&c, 0, sizeof(c));
 	c.dbbuf.opcode = nvme_admin_dbbuf;
 	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
 	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);
@@ -1112,9 +1111,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 {
 	struct nvme_dev *dev = to_nvme_dev(ctrl);
 	struct nvme_queue *nvmeq = &dev->queues[0];
-	struct nvme_command c;
+	struct nvme_command c = { };
 
-	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
 	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
 	nvme_submit_cmd(nvmeq, &c, true);
@@ -1122,9 +1120,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
 {
-	struct nvme_command c;
+	struct nvme_command c = { };
 
-	memset(&c, 0, sizeof(c));
 	c.delete_queue.opcode = opcode;
 	c.delete_queue.qid = cpu_to_le16(id);
 
@@ -1134,7 +1131,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
 static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
 		struct nvme_queue *nvmeq, s16 vector)
 {
-	struct nvme_command c;
+	struct nvme_command c = { };
 	int flags = NVME_QUEUE_PHYS_CONTIG;
 
 	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
@@ -1144,7 +1141,6 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
 	 * Note: we (ab)use the fact that the prp fields survive if no data
 	 * is attached to the request.
 	 */
-	memset(&c, 0, sizeof(c));
 	c.create_cq.opcode = nvme_admin_create_cq;
 	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
 	c.create_cq.cqid = cpu_to_le16(qid);
@@ -1159,7 +1155,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
 		struct nvme_queue *nvmeq)
 {
 	struct nvme_ctrl *ctrl = &dev->ctrl;
-	struct nvme_command c;
+	struct nvme_command c = { };
 	int flags = NVME_QUEUE_PHYS_CONTIG;
 
 	/*
@@ -1174,7 +1170,6 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
 	 * Note: we (ab)use the fact that the prp fields survive if no data
 	 * is attached to the request.
 	 */
-	memset(&c, 0, sizeof(c));
 	c.create_sq.opcode = nvme_admin_create_sq;
 	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
 	c.create_sq.sqid = cpu_to_le16(qid);
@@ -1255,7 +1250,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	struct nvme_queue *nvmeq = iod->nvmeq;
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *abort_req;
-	struct nvme_command cmd;
+	struct nvme_command cmd = { };
 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
 	/* If PCI error recovery process is happening, we cannot reset or
@@ -1335,7 +1330,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	}
 	iod->aborted = 1;
 
-	memset(&cmd, 0, sizeof(cmd));
 	cmd.abort.opcode = nvme_admin_abort_cmd;
 	cmd.abort.cid = req->tag;
 	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
@@ -1886,10 +1880,9 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
 {
 	u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
 	u64 dma_addr = dev->host_mem_descs_dma;
-	struct nvme_command c;
+	struct nvme_command c = { };
 	int ret;
 
-	memset(&c, 0, sizeof(c));
 	c.features.opcode = nvme_admin_set_features;
 	c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
 	c.features.dword11 = cpu_to_le32(bits);
@@ -2263,9 +2256,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 {
 	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
 	struct request *req;
-	struct nvme_command cmd;
+	struct nvme_command cmd = { };
 
-	memset(&cmd, 0, sizeof(cmd));
 	cmd.delete_queue.opcode = opcode;
 	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
 
--
cgit v1.2.3
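The conversion leans on C initialization semantics: a braced initializer
zero-initializes every member that is not explicitly set, so
struct nvme_command c = { }; makes the follow-up memset redundant. The
empty-brace spelling is a GNU extension the kernel builds with; { 0 } is
the strict ISO C form used in this standalone illustration.

#include <stdio.h>
#include <string.h>

struct cmd {
	unsigned char opcode;
	unsigned short qid;
	unsigned int prp1;
};

int main(void)
{
	struct cmd a;			/* old style: declare... */
	struct cmd b = { 0 };		/* new style: zero-initialize */

	memset(&a, 0, sizeof(a));	/* ...then wipe explicitly */

	/* All members of b are zero. Compilers in practice zero the
	 * padding too, though ISO C only guarantees the member values,
	 * so this byte-wise compare usually prints 1. */
	printf("%d\n", memcmp(&a, &b, sizeof(a)) == 0);
	return 0;
}

Besides dropping a call per command setup, the initializer guarantees
the structure is never visible in a non-zeroed state, which the
declare-then-memset pattern cannot promise if code is later inserted
between the declaration and the memset.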