author		Linus Torvalds <torvalds@linux-foundation.org>	2022-10-11 20:08:18 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-10-11 20:08:18 +0200
commit		041bc24d867a2a577a06534d6d25e500b24a01ef (patch)
tree		a653e985ec1995e9f126a8408da70653c11dc783 /drivers/pci
parent		Merge tag 'i2c-for-6.1-rc1-batch2' of git://git.kernel.org/pub/scm/linux/kern... (diff)
parent		Merge branch 'for-linus' into next (diff)
Merge tag 'pci-v6.1-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull PCI updates from Bjorn Helgaas:
 "Resource management:

   - Distribute spare resources to unconfigured hotplug bridges at
     boot time (not just when hot-adding such a bridge), which makes
     hot-adding devices to docks work better.

   - Revert to a BAR assignment inherited from firmware only when the
     address is actually reachable via any upstream bridges, which
     fixes some cases where firmware doesn't configure all devices.

   - Add a sysfs interface to resize BARs so this can be done before
     assigning devices to a VM through VFIO.

  Power management:

   - Disable Precision Time Management for all devices on suspend to
     allow a lower-power PM state. We previously did this just for
     Root Ports, which isn't enough because downstream devices can
     still generate PTM messages, which cause errors if PTM is
     disabled in the Root Port.

   - Save and restore the ASPM L1 PM Substates configuration for
     suspend/resume. Previously this configuration was lost, so L1.x
     states likely stopped working after resume.

   - Check whether the L1 PM Substates Capability exists. If it didn't
     exist, we previously read junk and tried to configure L1
     Substates based on that.

   - Fix the LTR_L1.2_THRESHOLD computation, which previously set a
     threshold for entering L1.2 that was too low in some cases (a
     worked example follows this message).

   - Reduce the delay after transitions to or from D3cold by using
     usleep_range() rather than msleep(), which often slept for ~19ms
     instead of the 10ms normally required. The spec says 10ms is
     enough, but it's possible we could trip over devices that need a
     little more.

  Error handling:

   - Work around a BIOS bug that caused Intel Root Ports to advertise
     a Root Port Programmed I/O (RP PIO) log size of zero, which
     caused annoying warnings and prevented the kernel from dumping
     log registers for DPC errors.

  Qualcomm PCIe controller driver:

   - Add support for the SC8280XP and SA8540P host controllers and the
     SM8450 endpoint controller.

   - Disable the Master AXI clock on endpoint controllers to save
     power when the link is idle or in L1.x.

   - Expose link state transition counts via debugfs to help debug
     issues with low-power states.

   - Add module auto-loading support.

  Synopsys DesignWare PCIe controller driver:

   - Remove a dependency on ZONE_DMA32 by allocating the MSI target
     page differently. There's more work to do related to eDMA
     controllers, so it's not completely settled"

* tag 'pci-v6.1-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (71 commits)
  PCI: qcom-ep: Check platform_get_resource_byname() return value
  PCI: qcom-ep: Add support for SM8450 SoC
  dt-bindings: PCI: qcom-ep: Add support for SM8450 SoC
  dt-bindings: PCI: qcom-ep: Define clocks per platform
  PCI: qcom-ep: Make PERST separation optional
  dt-bindings: PCI: qcom-ep: Make PERST separation optional
  PCI: qcom-ep: Disable Master AXI Clock when there is no PCIe traffic
  PCI: Expose PCIe Resizable BAR support via sysfs
  PCI/ASPM: Correct LTR_L1.2_THRESHOLD computation
  PCI/ASPM: Ignore L1 PM Substates if device lacks capability
  PCI/ASPM: Factor out L1 PM Substates configuration
  PCI: qcom-ep: Gate Master AXI clock to MHI bus during L1SS
  PCI: qcom-ep: Expose link transition counts via debugfs
  PCI: qcom-ep: Disable IRQs during driver remove
  PCI/ASPM: Save L1 PM Substates Capability for suspend/resume
  PCI/ASPM: Refactor L1 PM Substates Control Register programming
  PCI: qcom-ep: Make use of the cached dev pointer
  PCI: qcom-ep: Rely on the clocks supplied by devicetree
  PCI: qcom-ep: Add kernel-doc for qcom_pcie_ep structure
  phy: freescale: imx8m-pcie: Fix the wrong order of phy_init() and phy_power_on()
  ...
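For the LTR_L1.2_THRESHOLD fix mentioned above, the drivers/pci/pcie/aspm.c hunk near the end of this diff shows the new rounding-up encoding. As an editorial illustration (not part of the patch; the 55 us threshold is an arbitrary example), a standalone program that mirrors the new rounding and shows why the old right-shift picked too low a threshold:

/* Illustration of the rounding in the new encode_l12_threshold(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t threshold_ns = 55 * 1000ULL;	/* 55 us, arbitrary example */
	uint32_t scale, value;

	/*
	 * LTR_L1.2_THRESHOLD_Value is a 10-bit field, so each scale covers
	 * up to 0x3ff units; round up so the programmed threshold is never
	 * below the requested time.
	 */
	if (threshold_ns <= 0x3ff * 1ULL) {
		scale = 0; value = threshold_ns;		 /* 1 ns units */
	} else if (threshold_ns <= 0x3ff * 32ULL) {
		scale = 1; value = (threshold_ns + 31) / 32;	 /* 32 ns units */
	} else if (threshold_ns <= 0x3ff * 1024ULL) {
		scale = 2; value = (threshold_ns + 1023) / 1024; /* 1024 ns units */
	} else {
		scale = 0; value = 0;	/* larger scales elided for brevity */
	}

	/*
	 * 55000 ns -> scale=2, value=54, i.e. 54 * 1024 = 55296 ns >= 55000 ns.
	 * The old code picked scale=3, value=55000 >> 15 = 1, i.e. only 32768 ns,
	 * so L1.2 could be entered well before the requested threshold.
	 */
	printf("scale=%u value=%u -> %llu ns\n", scale, value,
	       (unsigned long long)value << (5 * scale));
	return 0;
}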
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c  33
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c  28
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h  1
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c  4
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c  164
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c  128
-rw-r--r--  drivers/pci/controller/pci-aardvark.c  4
-rw-r--r--  drivers/pci/controller/pci-ftpci100.c  21
-rw-r--r--  drivers/pci/controller/pci-mvebu.c  13
-rw-r--r--  drivers/pci/controller/pci-tegra.c  11
-rw-r--r--  drivers/pci/controller/pcie-apple.c  4
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c  2
-rw-r--r--  drivers/pci/controller/pcie-mt7621.c  17
-rw-r--r--  drivers/pci/msi/msi.c  2
-rw-r--r--  drivers/pci/p2pdma.c  2
-rw-r--r--  drivers/pci/pci-bridge-emul.c  48
-rw-r--r--  drivers/pci/pci-bridge-emul.h  2
-rw-r--r--  drivers/pci/pci-driver.c  30
-rw-r--r--  drivers/pci/pci-sysfs.c  108
-rw-r--r--  drivers/pci/pci.c  51
-rw-r--r--  drivers/pci/pci.h  63
-rw-r--r--  drivers/pci/pcie/aspm.c  258
-rw-r--r--  drivers/pci/pcie/dpc.c  15
-rw-r--r--  drivers/pci/pcie/ptm.c  300
-rw-r--r--  drivers/pci/probe.c  13
-rw-r--r--  drivers/pci/quirks.c  36
-rw-r--r--  drivers/pci/setup-bus.c  290
-rw-r--r--  drivers/pci/setup-res.c  11
28 files changed, 1049 insertions, 610 deletions
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 6e5debdbc55b..2616585ca5f8 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -51,6 +51,7 @@ enum imx6_pcie_variants {
IMX7D,
IMX8MQ,
IMX8MM,
+ IMX8MP,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
@@ -61,6 +62,7 @@ struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
u32 flags;
int dbi_length;
+ const char *gpr;
};
struct imx6_pcie {
@@ -150,7 +152,8 @@ struct imx6_pcie {
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MM);
+ imx6_pcie->drvdata->variant != IMX8MM &&
+ imx6_pcie->drvdata->variant != IMX8MP);
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
@@ -301,6 +304,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->drvdata->variant) {
case IMX8MM:
+ case IMX8MP:
/*
* The PHY initialization had been done in the PHY
* driver, break here directly.
@@ -558,6 +562,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
break;
case IMX8MM:
case IMX8MQ:
+ case IMX8MP:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
dev_err(dev, "unable to enable pcie_aux clock\n");
@@ -602,6 +607,7 @@ static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
break;
case IMX8MM:
case IMX8MQ:
+ case IMX8MP:
clk_disable_unprepare(imx6_pcie->pcie_aux);
break;
default:
@@ -669,6 +675,7 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
reset_control_assert(imx6_pcie->pciephy_reset);
fallthrough;
case IMX8MM:
+ case IMX8MP:
reset_control_assert(imx6_pcie->apps_reset);
break;
case IMX6SX:
@@ -744,6 +751,7 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
break;
case IMX6Q: /* Nothing to do */
case IMX8MM:
+ case IMX8MP:
break;
}
@@ -793,6 +801,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
case IMX7D:
case IMX8MQ:
case IMX8MM:
+ case IMX8MP:
reset_control_deassert(imx6_pcie->apps_reset);
break;
}
@@ -812,6 +821,7 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
case IMX7D:
case IMX8MQ:
case IMX8MM:
+ case IMX8MP:
reset_control_assert(imx6_pcie->apps_reset);
break;
}
@@ -935,7 +945,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
if (imx6_pcie->phy) {
- ret = phy_power_on(imx6_pcie->phy);
+ ret = phy_init(imx6_pcie->phy);
if (ret) {
dev_err(dev, "pcie PHY power up failed\n");
goto err_clk_disable;
@@ -949,7 +959,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
if (imx6_pcie->phy) {
- ret = phy_init(imx6_pcie->phy);
+ ret = phy_power_on(imx6_pcie->phy);
if (ret) {
dev_err(dev, "waiting for PHY ready timeout!\n");
goto err_phy_off;
@@ -961,7 +971,7 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
err_phy_off:
if (imx6_pcie->phy)
- phy_power_off(imx6_pcie->phy);
+ phy_exit(imx6_pcie->phy);
err_clk_disable:
imx6_pcie_clk_disable(imx6_pcie);
err_reg_disable:
@@ -1179,6 +1189,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
}
break;
case IMX8MM:
+ case IMX8MP:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
if (IS_ERR(imx6_pcie->pcie_aux))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
@@ -1216,7 +1227,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
/* Grab GPR config register range */
imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
dev_err(dev, "unable to find iomuxc registers\n");
return PTR_ERR(imx6_pcie->iomuxc_gpr);
@@ -1295,12 +1306,14 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
.dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
},
[IMX6SX] = {
.variant = IMX6SX,
.flags = IMX6_PCIE_FLAG_IMX6_PHY |
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
},
[IMX6QP] = {
.variant = IMX6QP,
@@ -1308,17 +1321,26 @@ static const struct imx6_pcie_drvdata drvdata[] = {
IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
},
[IMX7D] = {
.variant = IMX7D,
.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx7d-iomuxc-gpr",
},
[IMX8MQ] = {
.variant = IMX8MQ,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
},
[IMX8MM] = {
.variant = IMX8MM,
.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ },
+ [IMX8MP] = {
+ .variant = IMX8MP,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
},
};
@@ -1329,6 +1351,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
+ { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 7746f94a715f..39f3b37d4033 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -267,15 +267,6 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
-
- if (pp->msi_data) {
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
-
- dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE);
- if (pp->msi_page)
- __free_page(pp->msi_page);
- }
}
static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
@@ -336,6 +327,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
+ u64 *msi_vaddr;
int ret;
u32 ctrl, num_ctrls;
@@ -375,22 +367,16 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
dw_chained_msi_isr, pp);
}
- ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
- pp->msi_page = alloc_page(GFP_DMA32);
- pp->msi_data = dma_map_page(dev, pp->msi_page, 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
- ret = dma_mapping_error(dev, pp->msi_data);
- if (ret) {
- dev_err(pci->dev, "Failed to map MSI data\n");
- __free_page(pp->msi_page);
- pp->msi_page = NULL;
- pp->msi_data = 0;
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to alloc and map MSI data\n");
dw_pcie_free_msi(pp);
-
- return ret;
+ return -ENOMEM;
}
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 09b887093a84..a871ae7eb59e 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -243,7 +243,6 @@ struct dw_pcie_rp {
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
dma_addr_t msi_data;
- struct page *msi_page;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 7f67aad71df4..d09507f822a7 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
@@ -366,12 +367,11 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
char name[32];
int ret, i;
/* This is an optional property */
- ret = of_gpio_named_count(np, "hisilicon,clken-gpios");
+ ret = gpiod_count(dev, "hisilicon,clken");
if (ret < 0)
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index ec99116ad05c..6d0d1b759ca2 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -10,6 +10,7 @@
*/
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/syscon.h>
@@ -26,6 +27,7 @@
#define PARF_SYS_CTRL 0x00
#define PARF_DB_CTRL 0x10
#define PARF_PM_CTRL 0x20
+#define PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PARF_MHI_BASE_ADDR_LOWER 0x178
#define PARF_MHI_BASE_ADDR_UPPER 0x17c
#define PARF_DEBUG_INT_EN 0x190
@@ -45,6 +47,11 @@
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
@@ -83,6 +90,9 @@
#define PARF_PM_CTRL_READY_ENTR_L23 BIT(2)
#define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5)
+/* PARF_MHI_CLOCK_RESET_CTRL fields */
+#define PARF_MSTR_AXI_CLK_EN BIT(1)
+
/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0)
@@ -95,6 +105,7 @@
/* PARF_SYS_CTRL register fields */
#define PARF_SYS_CTRL_AUX_PWR_DET BIT(4)
#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6)
+#define PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS BIT(10)
#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11)
/* PARF_DB_CTRL register fields */
@@ -130,21 +141,33 @@ enum qcom_pcie_ep_link_status {
QCOM_PCIE_EP_LINK_DOWN,
};
-static struct clk_bulk_data qcom_pcie_ep_clks[] = {
- { .id = "cfg" },
- { .id = "aux" },
- { .id = "bus_master" },
- { .id = "bus_slave" },
- { .id = "ref" },
- { .id = "sleep" },
- { .id = "slave_q2a" },
-};
-
+/**
+ * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
+ * @pci: Designware PCIe controller struct
+ * @parf: Qualcomm PCIe specific PARF register base
+ * @elbi: Designware PCIe specific ELBI register base
+ * @mmio: MMIO register base
+ * @perst_map: PERST regmap
+ * @mmio_res: MMIO region resource
+ * @core_reset: PCIe Endpoint core reset
+ * @reset: PERST# GPIO
+ * @wake: WAKE# GPIO
+ * @phy: PHY controller block
+ * @debugfs: PCIe Endpoint Debugfs directory
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count
+ * @perst_en: Flag for PERST enable
+ * @perst_sep_en: Flag for PERST separation enable
+ * @link_status: PCIe Link status
+ * @global_irq: Qualcomm PCIe specific Global IRQ
+ * @perst_irq: PERST# IRQ
+ */
struct qcom_pcie_ep {
struct dw_pcie pci;
void __iomem *parf;
void __iomem *elbi;
+ void __iomem *mmio;
struct regmap *perst_map;
struct resource *mmio_res;
@@ -152,6 +175,10 @@ struct qcom_pcie_ep {
struct gpio_desc *reset;
struct gpio_desc *wake;
struct phy *phy;
+ struct dentry *debugfs;
+
+ struct clk_bulk_data *clks;
+ int num_clks;
u32 perst_en;
u32 perst_sep_en;
@@ -193,8 +220,10 @@ static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
*/
static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
{
- regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
- regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
+ if (pcie_ep->perst_map) {
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
+ }
}
static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
@@ -227,8 +256,7 @@ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
int ret;
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks);
if (ret)
return ret;
@@ -249,8 +277,7 @@ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
err_phy_exit:
phy_exit(pcie_ep->phy);
err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
return ret;
}
@@ -259,8 +286,7 @@ static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
{
phy_power_off(pcie_ep->phy);
phy_exit(pcie_ep->phy);
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
}
static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
@@ -318,8 +344,14 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
val &= ~PARF_Q2A_FLUSH_EN;
writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);
- /* Disable DBI Wakeup, core clock CGC and enable AUX power */
+ /*
+ * Disable Master AXI clock during idle. Do not allow DBI access
+ * to take the core out of L1. Disable core clock gating that
+ * gates PIPE clock from propagating to core clock. Report to the
+ * host that Vaux is present.
+ */
val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
+ val &= ~PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS;
val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE |
PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
PARF_SYS_CTRL_AUX_PWR_DET;
@@ -375,6 +407,11 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);
+ /* Gate Master AXI clock to MHI bus during L1SS */
+ val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val &= ~PARF_MSTR_AXI_CLK_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+
dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
/* Enable LTSSM */
@@ -437,11 +474,19 @@ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mmio");
+ if (!pcie_ep->mmio_res) {
+ dev_err(dev, "Failed to get mmio resource\n");
+ return -EINVAL;
+ }
+
+ pcie_ep->mmio = devm_pci_remap_cfg_resource(dev, pcie_ep->mmio_res);
+ if (IS_ERR(pcie_ep->mmio))
+ return PTR_ERR(pcie_ep->mmio);
syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
if (!syscon) {
- dev_err(dev, "Failed to parse qcom,perst-regs\n");
- return -EINVAL;
+ dev_dbg(dev, "PERST separation not available\n");
+ return 0;
}
pcie_ep->perst_map = syscon_node_to_regmap(syscon);
@@ -474,14 +519,15 @@ static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
if (ret) {
- dev_err(&pdev->dev, "Failed to get io resources %d\n", ret);
+ dev_err(dev, "Failed to get io resources %d\n", ret);
return ret;
}
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
- if (ret)
- return ret;
+ pcie_ep->num_clks = devm_clk_bulk_get_all(dev, &pcie_ep->clks);
+ if (pcie_ep->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return pcie_ep->num_clks;
+ }
pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
if (IS_ERR(pcie_ep->core_reset))
@@ -495,7 +541,7 @@ static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
if (IS_ERR(pcie_ep->wake))
return PTR_ERR(pcie_ep->wake);
- pcie_ep->phy = devm_phy_optional_get(&pdev->dev, "pciephy");
+ pcie_ep->phy = devm_phy_optional_get(dev, "pciephy");
if (IS_ERR(pcie_ep->phy))
ret = PTR_ERR(pcie_ep->phy);
@@ -571,13 +617,13 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
struct qcom_pcie_ep *pcie_ep)
{
- int irq, ret;
+ int ret;
- irq = platform_get_irq_byname(pdev, "global");
- if (irq < 0)
- return irq;
+ pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
+ if (pcie_ep->global_irq < 0)
+ return pcie_ep->global_irq;
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
qcom_pcie_ep_global_irq_thread,
IRQF_ONESHOT,
"global_irq", pcie_ep);
@@ -594,7 +640,7 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
"perst_irq", pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
- disable_irq(irq);
+ disable_irq(pcie_ep->global_irq);
return ret;
}
@@ -617,6 +663,37 @@ static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
}
+static int qcom_pcie_ep_link_transition_count(struct seq_file *s, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = (struct qcom_pcie_ep *)
+ dev_get_drvdata(s->private);
+
+ seq_printf(s, "L0s transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
+
+ seq_printf(s, "L1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
+
+ seq_printf(s, "L1.1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
+
+ seq_printf(s, "L1.2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
+
+ seq_printf(s, "L2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
+
+ return 0;
+}
+
+static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+
+ debugfs_create_devm_seqfile(pci->dev, "link_transition_count", pcie_ep->debugfs,
+ qcom_pcie_ep_link_transition_count);
+}
+
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
.core_init_notifier = true,
@@ -649,6 +726,7 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qcom_pcie_ep *pcie_ep;
+ char *name;
int ret;
pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
@@ -680,8 +758,21 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
if (ret)
goto err_disable_resources;
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_disable_irqs;
+ }
+
+ pcie_ep->debugfs = debugfs_create_dir(name, NULL);
+ qcom_pcie_ep_init_debugfs(pcie_ep);
+
return 0;
+err_disable_irqs:
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
err_disable_resources:
qcom_pcie_disable_resources(pcie_ep);
@@ -692,6 +783,11 @@ static int qcom_pcie_ep_remove(struct platform_device *pdev)
{
struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
+ debugfs_remove_recursive(pcie_ep->debugfs);
+
if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
return 0;
@@ -702,8 +798,10 @@ static int qcom_pcie_ep_remove(struct platform_device *pdev)
static const struct of_device_id qcom_pcie_ep_match[] = {
{ .compatible = "qcom,sdx55-pcie-ep", },
+ { .compatible = "qcom,sm8450-pcie-ep", },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
static struct platform_driver qcom_pcie_ep_driver = {
.probe = qcom_pcie_ep_probe,
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 66886dc6e777..f711acacaeaf 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -180,7 +180,7 @@ struct qcom_pcie_resources_2_3_3 {
/* 6 clocks typically, 7 for sm8250 */
struct qcom_pcie_resources_2_7_0 {
- struct clk_bulk_data clks[9];
+ struct clk_bulk_data clks[12];
int num_clks;
struct regulator_bulk_data supplies[2];
struct reset_control *pci_reset;
@@ -208,17 +208,12 @@ struct qcom_pcie_ops {
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
- void (*post_deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
int (*config_sid)(struct qcom_pcie *pcie);
};
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
- unsigned int has_tbu_clk:1;
- unsigned int has_ddrss_sf_tbu_clk:1;
- unsigned int has_aggre0_clk:1;
- unsigned int has_aggre1_clk:1;
};
struct qcom_pcie {
@@ -1175,6 +1170,7 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ unsigned int num_clks, num_opt_clks;
unsigned int idx;
int ret;
@@ -1195,18 +1191,25 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
res->clks[idx++].id = "bus_master";
res->clks[idx++].id = "bus_slave";
res->clks[idx++].id = "slave_q2a";
- if (pcie->cfg->has_tbu_clk)
- res->clks[idx++].id = "tbu";
- if (pcie->cfg->has_ddrss_sf_tbu_clk)
- res->clks[idx++].id = "ddrss_sf_tbu";
- if (pcie->cfg->has_aggre0_clk)
- res->clks[idx++].id = "aggre0";
- if (pcie->cfg->has_aggre1_clk)
- res->clks[idx++].id = "aggre1";
+ num_clks = idx;
+
+ ret = devm_clk_bulk_get(dev, num_clks, res->clks);
+ if (ret < 0)
+ return ret;
+
+ res->clks[idx++].id = "tbu";
+ res->clks[idx++].id = "ddrss_sf_tbu";
+ res->clks[idx++].id = "aggre0";
+ res->clks[idx++].id = "aggre1";
+ res->clks[idx++].id = "noc_aggr_4";
+ res->clks[idx++].id = "noc_aggr_south_sf";
+ res->clks[idx++].id = "cnoc_qx";
+
+ num_opt_clks = idx - num_clks;
res->num_clks = idx;
- ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
+ ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
if (ret < 0)
return ret;
@@ -1509,15 +1512,13 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (pcie->cfg->ops->config_sid) {
ret = pcie->cfg->ops->config_sid(pcie);
if (ret)
- goto err;
+ goto err_assert_reset;
}
return 0;
-err:
+err_assert_reset:
qcom_ep_reset_assert(pcie);
- if (pcie->cfg->ops->post_deinit)
- pcie->cfg->ops->post_deinit(pcie);
err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
@@ -1601,68 +1602,35 @@ static const struct qcom_pcie_ops ops_2_9_0 = {
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
-static const struct qcom_pcie_cfg apq8084_cfg = {
+static const struct qcom_pcie_cfg cfg_1_0_0 = {
.ops = &ops_1_0_0,
};
-static const struct qcom_pcie_cfg ipq8064_cfg = {
+static const struct qcom_pcie_cfg cfg_1_9_0 = {
+ .ops = &ops_1_9_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_1_0 = {
.ops = &ops_2_1_0,
};
-static const struct qcom_pcie_cfg msm8996_cfg = {
+static const struct qcom_pcie_cfg cfg_2_3_2 = {
.ops = &ops_2_3_2,
};
-static const struct qcom_pcie_cfg ipq8074_cfg = {
+static const struct qcom_pcie_cfg cfg_2_3_3 = {
.ops = &ops_2_3_3,
};
-static const struct qcom_pcie_cfg ipq4019_cfg = {
+static const struct qcom_pcie_cfg cfg_2_4_0 = {
.ops = &ops_2_4_0,
};
-static const struct qcom_pcie_cfg sdm845_cfg = {
+static const struct qcom_pcie_cfg cfg_2_7_0 = {
.ops = &ops_2_7_0,
- .has_tbu_clk = true,
-};
-
-static const struct qcom_pcie_cfg sm8150_cfg = {
- /* sm8150 has qcom IP rev 1.5.0. However 1.5.0 ops are same as
- * 1.9.0, so reuse the same.
- */
- .ops = &ops_1_9_0,
-};
-
-static const struct qcom_pcie_cfg sm8250_cfg = {
- .ops = &ops_1_9_0,
- .has_tbu_clk = true,
- .has_ddrss_sf_tbu_clk = true,
-};
-
-static const struct qcom_pcie_cfg sm8450_pcie0_cfg = {
- .ops = &ops_1_9_0,
- .has_ddrss_sf_tbu_clk = true,
- .has_aggre0_clk = true,
- .has_aggre1_clk = true,
-};
-
-static const struct qcom_pcie_cfg sm8450_pcie1_cfg = {
- .ops = &ops_1_9_0,
- .has_ddrss_sf_tbu_clk = true,
- .has_aggre1_clk = true,
-};
-
-static const struct qcom_pcie_cfg sc7280_cfg = {
- .ops = &ops_1_9_0,
- .has_tbu_clk = true,
-};
-
-static const struct qcom_pcie_cfg sc8180x_cfg = {
- .ops = &ops_1_9_0,
- .has_tbu_clk = true,
};
-static const struct qcom_pcie_cfg ipq6018_cfg = {
+static const struct qcom_pcie_cfg cfg_2_9_0 = {
.ops = &ops_2_9_0,
};
@@ -1761,22 +1729,24 @@ err_pm_runtime_put:
}
static const struct of_device_id qcom_pcie_match[] = {
- { .compatible = "qcom,pcie-apq8084", .data = &apq8084_cfg },
- { .compatible = "qcom,pcie-ipq8064", .data = &ipq8064_cfg },
- { .compatible = "qcom,pcie-ipq8064-v2", .data = &ipq8064_cfg },
- { .compatible = "qcom,pcie-apq8064", .data = &ipq8064_cfg },
- { .compatible = "qcom,pcie-msm8996", .data = &msm8996_cfg },
- { .compatible = "qcom,pcie-ipq8074", .data = &ipq8074_cfg },
- { .compatible = "qcom,pcie-ipq4019", .data = &ipq4019_cfg },
- { .compatible = "qcom,pcie-qcs404", .data = &ipq4019_cfg },
- { .compatible = "qcom,pcie-sdm845", .data = &sdm845_cfg },
- { .compatible = "qcom,pcie-sm8150", .data = &sm8150_cfg },
- { .compatible = "qcom,pcie-sm8250", .data = &sm8250_cfg },
- { .compatible = "qcom,pcie-sc8180x", .data = &sc8180x_cfg },
- { .compatible = "qcom,pcie-sm8450-pcie0", .data = &sm8450_pcie0_cfg },
- { .compatible = "qcom,pcie-sm8450-pcie1", .data = &sm8450_pcie1_cfg },
- { .compatible = "qcom,pcie-sc7280", .data = &sc7280_cfg },
- { .compatible = "qcom,pcie-ipq6018", .data = &ipq6018_cfg },
+ { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
+ { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
+ { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
+ { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ }
};
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 966c8b48bd96..ba36bbc5897d 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -33,6 +33,7 @@
#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
+#define PCIE_CORE_SSDEV_ID_REG 0x2c
#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_PCIERR_CAP 0x100
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
@@ -1077,7 +1078,10 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
/* Indicates supports for Completion Retry Status */
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+ bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
+ bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
bridge->has_pcie = true;
+ bridge->pcie_start = PCIE_CORE_PCIEXP_CAP;
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index 88980a44461d..0cfd9d5a497c 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -103,13 +103,6 @@
#define FARADAY_PCI_DMA_MEM2_BASE 0x00000000
#define FARADAY_PCI_DMA_MEM3_BASE 0x00000000
-/* Defines for PCI configuration command register */
-#define PCI_CONF_ENABLE BIT(31)
-#define PCI_CONF_WHERE(r) ((r) & 0xFC)
-#define PCI_CONF_BUS(b) (((b) & 0xFF) << 16)
-#define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11)
-#define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8)
-
/**
* struct faraday_pci_variant - encodes IP block differences
* @cascaded_irq: this host has cascaded IRQs from an interrupt controller
@@ -190,11 +183,8 @@ static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
unsigned int fn, int config, int size,
u32 *value)
{
- writel(PCI_CONF_BUS(bus_number) |
- PCI_CONF_DEVICE(PCI_SLOT(fn)) |
- PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
- PCI_CONF_WHERE(config) |
- PCI_CONF_ENABLE,
+ writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn),
+ PCI_FUNC(fn), config),
p->base + FTPCI_CONFIG);
*value = readl(p->base + FTPCI_DATA);
@@ -225,11 +215,8 @@ static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
{
int ret = PCIBIOS_SUCCESSFUL;
- writel(PCI_CONF_BUS(bus_number) |
- PCI_CONF_DEVICE(PCI_SLOT(fn)) |
- PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
- PCI_CONF_WHERE(config) |
- PCI_CONF_ENABLE,
+ writel(PCI_CONF1_ADDRESS(bus_number, PCI_SLOT(fn),
+ PCI_FUNC(fn), config),
p->base + FTPCI_CONFIG);
switch (size) {
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index af915c951f06..1ced73726a26 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -523,7 +523,7 @@ static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
/* Are the new iobase/iolimit values invalid? */
if (conf->iolimit < conf->iobase ||
- conf->iolimitupper < conf->iobaseupper)
+ le16_to_cpu(conf->iolimitupper) < le16_to_cpu(conf->iobaseupper))
return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
&desired, &port->iowin);
@@ -535,10 +535,10 @@ static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
* is the CPU address.
*/
desired.remap = ((conf->iobase & 0xF0) << 8) |
- (conf->iobaseupper << 16);
+ (le16_to_cpu(conf->iobaseupper) << 16);
desired.base = port->pcie->io.start + desired.remap;
desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
- (conf->iolimitupper << 16)) -
+ (le16_to_cpu(conf->iolimitupper) << 16)) -
desired.remap) +
1;
@@ -552,7 +552,7 @@ static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new membase/memlimit values invalid? */
- if (conf->memlimit < conf->membase)
+ if (le16_to_cpu(conf->memlimit) < le16_to_cpu(conf->membase))
return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
&desired, &port->memwin);
@@ -562,8 +562,8 @@ static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
* window to setup, according to the PCI-to-PCI bridge
* specifications.
*/
- desired.base = ((conf->membase & 0xFFF0) << 16);
- desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ desired.base = ((le16_to_cpu(conf->membase) & 0xFFF0) << 16);
+ desired.size = (((le16_to_cpu(conf->memlimit) & 0xFFF0) << 16) | 0xFFFFF) -
desired.base + 1;
return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
@@ -946,6 +946,7 @@ static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
bridge->subsystem_vendor_id = ssdev_id & 0xffff;
bridge->subsystem_id = ssdev_id >> 16;
bridge->has_pcie = true;
+ bridge->pcie_start = PCIE_CAP_PCIEXP;
bridge->data = port;
bridge->ops = &mvebu_pci_bridge_emul_ops;
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 8e323e93be91..24478ae5a345 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -415,13 +415,6 @@ static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
* address (access to which generates correct config transaction) falls in
* this 4 KiB region.
*/
-static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
- unsigned int where)
-{
- return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
- (PCI_FUNC(devfn) << 8) | (where & 0xff);
-}
-
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn,
int where)
@@ -443,7 +436,9 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int offset;
u32 base;
- offset = tegra_pcie_conf_offset(bus->number, devfn, where);
+ offset = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where) &
+ ~PCI_CONF1_ENABLE;
/* move 4 KiB window to offset within the FPCI region */
base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index a2c3c207a04b..66f37e403a09 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -516,8 +516,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
u32 stat, idx;
int ret, i;
- reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
- GPIOD_OUT_LOW, "PERST#");
+ reset = devm_fwnode_gpiod_get(pcie->dev, of_fwnode_handle(np), "reset",
+ GPIOD_OUT_LOW, "PERST#");
if (IS_ERR(reset))
return PTR_ERR(reset);
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 11cdb9b6f109..b8612ce5f4d0 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -1071,7 +1071,7 @@ static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
.remove = mtk_pcie_remove,
.driver = {
- .name = "mtk-pcie",
+ .name = "mtk-pcie-gen3",
.of_match_table = mtk_pcie_of_match,
.pm = &mtk_pcie_pm_ops,
},
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 33eb37a2225c..4bd1abf26008 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -30,6 +30,8 @@
#include <linux/reset.h>
#include <linux/sys_soc.h>
+#include "../pci.h"
+
/* MediaTek-specific configuration registers */
#define PCIE_FTS_NUM 0x70c
#define PCIE_FTS_NUM_MASK GENMASK(15, 8)
@@ -120,19 +122,12 @@ static inline void pcie_port_write(struct mt7621_pcie_port *port,
writel_relaxed(val, port->base + reg);
}
-static inline u32 mt7621_pcie_get_cfgaddr(unsigned int bus, unsigned int slot,
- unsigned int func, unsigned int where)
-{
- return (((where & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) |
- (func << 8) | (where & 0xfc) | 0x80000000;
-}
-
static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
struct mt7621_pcie *pcie = bus->sysdata;
- u32 address = mt7621_pcie_get_cfgaddr(bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where);
+ u32 address = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
@@ -147,7 +142,7 @@ static struct pci_ops mt7621_pcie_ops = {
static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
{
- u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg);
+ u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg);
pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
return pcie_read(pcie, RALINK_PCI_CONFIG_DATA);
@@ -156,7 +151,7 @@ static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
u32 reg, u32 val)
{
- u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg);
+ u32 address = PCI_CONF1_EXT_ADDRESS(0, dev, 0, reg);
pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 9037a7827eca..fdd2ec09651e 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -526,7 +526,7 @@ static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
!desc.pci.msi_attrib.is_virtual;
- if (!desc.pci.msi_attrib.can_mask) {
+ if (desc.pci.msi_attrib.can_mask) {
addr = pci_msix_desc_addr(&desc);
desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 4496a7c5c478..88dc66ee1c46 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -649,7 +649,7 @@ struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
if (!closest_pdevs)
return NULL;
- while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+ for_each_pci_dev(pdev) {
if (!pci_has_p2pmem(pdev))
continue;
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 9c2ca28e3ecf..9334b2dd4764 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -22,11 +22,7 @@
#define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
#define PCI_CAP_SSID_SIZEOF (PCI_SSVID_DEVICE_ID + 2)
-#define PCI_CAP_SSID_START PCI_BRIDGE_CONF_END
-#define PCI_CAP_SSID_END (PCI_CAP_SSID_START + PCI_CAP_SSID_SIZEOF)
#define PCI_CAP_PCIE_SIZEOF (PCI_EXP_SLTSTA2 + 2)
-#define PCI_CAP_PCIE_START PCI_CAP_SSID_END
-#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_CAP_PCIE_SIZEOF)
/**
* struct pci_bridge_reg_behavior - register bits behaviors
@@ -324,7 +320,7 @@ pci_bridge_emul_read_ssid(struct pci_bridge_emul *bridge, int reg, u32 *value)
switch (reg) {
case PCI_CAP_LIST_ID:
*value = PCI_CAP_ID_SSVID |
- (bridge->has_pcie ? (PCI_CAP_PCIE_START << 8) : 0);
+ ((bridge->pcie_start > bridge->ssid_start) ? (bridge->pcie_start << 8) : 0);
return PCI_BRIDGE_EMUL_HANDLED;
case PCI_SSVID_VENDOR_ID:
@@ -365,18 +361,33 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
if (!bridge->pci_regs_behavior)
return -ENOMEM;
- if (bridge->subsystem_vendor_id)
- bridge->conf.capabilities_pointer = PCI_CAP_SSID_START;
- else if (bridge->has_pcie)
- bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
- else
- bridge->conf.capabilities_pointer = 0;
+ /* If ssid_start and pcie_start were not specified then choose the lowest possible value. */
+ if (!bridge->ssid_start && !bridge->pcie_start) {
+ if (bridge->subsystem_vendor_id)
+ bridge->ssid_start = PCI_BRIDGE_CONF_END;
+ if (bridge->has_pcie)
+ bridge->pcie_start = bridge->ssid_start + PCI_CAP_SSID_SIZEOF;
+ } else if (!bridge->ssid_start && bridge->subsystem_vendor_id) {
+ if (bridge->pcie_start - PCI_BRIDGE_CONF_END >= PCI_CAP_SSID_SIZEOF)
+ bridge->ssid_start = PCI_BRIDGE_CONF_END;
+ else
+ bridge->ssid_start = bridge->pcie_start + PCI_CAP_PCIE_SIZEOF;
+ } else if (!bridge->pcie_start && bridge->has_pcie) {
+ if (bridge->ssid_start - PCI_BRIDGE_CONF_END >= PCI_CAP_PCIE_SIZEOF)
+ bridge->pcie_start = PCI_BRIDGE_CONF_END;
+ else
+ bridge->pcie_start = bridge->ssid_start + PCI_CAP_SSID_SIZEOF;
+ }
+
+ bridge->conf.capabilities_pointer = min(bridge->ssid_start, bridge->pcie_start);
if (bridge->conf.capabilities_pointer)
bridge->conf.status |= cpu_to_le16(PCI_STATUS_CAP_LIST);
if (bridge->has_pcie) {
bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
+ bridge->pcie_conf.next = (bridge->ssid_start > bridge->pcie_start) ?
+ bridge->ssid_start : 0;
bridge->pcie_conf.cap |= cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4);
bridge->pcie_cap_regs_behavior =
kmemdup(pcie_cap_regs_behavior,
@@ -459,15 +470,17 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
read_op = bridge->ops->read_base;
cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
- } else if (reg >= PCI_CAP_SSID_START && reg < PCI_CAP_SSID_END && bridge->subsystem_vendor_id) {
+ } else if (reg >= bridge->ssid_start && reg < bridge->ssid_start + PCI_CAP_SSID_SIZEOF &&
+ bridge->subsystem_vendor_id) {
/* Emulated PCI Bridge Subsystem Vendor ID capability */
- reg -= PCI_CAP_SSID_START;
+ reg -= bridge->ssid_start;
read_op = pci_bridge_emul_read_ssid;
cfgspace = NULL;
behavior = NULL;
- } else if (reg >= PCI_CAP_PCIE_START && reg < PCI_CAP_PCIE_END && bridge->has_pcie) {
+ } else if (reg >= bridge->pcie_start && reg < bridge->pcie_start + PCI_CAP_PCIE_SIZEOF &&
+ bridge->has_pcie) {
/* Our emulated PCIe capability */
- reg -= PCI_CAP_PCIE_START;
+ reg -= bridge->pcie_start;
read_op = bridge->ops->read_pcie;
cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
@@ -538,9 +551,10 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
write_op = bridge->ops->write_base;
cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
- } else if (reg >= PCI_CAP_PCIE_START && reg < PCI_CAP_PCIE_END && bridge->has_pcie) {
+ } else if (reg >= bridge->pcie_start && reg < bridge->pcie_start + PCI_CAP_PCIE_SIZEOF &&
+ bridge->has_pcie) {
/* Our emulated PCIe capability */
- reg -= PCI_CAP_PCIE_START;
+ reg -= bridge->pcie_start;
write_op = bridge->ops->write_pcie;
cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
index 71392b67471d..2a0e59c7f0d9 100644
--- a/drivers/pci/pci-bridge-emul.h
+++ b/drivers/pci/pci-bridge-emul.h
@@ -131,6 +131,8 @@ struct pci_bridge_emul {
struct pci_bridge_reg_behavior *pci_regs_behavior;
struct pci_bridge_reg_behavior *pcie_cap_regs_behavior;
void *data;
+ u8 pcie_start;
+ u8 ssid_start;
bool has_pcie;
u16 subsystem_vendor_id;
u16 subsystem_id;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 49238ddd39ee..107d77f3c846 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -774,6 +774,12 @@ static int pci_pm_suspend(struct device *dev)
pci_dev->skip_bus_pm = false;
+ /*
+ * Disabling PTM allows some systems, e.g., Intel mobile chips
+ * since Coffee Lake, to enter a lower-power PM state.
+ */
+ pci_suspend_ptm(pci_dev);
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_SUSPEND);
@@ -867,20 +873,15 @@ static int pci_pm_suspend_noirq(struct device *dev)
}
}
- if (pci_dev->skip_bus_pm) {
+ if (!pci_dev->state_saved) {
+ pci_save_state(pci_dev);
+
/*
- * Either the device is a bridge with a child in D0 below it, or
- * the function is running for the second time in a row without
- * going through full resume, which is possible only during
- * suspend-to-idle in a spurious wakeup case. The device should
- * be in D0 at this point, but if it is a bridge, it may be
- * necessary to save its state.
+ * If the device is a bridge with a child in D0 below it,
+ * it needs to stay in D0, so check skip_bus_pm to avoid
+ * putting it into a low-power state in that case.
*/
- if (!pci_dev->state_saved)
- pci_save_state(pci_dev);
- } else if (!pci_dev->state_saved) {
- pci_save_state(pci_dev);
- if (pci_power_manageable(pci_dev))
+ if (!pci_dev->skip_bus_pm && pci_power_manageable(pci_dev))
pci_prepare_to_sleep(pci_dev);
}
@@ -987,6 +988,8 @@ static int pci_pm_resume(struct device *dev)
if (pci_dev->state_saved)
pci_restore_standard_config(pci_dev);
+ pci_resume_ptm(pci_dev);
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
@@ -1274,6 +1277,8 @@ static int pci_pm_runtime_suspend(struct device *dev)
pci_power_t prev = pci_dev->current_state;
int error;
+ pci_suspend_ptm(pci_dev);
+
/*
* If pci_dev->driver is not set (unbound), we leave the device in D0,
* but it may go to D3cold when the bridge above it runtime suspends.
@@ -1335,6 +1340,7 @@ static int pci_pm_runtime_resume(struct device *dev)
* D3cold when the bridge above it runtime suspended.
*/
pci_pm_default_resume_early(pci_dev);
+ pci_resume_ptm(pci_dev);
if (!pci_dev->driver)
return 0;
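The pci_suspend_ptm()/pci_resume_ptm() calls added above are implemented in drivers/pci/pcie/ptm.c, whose rework is not part of this drivers/pci view beyond the diffstat. As a rough sketch of the intent only (the capability lookup and exact bookkeeping here are assumptions, not the code from this series), the suspend side amounts to clearing PTM Enable while remembering that it was on:

/* Sketch only: not the actual ptm.c change from this pull. */
#include <linux/pci.h>

static void example_suspend_ptm(struct pci_dev *dev)
{
	u32 ctrl;
	/* Assumption: the real code caches the PTM capability offset earlier. */
	u16 ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);

	if (!ptm || !dev->ptm_enabled)
		return;

	pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
	ctrl &= ~PCI_PTM_CTRL_ENABLE;	/* stop PTM messages while suspended */
	pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);

	/* dev->ptm_enabled stays set so a resume helper knows to re-enable PTM. */
}

The resume side would then re-set the enable bit for devices where ptm_enabled is still set, which is why pci_pm_resume() and pci_pm_runtime_resume() above call pci_resume_ptm() after restoring config space.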
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index fc804e08e3cb..0a2eeb82cebd 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/aperture.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -1373,6 +1374,112 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+#define pci_dev_resource_resize_attr(n) \
+static ssize_t resource##n##_resize_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char * buf) \
+{ \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ ssize_t ret; \
+ \
+ pci_config_pm_runtime_get(pdev); \
+ \
+ ret = sysfs_emit(buf, "%016llx\n", \
+ (u64)pci_rebar_get_possible_sizes(pdev, n)); \
+ \
+ pci_config_pm_runtime_put(pdev); \
+ \
+ return ret; \
+} \
+ \
+static ssize_t resource##n##_resize_store(struct device *dev, \
+ struct device_attribute *attr,\
+ const char *buf, size_t count)\
+{ \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ unsigned long size, flags; \
+ int ret, i; \
+ u16 cmd; \
+ \
+ if (kstrtoul(buf, 0, &size) < 0) \
+ return -EINVAL; \
+ \
+ device_lock(dev); \
+ if (dev->driver) { \
+ ret = -EBUSY; \
+ goto unlock; \
+ } \
+ \
+ pci_config_pm_runtime_get(pdev); \
+ \
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { \
+ ret = aperture_remove_conflicting_pci_devices(pdev, \
+ "resourceN_resize"); \
+ if (ret) \
+ goto pm_put; \
+ } \
+ \
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd); \
+ pci_write_config_word(pdev, PCI_COMMAND, \
+ cmd & ~PCI_COMMAND_MEMORY); \
+ \
+ flags = pci_resource_flags(pdev, n); \
+ \
+ pci_remove_resource_files(pdev); \
+ \
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) { \
+ if (pci_resource_len(pdev, i) && \
+ pci_resource_flags(pdev, i) == flags) \
+ pci_release_resource(pdev, i); \
+ } \
+ \
+ ret = pci_resize_resource(pdev, n, size); \
+ \
+ pci_assign_unassigned_bus_resources(pdev->bus); \
+ \
+ if (pci_create_resource_files(pdev)) \
+ pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
+ \
+ pci_write_config_word(pdev, PCI_COMMAND, cmd); \
+pm_put: \
+ pci_config_pm_runtime_put(pdev); \
+unlock: \
+ device_unlock(dev); \
+ \
+ return ret ? ret : count; \
+} \
+static DEVICE_ATTR_RW(resource##n##_resize)
+
+pci_dev_resource_resize_attr(0);
+pci_dev_resource_resize_attr(1);
+pci_dev_resource_resize_attr(2);
+pci_dev_resource_resize_attr(3);
+pci_dev_resource_resize_attr(4);
+pci_dev_resource_resize_attr(5);
+
+static struct attribute *resource_resize_attrs[] = {
+ &dev_attr_resource0_resize.attr,
+ &dev_attr_resource1_resize.attr,
+ &dev_attr_resource2_resize.attr,
+ &dev_attr_resource3_resize.attr,
+ &dev_attr_resource4_resize.attr,
+ &dev_attr_resource5_resize.attr,
+ NULL,
+};
+
+static umode_t resource_resize_is_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode;
+}
+
+static const struct attribute_group pci_dev_resource_resize_group = {
+ .attrs = resource_resize_attrs,
+ .is_visible = resource_resize_is_visible,
+};
+
int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
if (!sysfs_initialized)
@@ -1494,6 +1601,7 @@ const struct attribute_group *pci_dev_groups[] = {
#ifdef CONFIG_ACPI
&pci_dev_acpi_attr_group,
#endif
+ &pci_dev_resource_resize_group,
NULL,
};
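The resource<N>_resize attributes added above are meant to be driven from userspace before a driver is bound, for example ahead of assigning the device to a VM through VFIO as the pull message notes. A hypothetical userspace snippet: the device address is made up, and the meaning of the written value (the Resizable BAR size encoding, log2 of the size in MB) follows pci_resize_resource() rather than anything visible in this hunk:

/* Illustration only: request a 256 MB BAR 0 via the new sysfs attribute. */
#include <stdio.h>

int main(void)
{
	/* Hypothetical device; no driver may be bound when this is written. */
	const char *attr = "/sys/bus/pci/devices/0000:03:00.0/resource0_resize";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/*
	 * Assumption: the value is the Resizable BAR size encoding (2^n MB),
	 * so 8 requests 256 MB. Reading the attribute returns the bitmask
	 * from pci_rebar_get_possible_sizes() shown above.
	 */
	fprintf(f, "8\n");
	fclose(f);
	return 0;
}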
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 95bc329e74c0..2127aba3550b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -66,13 +66,15 @@ struct pci_pme_device {
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
- unsigned int delay = dev->d3hot_delay;
-
- if (delay < pci_pm_d3hot_delay)
- delay = pci_pm_d3hot_delay;
-
- if (delay)
- msleep(delay);
+ unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
+ unsigned int upper;
+
+ if (delay_ms) {
+ /* Use a 20% upper bound, 1ms minimum */
+ upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
+ usleep_range(delay_ms * USEC_PER_MSEC,
+ (delay_ms + upper) * USEC_PER_MSEC);
+ }
}
bool pci_reset_supported(struct pci_dev *dev)
@@ -1663,6 +1665,7 @@ int pci_save_state(struct pci_dev *dev)
return i;
pci_save_ltr_state(dev);
+ pci_save_aspm_l1ss_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1769,6 +1772,7 @@ void pci_restore_state(struct pci_dev *dev)
* LTR itself (in the PCIe capability).
*/
pci_restore_ltr_state(dev);
+ pci_restore_aspm_l1ss_state(dev);
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
@@ -2706,24 +2710,12 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- /*
- * There are systems (for example, Intel mobile chips since Coffee
- * Lake) where the power drawn while suspended can be significantly
- * reduced by disabling PTM on PCIe root ports as this allows the
- * port to enter a lower-power PM state and the SoC to reach a
- * lower-power idle state as a whole.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- pci_disable_ptm(dev);
-
pci_enable_wake(dev, target_state, wakeup);
error = pci_set_power_state(dev, target_state);
- if (error) {
+ if (error)
pci_enable_wake(dev, target_state, false);
- pci_restore_ptm_state(dev);
- }
return error;
}
@@ -2764,24 +2756,12 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- /*
- * There are systems (for example, Intel mobile chips since Coffee
- * Lake) where the power drawn while suspended can be significantly
- * reduced by disabling PTM on PCIe root ports as this allows the
- * port to enter a lower-power PM state and the SoC to reach a
- * lower-power idle state as a whole.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- pci_disable_ptm(dev);
-
__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
error = pci_set_power_state(dev, target_state);
- if (error) {
+ if (error)
pci_enable_wake(dev, target_state, false);
- pci_restore_ptm_state(dev);
- }
return error;
}
@@ -3485,6 +3465,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
pci_err(dev, "unable to allocate suspend buffer for LTR\n");
+ error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
+ 2 * sizeof(u32));
+ if (error)
+ pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
+
pci_allocate_vc_save_buffers(dev);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 785f31086313..b1ebb7ab8805 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -505,13 +505,17 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
#endif /* CONFIG_PCI_IOV */
#ifdef CONFIG_PCIE_PTM
+void pci_ptm_init(struct pci_dev *dev);
void pci_save_ptm_state(struct pci_dev *dev);
void pci_restore_ptm_state(struct pci_dev *dev);
-void pci_disable_ptm(struct pci_dev *dev);
+void pci_suspend_ptm(struct pci_dev *dev);
+void pci_resume_ptm(struct pci_dev *dev);
#else
+static inline void pci_ptm_init(struct pci_dev *dev) { }
static inline void pci_save_ptm_state(struct pci_dev *dev) { }
static inline void pci_restore_ptm_state(struct pci_dev *dev) { }
-static inline void pci_disable_ptm(struct pci_dev *dev) { }
+static inline void pci_suspend_ptm(struct pci_dev *dev) { }
+static inline void pci_resume_ptm(struct pci_dev *dev) { }
#endif
unsigned long pci_cardbus_resource_alignment(struct resource *);
@@ -561,10 +565,14 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+void pci_save_aspm_l1ss_state(struct pci_dev *dev);
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
+static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -575,12 +583,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
static inline void pcie_ecrc_get_policy(char *str) { }
#endif
-#ifdef CONFIG_PCIE_PTM
-void pci_ptm_init(struct pci_dev *dev);
-#else
-static inline void pci_ptm_init(struct pci_dev *dev) { }
-#endif
-
struct pci_dev_reset_methods {
u16 vendor;
u16 device;
@@ -774,4 +776,49 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
}
#endif
+/*
+ * Config Address for PCI Configuration Mechanism #1
+ *
+ * See PCI Local Bus Specification, Revision 3.0,
+ * Section 3.2.2.3.2, Figure 3-2, p. 50.
+ */
+
+#define PCI_CONF1_BUS_SHIFT 16 /* Bus number */
+#define PCI_CONF1_DEV_SHIFT 11 /* Device number */
+#define PCI_CONF1_FUNC_SHIFT 8 /* Function number */
+
+#define PCI_CONF1_BUS_MASK 0xff
+#define PCI_CONF1_DEV_MASK 0x1f
+#define PCI_CONF1_FUNC_MASK 0x7
+#define PCI_CONF1_REG_MASK 0xfc /* Limit aligned offset to a maximum of 256B */
+
+#define PCI_CONF1_ENABLE BIT(31)
+#define PCI_CONF1_BUS(x) (((x) & PCI_CONF1_BUS_MASK) << PCI_CONF1_BUS_SHIFT)
+#define PCI_CONF1_DEV(x) (((x) & PCI_CONF1_DEV_MASK) << PCI_CONF1_DEV_SHIFT)
+#define PCI_CONF1_FUNC(x) (((x) & PCI_CONF1_FUNC_MASK) << PCI_CONF1_FUNC_SHIFT)
+#define PCI_CONF1_REG(x) ((x) & PCI_CONF1_REG_MASK)
+
+#define PCI_CONF1_ADDRESS(bus, dev, func, reg) \
+ (PCI_CONF1_ENABLE | \
+ PCI_CONF1_BUS(bus) | \
+ PCI_CONF1_DEV(dev) | \
+ PCI_CONF1_FUNC(func) | \
+ PCI_CONF1_REG(reg))
+
+/*
+ * Extension of PCI Config Address for accessing extended PCIe registers
+ *
+ * No standardized specification, but used on many non-ECAM-compliant ARM SoCs
+ * and on AMD Barcelona and newer CPUs. Reserved bits [27:24] of the PCI Config
+ * Address carry the additional 4 high bits of the PCI Express register offset.
+ */
+
+#define PCI_CONF1_EXT_REG_SHIFT 16
+#define PCI_CONF1_EXT_REG_MASK 0xf00
+#define PCI_CONF1_EXT_REG(x) (((x) & PCI_CONF1_EXT_REG_MASK) << PCI_CONF1_EXT_REG_SHIFT)
+
+#define PCI_CONF1_EXT_ADDRESS(bus, dev, func, reg) \
+ (PCI_CONF1_ADDRESS(bus, dev, func, reg) | \
+ PCI_CONF1_EXT_REG(reg))
+
#endif /* DRIVERS_PCI_H */
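
[Editor's illustration, not part of the patch] The PCI_CONF1_ADDRESS() helper added above packs the enable bit, bus, device, function, and register offset into a legacy Configuration Mechanism #1 address. The standalone userspace sketch below re-creates the same macros (BIT() is open-coded because the kernel headers are unavailable in userspace) so the bit layout can be checked by hand; the example values are arbitrary.

#include <stdio.h>

#define BIT(n)                 (1U << (n))
#define PCI_CONF1_BUS_SHIFT    16
#define PCI_CONF1_DEV_SHIFT    11
#define PCI_CONF1_FUNC_SHIFT   8
#define PCI_CONF1_BUS_MASK     0xff
#define PCI_CONF1_DEV_MASK     0x1f
#define PCI_CONF1_FUNC_MASK    0x7
#define PCI_CONF1_REG_MASK     0xfc
#define PCI_CONF1_ENABLE       BIT(31)
#define PCI_CONF1_ADDRESS(bus, dev, func, reg)                          \
	(PCI_CONF1_ENABLE |                                             \
	 (((bus)  & PCI_CONF1_BUS_MASK)  << PCI_CONF1_BUS_SHIFT)  |     \
	 (((dev)  & PCI_CONF1_DEV_MASK)  << PCI_CONF1_DEV_SHIFT)  |     \
	 (((func) & PCI_CONF1_FUNC_MASK) << PCI_CONF1_FUNC_SHIFT) |     \
	 ((reg) & PCI_CONF1_REG_MASK))

int main(void)
{
	/* bus 3, device 5, function 0, register 0x10 (BAR0) */
	unsigned int addr = PCI_CONF1_ADDRESS(3, 5, 0, 0x10);

	printf("CONF1 address: 0x%08x\n", addr);	/* prints 0x80032810 */
	return 0;
}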
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index a8aec190986c..53a1fa306e1e 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
@@ -350,29 +351,43 @@ static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
return 0;
}
+/*
+ * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
+ * register. Ports enter L1.2 when the most recent LTR value is greater
+ * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
+ * don't enter L1.2 too aggressively.
+ *
+ * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
+ */
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
- u32 threshold_ns = threshold_us * 1000;
+ u64 threshold_ns = (u64) threshold_us * 1000;
- /* See PCIe r3.1, sec 7.33.3 and sec 6.18 */
- if (threshold_ns < 32) {
- *scale = 0;
+ /*
+ * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
+ * value of 0x3ff.
+ */
+ if (threshold_ns <= 0x3ff * 1) {
+ *scale = 0; /* Value times 1ns */
*value = threshold_ns;
- } else if (threshold_ns < 1024) {
- *scale = 1;
- *value = threshold_ns >> 5;
- } else if (threshold_ns < 32768) {
- *scale = 2;
- *value = threshold_ns >> 10;
- } else if (threshold_ns < 1048576) {
- *scale = 3;
- *value = threshold_ns >> 15;
- } else if (threshold_ns < 33554432) {
- *scale = 4;
- *value = threshold_ns >> 20;
+ } else if (threshold_ns <= 0x3ff * 32) {
+ *scale = 1; /* Value times 32ns */
+ *value = roundup(threshold_ns, 32) / 32;
+ } else if (threshold_ns <= 0x3ff * 1024) {
+ *scale = 2; /* Value times 1024ns */
+ *value = roundup(threshold_ns, 1024) / 1024;
+ } else if (threshold_ns <= 0x3ff * 32768) {
+ *scale = 3; /* Value times 32768ns */
+ *value = roundup(threshold_ns, 32768) / 32768;
+ } else if (threshold_ns <= 0x3ff * 1048576) {
+ *scale = 4; /* Value times 1048576ns */
+ *value = roundup(threshold_ns, 1048576) / 1048576;
+ } else if (threshold_ns <= 0x3ff * (u64) 33554432) {
+ *scale = 5; /* Value times 33554432ns */
+ *value = roundup(threshold_ns, 33554432) / 33554432;
} else {
*scale = 5;
- *value = threshold_ns >> 25;
+ *value = 0x3ff; /* Max representable value */
}
}
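
[Editor's illustration, not part of the patch] The rewritten encode_l12_threshold() above rounds the LTR_L1.2_THRESHOLD up to the next representable value instead of truncating, so the programmed threshold is never below the requested one. A minimal userspace re-implementation (roundup() open-coded; in the kernel it comes from <linux/math.h>), shown for an assumed 100 us request:

#include <stdint.h>
#include <stdio.h>

static void encode_l12_threshold(uint32_t threshold_us,
				 uint32_t *scale, uint32_t *value)
{
	static const uint64_t unit[] = { 1, 32, 1024, 32768, 1048576, 33554432 };
	uint64_t ns = (uint64_t)threshold_us * 1000;
	int i;

	for (i = 0; i < 6; i++) {
		if (ns <= 0x3ff * unit[i]) {
			*scale = i;
			*value = (ns + unit[i] - 1) / unit[i];	/* round up */
			return;
		}
	}
	*scale = 5;
	*value = 0x3ff;		/* max representable value */
}

int main(void)
{
	uint32_t scale, value;

	encode_l12_threshold(100, &scale, &value);
	/*
	 * 100 us -> scale 2 (1024 ns units), value 98, i.e. 100352 ns >= 100000 ns.
	 * The old shift-based code picked scale 3, value 3 (98304 ns), a threshold
	 * slightly below the request, which allowed L1.2 entry too aggressively.
	 */
	printf("scale=%u value=%u\n", scale, value);
	return 0;
}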
@@ -455,6 +470,31 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
pci_write_config_dword(pdev, pos, val);
}
+static void aspm_program_l1ss(struct pci_dev *dev, u32 ctl1, u32 ctl2)
+{
+ u16 l1ss = dev->l1ss;
+ u32 l1_2_enable;
+
+ /*
+ * Per PCIe r6.0, sec 5.5.4, T_POWER_ON in PCI_L1SS_CTL2 must be
+ * programmed prior to setting the L1.2 enable bits in PCI_L1SS_CTL1.
+ */
+ pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);
+
+ /*
+ * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD in
+ * PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
+ * enable bits, even though they're all in PCI_L1SS_CTL1.
+ */
+ l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+
+ pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
+ if (l1_2_enable)
+ pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1,
+ ctl1 | l1_2_enable);
+}
+
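
[Editor's illustration, not part of the patch] aspm_program_l1ss() encodes the ordering rule from PCIe r6.0, sec 5.5.4: T_POWER_ON (CTL2) and the CTL1 timing fields must be written before the L1.2 enable bits, so the enables go in a second CTL1 write. The sketch below models that ordering with a logging stub instead of real config-space writes; program_l1ss(), cfg_write(), and the register values are hypothetical, and the 0x5 mask is the L1.2 enable bits (ASPM L1.2 | PCI-PM L1.2) as defined in include/uapi/linux/pci_regs.h.

#include <stdint.h>
#include <stdio.h>

#define PCI_L1SS_CTL1_L1_2_MASK	0x00000005	/* ASPM L1.2 | PCI-PM L1.2 */

static void cfg_write(const char *reg, uint32_t val)
{
	printf("write %-10s = 0x%08x\n", reg, val);	/* stands in for pci_write_config_dword() */
}

static void program_l1ss(uint32_t ctl1, uint32_t ctl2)
{
	uint32_t l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;

	ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;

	cfg_write("L1SS_CTL2", ctl2);			/* 1: T_POWER_ON first */
	cfg_write("L1SS_CTL1", ctl1);			/* 2: timings, L1.2 still disabled */
	if (l1_2_enable)
		cfg_write("L1SS_CTL1", ctl1 | l1_2_enable);	/* 3: enables last */
}

int main(void)
{
	program_l1ss(0x40a0000d, 0x00000028);	/* arbitrary example values */
	return 0;
}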
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -464,7 +504,6 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
u32 ctl1 = 0, ctl2 = 0;
u32 pctl1, pctl2, cctl1, cctl2;
- u32 pl1_2_enables, cl1_2_enables;
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
@@ -513,39 +552,78 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
ctl2 == pctl2 && ctl2 == cctl2)
return;
- /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
- pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
- cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ pctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
+ pctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
+ aspm_program_l1ss(parent, pctl1, ctl2);
+
+ cctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
+ cctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
+ aspm_program_l1ss(child, cctl1, ctl2);
+}
- if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
- }
+static void aspm_l1ss_init(struct pcie_link_state *link)
+{
+ struct pci_dev *child = link->downstream, *parent = link->pdev;
+ u32 parent_l1ss_cap, child_l1ss_cap;
+ u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
- /* Program T_POWER_ON times in both ports */
- pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
- pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+ if (!parent->l1ss || !child->l1ss)
+ return;
- /* Program Common_Mode_Restore_Time in upstream device */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+ /* Setup L1 substate */
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
+ &parent_l1ss_cap);
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
+ &child_l1ss_cap);
- /* Program LTR_L1.2_THRESHOLD time in both ports */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
-
- if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
- pl1_2_enables);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
- cl1_2_enables);
- }
+ if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+ parent_l1ss_cap = 0;
+ if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+ child_l1ss_cap = 0;
+
+ /*
+ * If we don't have LTR for the entire path from the Root Complex
+ * to this device, we can't use ASPM L1.2 because it relies on the
+ * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
+ */
+ if (!child->ltr_path)
+ child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
+
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
+ link->aspm_support |= ASPM_STATE_L1_1;
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
+ link->aspm_support |= ASPM_STATE_L1_2;
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
+ link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
+ link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
+
+ if (parent_l1ss_cap)
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ &parent_l1ss_ctl1);
+ if (child_l1ss_cap)
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ &child_l1ss_ctl1);
+
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
+ link->aspm_enabled |= ASPM_STATE_L1_1;
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
+ link->aspm_enabled |= ASPM_STATE_L1_2;
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
+ link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
+ link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
+
+ if (link->aspm_support & ASPM_STATE_L1SS)
+ aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
}
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
@@ -553,8 +631,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
struct pci_dev *child = link->downstream, *parent = link->pdev;
u32 parent_lnkcap, child_lnkcap;
u16 parent_lnkctl, child_lnkctl;
- u32 parent_l1ss_cap, child_l1ss_cap;
- u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
struct pci_bus *linkbus = parent->subordinate;
if (blacklist) {
@@ -609,52 +685,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
link->aspm_enabled |= ASPM_STATE_L1;
- /* Setup L1 substate */
- pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
- &parent_l1ss_cap);
- pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
- &child_l1ss_cap);
-
- if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
- parent_l1ss_cap = 0;
- if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
- child_l1ss_cap = 0;
-
- /*
- * If we don't have LTR for the entire path from the Root Complex
- * to this device, we can't use ASPM L1.2 because it relies on the
- * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
- */
- if (!child->ltr_path)
- child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
-
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1;
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2;
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
- if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
-
- if (parent_l1ss_cap)
- pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- &parent_l1ss_ctl1);
- if (child_l1ss_cap)
- pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
- &child_l1ss_ctl1);
-
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1;
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2;
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
- if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
-
- if (link->aspm_support & ASPM_STATE_L1SS)
- aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
+ aspm_l1ss_init(link);
/* Save default state */
link->aspm_default = link->aspm_enabled;
@@ -726,6 +757,43 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_L1SS_CTL1_L1SS_MASK, val);
}
+void pci_save_aspm_l1ss_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u16 l1ss = dev->l1ss;
+ u32 *cap;
+
+ if (!l1ss)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = (u32 *)&save_state->cap.data[0];
+ pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL1, cap++);
+}
+
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u32 *cap, ctl1, ctl2;
+ u16 l1ss = dev->l1ss;
+
+ if (!l1ss)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = (u32 *)&save_state->cap.data[0];
+ ctl2 = *cap++;
+ ctl1 = *cap;
+ aspm_program_l1ss(dev, ctl1, ctl2);
+}
+
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 3e9afee02e8d..f5ffea17c7f8 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -335,11 +335,16 @@ void pci_dpc_init(struct pci_dev *pdev)
return;
pdev->dpc_rp_extensions = true;
- pdev->dpc_rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
- if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
- pci_err(pdev, "RP PIO log size %u is invalid\n",
- pdev->dpc_rp_log_size);
- pdev->dpc_rp_log_size = 0;
+
+ /* Quirks may set dpc_rp_log_size if device or firmware is buggy */
+ if (!pdev->dpc_rp_log_size) {
+ pdev->dpc_rp_log_size =
+ (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
+ if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ pci_err(pdev, "RP PIO log size %u is invalid\n",
+ pdev->dpc_rp_log_size);
+ pdev->dpc_rp_log_size = 0;
+ }
}
}
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 368a254e3124..b4e5f553467c 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -9,30 +9,38 @@
#include <linux/pci.h>
#include "../pci.h"
-static void pci_ptm_info(struct pci_dev *dev)
+/*
+ * If the next upstream device supports PTM, return it; otherwise return
+ * NULL. PTM Messages are local, so both link partners must support it.
+ */
+static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev)
{
- char clock_desc[8];
+ struct pci_dev *ups = pci_upstream_bridge(dev);
- switch (dev->ptm_granularity) {
- case 0:
- snprintf(clock_desc, sizeof(clock_desc), "unknown");
- break;
- case 255:
- snprintf(clock_desc, sizeof(clock_desc), ">254ns");
- break;
- default:
- snprintf(clock_desc, sizeof(clock_desc), "%uns",
- dev->ptm_granularity);
- break;
- }
- pci_info(dev, "PTM enabled%s, %s granularity\n",
- dev->ptm_root ? " (root)" : "", clock_desc);
+ /*
+ * Switch Downstream Ports are not permitted to have a PTM
+ * capability; their PTM behavior is controlled by the Upstream
+ * Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a
+ * Switch Downstream Port, look up one more level.
+ */
+ if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM)
+ ups = pci_upstream_bridge(ups);
+
+ if (ups && ups->ptm_cap)
+ return ups;
+
+ return NULL;
}
-void pci_disable_ptm(struct pci_dev *dev)
+/*
+ * Find the PTM Capability (if present) and extract the information we need
+ * to use it.
+ */
+void pci_ptm_init(struct pci_dev *dev)
{
- int ptm;
- u16 ctrl;
+ u16 ptm;
+ u32 cap;
+ struct pci_dev *ups;
if (!pci_is_pcie(dev))
return;
@@ -41,21 +49,47 @@ void pci_disable_ptm(struct pci_dev *dev)
if (!ptm)
return;
- pci_read_config_word(dev, ptm + PCI_PTM_CTRL, &ctrl);
- ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
- pci_write_config_word(dev, ptm + PCI_PTM_CTRL, ctrl);
+ dev->ptm_cap = ptm;
+ pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));
+
+ pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
+ dev->ptm_granularity = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
+
+ /*
+ * Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the
+ * furthest upstream Time Source as the PTM Root. For Endpoints,
+ * "the Effective Granularity is the maximum Local Clock Granularity
+ * reported by the PTM Root and all intervening PTM Time Sources."
+ */
+ ups = pci_upstream_ptm(dev);
+ if (ups) {
+ if (ups->ptm_granularity == 0)
+ dev->ptm_granularity = 0;
+ else if (ups->ptm_granularity > dev->ptm_granularity)
+ dev->ptm_granularity = ups->ptm_granularity;
+ } else if (cap & PCI_PTM_CAP_ROOT) {
+ dev->ptm_root = 1;
+ } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+
+ /*
+ * Per sec 7.9.15.3, this should be the Local Clock
+ * Granularity of the associated Time Source. But it
+ * doesn't say how to find that Time Source.
+ */
+ dev->ptm_granularity = 0;
+ }
+
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
+ pci_enable_ptm(dev, NULL);
}
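
[Editor's illustration, not part of the patch] The granularity handling above means the Effective Granularity seen at a device is the maximum Local Clock Granularity along the path from the PTM Root, and a value of 0 ("unknown") anywhere on the path makes the result unknown. A tiny standalone sketch of that propagation rule (effective_granularity() and the example values are hypothetical):

#include <stdio.h>

static unsigned int effective_granularity(unsigned int upstream, unsigned int local)
{
	if (upstream == 0)
		return 0;			/* unknown stays unknown */
	return upstream > local ? upstream : local;
}

int main(void)
{
	/* PTM Root reports 4ns, switch upstream port 8ns, endpoint clock 1ns */
	unsigned int root = 4;
	unsigned int sw = effective_granularity(root, 8);
	unsigned int ep = effective_granularity(sw, 1);

	printf("switch=%uns endpoint=%uns\n", sw, ep);	/* 8ns 8ns */
	return 0;
}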
void pci_save_ptm_state(struct pci_dev *dev)
{
- int ptm;
+ u16 ptm = dev->ptm_cap;
struct pci_cap_saved_state *save_state;
- u16 *cap;
+ u32 *cap;
- if (!pci_is_pcie(dev))
- return;
-
- ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
if (!ptm)
return;
@@ -63,146 +97,152 @@ void pci_save_ptm_state(struct pci_dev *dev)
if (!save_state)
return;
- cap = (u16 *)&save_state->cap.data[0];
- pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
+ cap = (u32 *)&save_state->cap.data[0];
+ pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap);
}
void pci_restore_ptm_state(struct pci_dev *dev)
{
+ u16 ptm = dev->ptm_cap;
struct pci_cap_saved_state *save_state;
- int ptm;
- u16 *cap;
+ u32 *cap;
- if (!pci_is_pcie(dev))
+ if (!ptm)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
- ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!save_state || !ptm)
+ if (!save_state)
return;
- cap = (u16 *)&save_state->cap.data[0];
- pci_write_config_word(dev, ptm + PCI_PTM_CTRL, *cap);
+ cap = (u32 *)&save_state->cap.data[0];
+ pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap);
}
-void pci_ptm_init(struct pci_dev *dev)
+/* Enable PTM in the Control register if possible */
+static int __pci_enable_ptm(struct pci_dev *dev)
{
- int pos;
- u32 cap, ctrl;
- u8 local_clock;
+ u16 ptm = dev->ptm_cap;
struct pci_dev *ups;
+ u32 ctrl;
- if (!pci_is_pcie(dev))
- return;
-
- /*
- * Enable PTM only on interior devices (root ports, switch ports,
- * etc.) on the assumption that it causes no link traffic until an
- * endpoint enables it.
- */
- if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT ||
- pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
- return;
+ if (!ptm)
+ return -EINVAL;
/*
- * Switch Downstream Ports are not permitted to have a PTM
- * capability; their PTM behavior is controlled by the Upstream
- * Port (PCIe r5.0, sec 7.9.16).
+ * A device uses local PTM Messages to request time information
+ * from a PTM Root that's farther upstream. Every device along the
+ * path must support PTM and have it enabled so it can handle the
+ * messages. Therefore, if this device is not a PTM Root, the
+ * upstream link partner must have PTM enabled before we can enable
+ * PTM.
*/
- ups = pci_upstream_bridge(dev);
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
- ups && ups->ptm_enabled) {
- dev->ptm_granularity = ups->ptm_granularity;
- dev->ptm_enabled = 1;
- return;
+ if (!dev->ptm_root) {
+ ups = pci_upstream_ptm(dev);
+ if (!ups || !ups->ptm_enabled)
+ return -EINVAL;
}
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return;
-
- pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u16));
-
- pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
- local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
-
- /*
- * There's no point in enabling PTM unless it's enabled in the
- * upstream device or this device can be a PTM Root itself. Per
- * the spec recommendation (PCIe r3.1, sec 7.32.3), select the
- * furthest upstream Time Source as the PTM Root.
- */
- if (ups && ups->ptm_enabled) {
- ctrl = PCI_PTM_CTRL_ENABLE;
- if (ups->ptm_granularity == 0)
- dev->ptm_granularity = 0;
- else if (ups->ptm_granularity > local_clock)
- dev->ptm_granularity = ups->ptm_granularity;
- } else {
- if (cap & PCI_PTM_CAP_ROOT) {
- ctrl = PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT;
- dev->ptm_root = 1;
- dev->ptm_granularity = local_clock;
- } else
- return;
- }
+ pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
+ ctrl |= PCI_PTM_CTRL_ENABLE;
+ ctrl &= ~PCI_PTM_GRANULARITY_MASK;
ctrl |= dev->ptm_granularity << 8;
- pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
- dev->ptm_enabled = 1;
+ if (dev->ptm_root)
+ ctrl |= PCI_PTM_CTRL_ROOT;
- pci_ptm_info(dev);
+ pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
+ return 0;
}
+/**
+ * pci_enable_ptm() - Enable Precision Time Measurement
+ * @dev: PCI device
+ * @granularity: pointer to return granularity
+ *
+ * Enable Precision Time Measurement for @dev. If successful and
+ * @granularity is non-NULL, return the Effective Granularity.
+ *
+ * Return: zero if successful, or -EINVAL if @dev lacks a PTM Capability or
+ * is not a PTM Root and lacks an upstream path of PTM-enabled devices.
+ */
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{
- int pos;
- u32 cap, ctrl;
- struct pci_dev *ups;
-
- if (!pci_is_pcie(dev))
- return -EINVAL;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return -EINVAL;
-
- pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
- if (!(cap & PCI_PTM_CAP_REQ))
- return -EINVAL;
-
- /*
- * For a PCIe Endpoint, PTM is only useful if the endpoint can
- * issue PTM requests to upstream devices that have PTM enabled.
- *
- * For Root Complex Integrated Endpoints, there is no upstream
- * device, so there must be some implementation-specific way to
- * associate the endpoint with a time source.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT) {
- ups = pci_upstream_bridge(dev);
- if (!ups || !ups->ptm_enabled)
- return -EINVAL;
+ int rc;
+ char clock_desc[8];
- dev->ptm_granularity = ups->ptm_granularity;
- } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
- dev->ptm_granularity = 0;
- } else
- return -EINVAL;
+ rc = __pci_enable_ptm(dev);
+ if (rc)
+ return rc;
- ctrl = PCI_PTM_CTRL_ENABLE;
- ctrl |= dev->ptm_granularity << 8;
- pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
dev->ptm_enabled = 1;
- pci_ptm_info(dev);
-
if (granularity)
*granularity = dev->ptm_granularity;
+
+ switch (dev->ptm_granularity) {
+ case 0:
+ snprintf(clock_desc, sizeof(clock_desc), "unknown");
+ break;
+ case 255:
+ snprintf(clock_desc, sizeof(clock_desc), ">254ns");
+ break;
+ default:
+ snprintf(clock_desc, sizeof(clock_desc), "%uns",
+ dev->ptm_granularity);
+ break;
+ }
+ pci_info(dev, "PTM enabled%s, %s granularity\n",
+ dev->ptm_root ? " (root)" : "", clock_desc);
+
return 0;
}
EXPORT_SYMBOL(pci_enable_ptm);
+static void __pci_disable_ptm(struct pci_dev *dev)
+{
+ u16 ptm = dev->ptm_cap;
+ u32 ctrl;
+
+ if (!ptm)
+ return;
+
+ pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
+ ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
+ pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
+}
+
+/**
+ * pci_disable_ptm() - Disable Precision Time Measurement
+ * @dev: PCI device
+ *
+ * Disable Precision Time Measurement for @dev.
+ */
+void pci_disable_ptm(struct pci_dev *dev)
+{
+ if (dev->ptm_enabled) {
+ __pci_disable_ptm(dev);
+ dev->ptm_enabled = 0;
+ }
+}
+EXPORT_SYMBOL(pci_disable_ptm);
+
+/*
+ * Disable PTM, but preserve dev->ptm_enabled so we silently re-enable it on
+ * resume if necessary.
+ */
+void pci_suspend_ptm(struct pci_dev *dev)
+{
+ if (dev->ptm_enabled)
+ __pci_disable_ptm(dev);
+}
+
+/* If PTM was enabled before suspend, re-enable it when resuming */
+void pci_resume_ptm(struct pci_dev *dev)
+{
+ if (dev->ptm_enabled)
+ __pci_enable_ptm(dev);
+}
+
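
[Editor's illustration, not part of the patch] The difference between pci_disable_ptm() and the suspend/resume helpers above is purely in the dev->ptm_enabled bookkeeping: suspend turns the hardware enable off but leaves the flag set so resume knows to re-enable, while an explicit disable clears both. A minimal model with a hypothetical fake_dev in place of struct pci_dev:

#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	bool hw_ptm_on;		/* stands in for PCI_PTM_CTRL_ENABLE in hardware */
	bool ptm_enabled;	/* software bookkeeping, as in struct pci_dev */
};

static void suspend_ptm(struct fake_dev *d) { if (d->ptm_enabled) d->hw_ptm_on = false; }
static void resume_ptm(struct fake_dev *d)  { if (d->ptm_enabled) d->hw_ptm_on = true; }
static void disable_ptm(struct fake_dev *d) { d->hw_ptm_on = false; d->ptm_enabled = false; }

int main(void)
{
	struct fake_dev d = { .hw_ptm_on = true, .ptm_enabled = true };

	suspend_ptm(&d);	/* hardware off, flag still set */
	resume_ptm(&d);		/* hardware back on */
	printf("after resume:  hw=%d sw=%d\n", d.hw_ptm_on, d.ptm_enabled);

	disable_ptm(&d);	/* explicit disable clears both */
	resume_ptm(&d);		/* now a no-op */
	printf("after disable: hw=%d sw=%d\n", d.hw_ptm_on, d.ptm_enabled);
	return 0;
}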
bool pcie_ptm_enabled(struct pci_dev *dev)
{
if (!dev)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c5286b027f00..b66fa42c4b1f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1297,7 +1297,7 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
!is_cardbus && !broken) {
- unsigned int cmax;
+ unsigned int cmax, buses;
/*
* Bus already configured by firmware, process it in the
@@ -1322,7 +1322,8 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
child->bridge_ctl = bctl;
}
- cmax = pci_scan_child_bus(child);
+ buses = subordinate - secondary;
+ cmax = pci_scan_child_bus_extend(child, buses);
if (cmax > subordinate)
pci_warn(dev, "bridge has subordinate %02x but max busn %02x\n",
subordinate, cmax);
@@ -2920,8 +2921,8 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* hotplug bridges too much during the second scan below.
*/
used_buses++;
- if (cmax - max > 1)
- used_buses += cmax - max - 1;
+ if (max - cmax > 1)
+ used_buses += max - cmax - 1;
}
/* Scan bridges that need to be reconfigured */
@@ -2929,7 +2930,6 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
unsigned int buses = 0;
if (!hotplug_bridges && normal_bridges == 1) {
-
/*
* There is only one bridge on the bus (upstream
* port) so it gets all available buses which it
@@ -2938,7 +2938,6 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
*/
buses = available_buses;
} else if (dev->is_hotplug_bridge) {
-
/*
* Distribute the extra buses between hotplug
* bridges if any.
@@ -2957,7 +2956,7 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
/*
* Make sure a hotplug bridge has at least the minimum requested
* number of buses but allow it to grow up to the maximum available
- * bus number of there is room.
+ * bus number if there is room.
*/
if (bus->self && bus->self->is_hotplug_bridge) {
used_buses = max_t(unsigned int, available_buses,
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4944798e75b5..285acc4aaccc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5956,3 +5956,39 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b1, aspm_l1_acceptable_latency
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c0, aspm_l1_acceptable_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency);
#endif
+
+#ifdef CONFIG_PCIE_DPC
+/*
+ * Intel Tiger Lake and Alder Lake BIOS has a bug that clears the DPC
+ * RP PIO Log Size of the integrated Thunderbolt PCIe Root Ports.
+ */
+static void dpc_log_size(struct pci_dev *dev)
+{
+ u16 dpc, val;
+
+ dpc = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
+ if (!dpc)
+ return;
+
+ pci_read_config_word(dev, dpc + PCI_EXP_DPC_CAP, &val);
+ if (!(val & PCI_EXP_DPC_CAP_RP_EXT))
+ return;
+
+ if (!((val & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8)) {
+ pci_info(dev, "Overriding RP PIO Log Size to 4\n");
+ dev->dpc_rp_log_size = 4;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x462f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x463f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x466e, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a23, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a25, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a27, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a29, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+#endif
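
[Editor's illustration, not part of the patch] The quirk above only takes effect when the RP PIO Log Size field of the DPC Capability register reads zero on an affected Root Port. Below is a standalone sketch of the field extraction that pci_dpc_init() performs; the two register values are made up, and the mask/bit definitions are copied from include/uapi/linux/pci_regs.h as I understand them.

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DPC_CAP_RP_EXT		0x0020	/* Root Port extensions */
#define PCI_EXP_DPC_RP_PIO_LOG_SIZE	0x0F00	/* RP PIO Log Size */

static unsigned int rp_pio_log_size(uint16_t dpc_cap)
{
	if (!(dpc_cap & PCI_EXP_DPC_CAP_RP_EXT))
		return 0;			/* no RP extensions, no log */
	return (dpc_cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
}

int main(void)
{
	printf("%u\n", rp_pio_log_size(0x0420));	/* sane firmware: 4 */
	printf("%u\n", rp_pio_log_size(0x0020));	/* buggy BIOS: 0, quirk forces 4 */
	return 0;
}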
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8cb68e6f6ef9..dc6a30ee6edf 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1745,119 +1745,6 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus,
}
#endif
-/*
- * First try will not touch PCI bridge res.
- * Second and later try will clear small leaf bridge res.
- * Will stop till to the max depth if can not find good one.
- */
-void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
-{
- LIST_HEAD(realloc_head);
- /* List of resources that want additional resources */
- struct list_head *add_list = NULL;
- int tried_times = 0;
- enum release_type rel_type = leaf_only;
- LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
- int pci_try_num = 1;
- enum enable_type enable_local;
-
- /* Don't realloc if asked to do so */
- enable_local = pci_realloc_detect(bus, pci_realloc_enable);
- if (pci_realloc_enabled(enable_local)) {
- int max_depth = pci_bus_get_depth(bus);
-
- pci_try_num = max_depth + 1;
- dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
- max_depth, pci_try_num);
- }
-
-again:
- /*
- * Last try will use add_list, otherwise will try good to have as must
- * have, so can realloc parent bridge resource
- */
- if (tried_times + 1 == pci_try_num)
- add_list = &realloc_head;
- /*
- * Depth first, calculate sizes and alignments of all subordinate buses.
- */
- __pci_bus_size_bridges(bus, add_list);
-
- /* Depth last, allocate resources and update the hardware. */
- __pci_bus_assign_resources(bus, add_list, &fail_head);
- if (add_list)
- BUG_ON(!list_empty(add_list));
- tried_times++;
-
- /* Any device complain? */
- if (list_empty(&fail_head))
- goto dump;
-
- if (tried_times >= pci_try_num) {
- if (enable_local == undefined)
- dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
- else if (enable_local == auto_enabled)
- dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
-
- free_list(&fail_head);
- goto dump;
- }
-
- dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
- tried_times + 1);
-
- /* Third times and later will not check if it is leaf */
- if ((tried_times + 1) > 2)
- rel_type = whole_subtree;
-
- /*
- * Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- rel_type);
-
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
-
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
-
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
- }
- }
- free_list(&fail_head);
-
- goto again;
-
-dump:
- /* Dump the resource on buses */
- pci_bus_dump_resources(bus);
-}
-
-void __init pci_assign_unassigned_resources(void)
-{
- struct pci_bus *root_bus;
-
- list_for_each_entry(root_bus, &pci_root_buses, node) {
- pci_assign_unassigned_root_bus_resources(root_bus);
-
- /* Make sure the root bridge has a companion ACPI device */
- if (ACPI_HANDLE(root_bus->bridge))
- acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
- }
-}
-
static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
struct list_head *add_list,
resource_size_t new_size)
@@ -1881,7 +1768,10 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
}
res->end = res->start + new_size - 1;
- remove_from_list(add_list, res);
+
+ /* If the resource is part of the add_list remove it now */
+ if (add_list)
+ remove_from_list(add_list, res);
}
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
@@ -2029,13 +1919,15 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
}
static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
- struct list_head *add_list)
+ struct list_head *add_list)
{
struct resource available_io, available_mmio, available_mmio_pref;
if (!bridge->is_hotplug_bridge)
return;
+ pci_dbg(bridge, "distributing available resources\n");
+
/* Take the initial extra resources from the hotplug port */
available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
@@ -2047,6 +1939,174 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
available_mmio_pref);
}
+static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
+{
+ const struct resource *r;
+
+ /*
+ * Check the child device's resources: if they are not yet
+ * assigned, it means we (not the boot firmware) are configuring
+ * them, so we should be able to extend the upstream bridge's
+ * resources (that's the hotplug downstream PCIe port) in the
+ * same way we do in the normal hotplug case.
+ */
+ r = &dev->resource[PCI_BRIDGE_IO_WINDOW];
+ if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN))
+ return false;
+ r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
+ if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN))
+ return false;
+ r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN))
+ return false;
+
+ return true;
+}
+
+static void pci_root_bus_distribute_available_resources(struct pci_bus *bus,
+ struct list_head *add_list)
+{
+ struct pci_dev *dev, *bridge = bus->self;
+
+ for_each_pci_bridge(dev, bus) {
+ struct pci_bus *b;
+
+ b = dev->subordinate;
+ if (!b)
+ continue;
+
+ /*
+ * Need to check "bridge" here too because it is NULL
+ * in case of root bus.
+ */
+ if (bridge && pci_bridge_resources_not_assigned(dev)) {
+ pci_bridge_distribute_available_resources(bridge, add_list);
+ /*
+ * There is only a PCIe upstream port on the bus,
+ * so we don't need to go further.
+ */
+ return;
+ }
+
+ pci_root_bus_distribute_available_resources(b, add_list);
+ }
+}
+
+/*
+ * First try will not touch PCI bridge res.
+ * Second and later try will clear small leaf bridge res.
+ * Will stop till to the max depth if can not find good one.
+ */
+void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
+{
+ LIST_HEAD(realloc_head);
+ /* List of resources that want additional resources */
+ struct list_head *add_list = NULL;
+ int tried_times = 0;
+ enum release_type rel_type = leaf_only;
+ LIST_HEAD(fail_head);
+ struct pci_dev_resource *fail_res;
+ int pci_try_num = 1;
+ enum enable_type enable_local;
+
+ /* Don't realloc if asked to do so */
+ enable_local = pci_realloc_detect(bus, pci_realloc_enable);
+ if (pci_realloc_enabled(enable_local)) {
+ int max_depth = pci_bus_get_depth(bus);
+
+ pci_try_num = max_depth + 1;
+ dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
+ max_depth, pci_try_num);
+ }
+
+again:
+ /*
+ * Last try will use add_list, otherwise will try good to have as must
+ * have, so can realloc parent bridge resource
+ */
+ if (tried_times + 1 == pci_try_num)
+ add_list = &realloc_head;
+ /*
+ * Depth first, calculate sizes and alignments of all subordinate buses.
+ */
+ __pci_bus_size_bridges(bus, add_list);
+
+ pci_root_bus_distribute_available_resources(bus, add_list);
+
+ /* Depth last, allocate resources and update the hardware. */
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
+ if (add_list)
+ BUG_ON(!list_empty(add_list));
+ tried_times++;
+
+ /* Any device complain? */
+ if (list_empty(&fail_head))
+ goto dump;
+
+ if (tried_times >= pci_try_num) {
+ if (enable_local == undefined)
+ dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ else if (enable_local == auto_enabled)
+ dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+
+ free_list(&fail_head);
+ goto dump;
+ }
+
+ dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
+ tried_times + 1);
+
+ /* Third times and later will not check if it is leaf */
+ if ((tried_times + 1) > 2)
+ rel_type = whole_subtree;
+
+ /*
+ * Try to release leaf bridge's resources that doesn't fit resource of
+ * child device under that bridge.
+ */
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & PCI_RES_TYPE_MASK,
+ rel_type);
+
+ /* Restore size and flags */
+ list_for_each_entry(fail_res, &fail_head, list) {
+ struct resource *res = fail_res->res;
+ int idx;
+
+ res->start = fail_res->start;
+ res->end = fail_res->end;
+ res->flags = fail_res->flags;
+
+ if (pci_is_bridge(fail_res->dev)) {
+ idx = res - &fail_res->dev->resource[0];
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
+ }
+ free_list(&fail_head);
+
+ goto again;
+
+dump:
+ /* Dump the resource on buses */
+ pci_bus_dump_resources(bus);
+}
+
+void __init pci_assign_unassigned_resources(void)
+{
+ struct pci_bus *root_bus;
+
+ list_for_each_entry(root_bus, &pci_root_buses, node) {
+ pci_assign_unassigned_root_bus_resources(root_bus);
+
+ /* Make sure the root bridge has a companion ACPI device */
+ if (ACPI_HANDLE(root_bus->bridge))
+ acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
+ }
+}
+
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
struct pci_bus *parent = bridge->subordinate;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 439ac5f5907a..b492e67c3d87 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -214,6 +214,17 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
root = pci_find_parent_resource(dev, res);
if (!root) {
+ /*
+ * If dev is behind a bridge, accesses will only reach it
+ * if res is inside the relevant bridge window.
+ */
+ if (pci_upstream_bridge(dev))
+ return -ENXIO;
+
+ /*
+ * On the root bus, assume the host bridge will forward
+ * everything.
+ */
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
else