Diffstat (limited to 'drivers/pci')
 drivers/pci/host/Kconfig           |  13
 drivers/pci/host/Makefile          |   1
 drivers/pci/host/pci-dra7xx.c      |   4
 drivers/pci/host/pci-hyperv.c      |  10
 drivers/pci/host/pci-imx6.c        | 227
 drivers/pci/host/pci-keystone-dw.c |  38
 drivers/pci/host/pci-keystone.c    |  52
 drivers/pci/host/pci-keystone.h    |   6
 drivers/pci/host/pci-mvebu.c       |   7
 drivers/pci/host/pci-thunder-pem.c |  42
 drivers/pci/host/pcie-armada8k.c   | 262
 drivers/pci/host/pcie-designware.c |  47
 drivers/pci/host/pcie-xilinx-nwl.c |   2
 drivers/pci/hotplug/acpiphp_ibm.c  |   2
 drivers/pci/pci-sysfs.c            |   7
 drivers/pci/pci.c                  |  43
 drivers/pci/pcie/portdrv.h         |  11
 drivers/pci/pcie/portdrv_acpi.c    |  10
 drivers/pci/pcie/portdrv_core.c    |   8
 drivers/pci/probe.c                |   1
 drivers/pci/quirks.c               | 196
 drivers/pci/search.c               |  14
 22 files changed, 836 insertions(+), 167 deletions(-)
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 7a0780d56d2d..bd6f11aaefd9 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -72,6 +72,8 @@ config PCI_RCAR_GEN2
config PCI_RCAR_GEN2_PCIE
bool "Renesas R-Car PCIe controller"
depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
+ select PCI_MSI
+ select PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
@@ -231,4 +233,15 @@ config PCI_HOST_THUNDER_ECAM
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
+config PCIE_ARMADA_8K
+ bool "Marvell Armada-8K PCIe controller"
+ depends on ARCH_MVEBU
+ select PCIE_DW
+ select PCIEPORTBUS
+ help
+ Say Y here if you want to enable PCIe controller support on
+ Armada-8K SoCs. The PCIe controller on Armada-8K is based on
+ Designware hardware and therefore the driver re-uses the
+ Designware core functions to implement the driver.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index d85b5faf9bbc..a6f85e3987c0 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
+obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 2ca3a1f30ebf..f441130407e7 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -142,13 +142,13 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
- dw_pcie_setup_rc(pp);
-
pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
+ dw_pcie_setup_rc(pp);
+
dra7xx_pcie_establish_link(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index ed651baa7c50..328c653d7503 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -2268,11 +2268,6 @@ static int hv_pci_remove(struct hv_device *hdev)
hbus = hv_get_drvdata(hdev);
- ret = hv_send_resources_released(hdev);
- if (ret)
- dev_err(&hdev->device,
- "Couldn't send resources released packet(s)\n");
-
memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
@@ -2295,6 +2290,11 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_unlock_rescan_remove();
}
+ ret = hv_send_resources_released(hdev);
+ if (ret)
+ dev_err(&hdev->device,
+ "Couldn't send resources released packet(s)\n");
+
vmbus_close(hdev->channel);
/* Delete any children which might still exist. */
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index eb5a2755a164..b741a36a67f3 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -19,6 +19,7 @@
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
+#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -31,19 +32,29 @@
#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
+enum imx6_pcie_variants {
+ IMX6Q,
+ IMX6SX,
+ IMX6QP,
+};
+
struct imx6_pcie {
- struct gpio_desc *reset_gpio;
+ int reset_gpio;
+ bool gpio_active_high;
struct clk *pcie_bus;
struct clk *pcie_phy;
+ struct clk *pcie_inbound_axi;
struct clk *pcie;
struct pcie_port pp;
struct regmap *iomuxc_gpr;
+ enum imx6_pcie_variants variant;
void __iomem *mem_base;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
u32 tx_swing_full;
u32 tx_swing_low;
+ int link_gen;
};
/* PCIe Root Complex registers (memory-mapped) */
@@ -236,37 +247,93 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
u32 val, gpr1, gpr12;
- /*
- * If the bootloader already enabled the link we need some special
- * handling to get the core back into a state where it is safe to
- * touch it for configuration. As there is no dedicated reset signal
- * wired up for MX6QDL, we need to manually force LTSSM into "detect"
- * state before completely disabling LTSSM, which is a prerequisite
- * for core configuration.
- *
- * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
- * indication that the bootloader activated the link.
- */
- regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
- regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST,
+ IMX6Q_GPR1_PCIE_SW_RST);
+ break;
+ case IMX6Q:
+ /*
+ * If the bootloader already enabled the link we need some
+ * special handling to get the core back into a state where
+ * it is safe to touch it for configuration. As there is
+ * no dedicated reset signal wired up for MX6QDL, we need
+ * to manually force LTSSM into "detect" state before
+ * completely disabling LTSSM, which is a prerequisite for
+ * core configuration.
+ *
+ * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we
+ * have a strong indication that the bootloader activated
+ * the link.
+ */
+ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
+ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+
+ if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
+ (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
+ val = readl(pp->dbi_base + PCIE_PL_PFLR);
+ val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
+ val |= PCIE_PL_PFLR_FORCE_LINK;
+ writel(val, pp->dbi_base + PCIE_PL_PFLR);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+ break;
+ }
+
+ return 0;
+}
- if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
- (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
- val = readl(pp->dbi_base + PCIE_PL_PFLR);
- val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
- val |= PCIE_PL_PFLR_FORCE_LINK;
- writel(val, pp->dbi_base + PCIE_PL_PFLR);
+static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+{
+ struct pcie_port *pp = &imx6_pcie->pp;
+ int ret = 0;
+
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie_axi clock\n");
+ break;
+ }
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
+ break;
+ case IMX6QP: /* FALLTHROUGH */
+ case IMX6Q:
+ /* power up core phy and enable ref clock */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ /*
+		 * The async reset input needs the ref clock to sync internally;
+		 * if the ref clock comes after reset, the internally synced
+		 * reset time is too short to meet the requirement, so add a
+		 * ~10us delay here.
+ */
+ udelay(10);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+ break;
}
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
-
- return 0;
+ return ret;
}
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
@@ -292,43 +359,60 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
goto err_pcie;
}
- /* power up core phy and enable ref clock */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
- /*
- * the async reset input need ref clock to sync internally,
- * when the ref clock comes after reset, internal synced
- * reset time is too short, cannot meet the requirement.
- * add one ~10us delay here.
- */
- udelay(10);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+ ret = imx6_pcie_enable_ref_clk(imx6_pcie);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie ref clock\n");
+ goto err_ref_clk;
+ }
/* allow the clocks to stabilize */
usleep_range(200, 500);
/* Some boards don't have PCIe reset GPIO. */
- if (imx6_pcie->reset_gpio) {
- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
msleep(100);
- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ !imx6_pcie->gpio_active_high);
}
+
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST, 0);
+
+ usleep_range(200, 500);
+ break;
+ case IMX6Q: /* Nothing to do */
+ break;
+ }
+
return 0;
+err_ref_clk:
+ clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
return ret;
-
}
static void imx6_pcie_init_phy(struct pcie_port *pp)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ if (imx6_pcie->variant == IMX6SX)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+ IMX6SX_GPR12_PCIE_RX_EQ_2);
+
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@@ -417,11 +501,15 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
goto err_reset_phy;
}
- /* Allow Gen2 mode after the link is up. */
- tmp = readl(pp->dbi_base + PCIE_RC_LCR);
- tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
- writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+ if (imx6_pcie->link_gen == 2) {
+ /* Allow Gen2 mode after the link is up. */
+ tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+ tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
+ writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+ } else {
+ dev_info(pp->dev, "Link: Gen2 disabled\n");
+ }
/*
* Start Directed Speed Change so the best possible speed both link
@@ -445,8 +533,7 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
}
tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
- dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
-
+ dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
return 0;
err_reset_phy:
@@ -523,6 +610,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
{
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
struct device_node *node = pdev->dev.of_node;
int ret;
@@ -534,6 +622,9 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
pp = &imx6_pcie->pp;
pp->dev = &pdev->dev;
+ imx6_pcie->variant =
+ (enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);
+
/* Added for PCI abort handling */
hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
@@ -544,8 +635,20 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_LOW);
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ imx6_pcie->gpio_active_high = of_property_read_bool(np,
+ "reset-gpio-active-high");
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high ?
+ GPIOF_OUT_INIT_HIGH :
+ GPIOF_OUT_INIT_LOW,
+ "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+ return ret;
+ }
+ }
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
@@ -569,6 +672,16 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->pcie);
}
+ if (imx6_pcie->variant == IMX6SX) {
+ imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
+ "pcie_inbound_axi");
+ if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
+ dev_err(&pdev->dev,
+				"pcie_inbound_axi clock missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_inbound_axi);
+ }
+ }
+
/* Grab GPR config register range */
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
@@ -598,6 +711,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
&imx6_pcie->tx_swing_low))
imx6_pcie->tx_swing_low = 127;
+ /* Limit link speed */
+ ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
+ &imx6_pcie->link_gen);
+ if (ret)
+ imx6_pcie->link_gen = 1;
+
ret = imx6_add_pcie_port(pp, pdev);
if (ret < 0)
return ret;
@@ -615,7 +734,9 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
}
static const struct of_device_id imx6_pcie_of_match[] = {
- { .compatible = "fsl,imx6q-pcie", },
+ { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, },
+ { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
+ { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
{},
};
MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
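
A minimal sketch, not part of the patch, of the OF match-data pattern the new compatible entries above rely on; every demo_* name here is hypothetical:

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

enum demo_variants { DEMO_A, DEMO_B };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-a", .data = (void *)DEMO_A },
	{ .compatible = "vendor,demo-b", .data = (void *)DEMO_B },
	{ /* sentinel */ },
};

static int demo_probe(struct platform_device *pdev)
{
	/* .data of the matched entry comes back as a void pointer */
	enum demo_variants variant =
		(enum demo_variants)(uintptr_t)of_device_get_match_data(&pdev->dev);

	if (variant == DEMO_B)
		dev_info(&pdev->dev, "probing the B variant\n");

	return 0;
}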
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index 6153853ca9c3..41515092eb0d 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
@@ -53,6 +54,21 @@
#define IRQ_STATUS 0x184
#define MSI_IRQ_OFFSET 4
+/* Error IRQ bits */
+#define ERR_AER BIT(5) /* ECRC error */
+#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
+#define ERR_CORR BIT(3) /* Correctable error */
+#define ERR_NONFATAL BIT(2) /* Non-fatal error */
+#define ERR_FATAL BIT(1) /* Fatal error */
+#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */
+#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
+ ERR_NONFATAL | ERR_FATAL | ERR_SYS)
+#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI)
+#define ERR_IRQ_STATUS_RAW 0x1c0
+#define ERR_IRQ_STATUS 0x1c4
+#define ERR_IRQ_ENABLE_SET 0x1c8
+#define ERR_IRQ_ENABLE_CLR 0x1cc
+
/* Config space registers */
#define DEBUG0 0x728
@@ -243,6 +259,28 @@ void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
writel(offset, ks_pcie->va_app_base + IRQ_EOI);
}
+void ks_dw_pcie_enable_error_irq(void __iomem *reg_base)
+{
+ writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET);
+}
+
+irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
+ void __iomem *reg_base)
+{
+ u32 status;
+
+ status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & ERR_FATAL_IRQ)
+ dev_err(dev, "fatal error (status %#010x)\n", status);
+
+ /* Ack the IRQ; status bits are RW1C */
+ writel(status, reg_base + ERR_IRQ_STATUS);
+ return IRQ_HANDLED;
+}
+
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}
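
A minimal sketch, not part of the patch, of the read-then-acknowledge cycle ks_dw_pcie_handle_error_irq() performs above; the DEMO_* offsets stand in for the ERR_IRQ_STATUS registers and are hypothetical:

#include <linux/io.h>
#include <linux/irqreturn.h>

#define DEMO_IRQ_STATUS_RAW	0x1c0	/* latched error causes          */
#define DEMO_IRQ_STATUS		0x1c4	/* write-1-to-clear (RW1C)       */

static irqreturn_t demo_handle_irq(void __iomem *base, u32 enabled_mask)
{
	u32 status = readl(base + DEMO_IRQ_STATUS_RAW) & enabled_mask;

	if (!status)
		return IRQ_NONE;	/* shared line, not our interrupt */

	/* ... report or recover from the latched error bits here ... */

	/*
	 * Write back only the bits that were observed: RW1C semantics
	 * clear exactly those causes, so an error latched after the read
	 * is not lost.
	 */
	writel(status, base + DEMO_IRQ_STATUS);
	return IRQ_HANDLED;
}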
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index b71f55bb0315..6b8301ef21ca 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -159,7 +160,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
char *controller, int *num_irqs)
{
- int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
+ int temp, max_host_irqs, legacy = 1, *host_irqs;
struct device *dev = ks_pcie->pp.dev;
struct device_node *np_pcie = dev->of_node, **np_temp;
@@ -180,11 +181,15 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
*np_temp = of_find_node_by_name(np_pcie, controller);
if (!(*np_temp)) {
dev_err(dev, "Node for %s is absent\n", controller);
- goto out;
+ return -EINVAL;
}
+
temp = of_irq_count(*np_temp);
- if (!temp)
- goto out;
+ if (!temp) {
+ dev_err(dev, "No IRQ entries in %s\n", controller);
+ return -EINVAL;
+ }
+
if (temp > max_host_irqs)
dev_warn(dev, "Too many %s interrupts defined %u\n",
(legacy ? "legacy" : "MSI"), temp);
@@ -198,12 +203,13 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
if (!host_irqs[temp])
break;
}
+
if (temp) {
*num_irqs = temp;
- ret = 0;
+ return 0;
}
-out:
- return ret;
+
+ return -EINVAL;
}
static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
@@ -226,6 +232,9 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
ks_pcie);
}
}
+
+ if (ks_pcie->error_irq > 0)
+ ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base);
}
/*
@@ -289,6 +298,14 @@ static struct pcie_host_ops keystone_pcie_host_ops = {
.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};
+static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
+{
+ struct keystone_pcie *ks_pcie = priv;
+
+ return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev,
+ ks_pcie->va_app_base);
+}
+
static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
struct platform_device *pdev)
{
@@ -309,6 +326,22 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
return ret;
}
+ /*
+ * Index 0 is the platform interrupt for error interrupt
+ * from RC. This is optional.
+ */
+ ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
+ if (ks_pcie->error_irq <= 0)
+ dev_info(&pdev->dev, "no error IRQ defined\n");
+ else {
+ if (request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
+ IRQF_SHARED, "pcie-error-irq", ks_pcie) < 0) {
+ dev_err(&pdev->dev, "failed to request error IRQ %d\n",
+ ks_pcie->error_irq);
+ return ret;
+ }
+ }
+
pp->root_bus_nr = -1;
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
@@ -317,7 +350,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
return ret;
}
- return ret;
+ return 0;
}
static const struct of_device_id ks_pcie_of_match[] = {
@@ -346,7 +379,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *reg_p;
struct phy *phy;
- int ret = 0;
+ int ret;
ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
GFP_KERNEL);
@@ -376,6 +409,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
devm_release_mem_region(dev, res->start, resource_size(res));
pp->dev = dev;
+ ks_pcie->np = dev->of_node;
platform_set_drvdata(pdev, ks_pcie);
ks_pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(ks_pcie->clk)) {
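
A minimal sketch, not part of the patch, of the optional-interrupt handling used in ks_add_pcie_port() above; the demo_* names are hypothetical:

#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static int demo_request_optional_irq(struct device_node *np,
				     irq_handler_t handler, void *priv)
{
	/*
	 * irq_of_parse_and_map() returns 0 when the requested index is
	 * not described in the device tree, so a non-positive value just
	 * means the optional interrupt is not wired up.
	 */
	int irq = irq_of_parse_and_map(np, 0);

	if (irq <= 0)
		return 0;

	return request_irq(irq, handler, IRQF_SHARED, "demo-err-irq", priv);
}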
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
index f0944e8c4b02..a5b0cb2ba4d7 100644
--- a/drivers/pci/host/pci-keystone.h
+++ b/drivers/pci/host/pci-keystone.h
@@ -29,6 +29,9 @@ struct keystone_pcie {
int msi_host_irqs[MAX_MSI_HOST_IRQS];
struct device_node *msi_intc_np;
struct irq_domain *legacy_irq_domain;
+ struct device_node *np;
+
+ int error_irq;
/* Application register space */
void __iomem *va_app_base;
@@ -42,6 +45,9 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
+void ks_dw_pcie_enable_error_irq(void __iomem *reg_base);
+irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
+ void __iomem *reg_base);
int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np);
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 53b79c5f0559..6b451df6502c 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1003,6 +1003,7 @@ static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
pcie->msi->dev = &pcie->pdev->dev;
}
+#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
struct mvebu_pcie *pcie;
@@ -1031,6 +1032,7 @@ static int mvebu_pcie_resume(struct device *dev)
return 0;
}
+#endif
static void mvebu_pcie_port_clk_put(void *data)
{
@@ -1298,9 +1300,8 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
-static struct dev_pm_ops mvebu_pcie_pm_ops = {
- .suspend_noirq = mvebu_pcie_suspend,
- .resume_noirq = mvebu_pcie_resume,
+static const struct dev_pm_ops mvebu_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
static struct platform_driver mvebu_pcie_driver = {
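
A minimal sketch, not part of the patch, of the sleep-ops pattern the mvebu hunk above adopts: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() expands to nothing when CONFIG_PM_SLEEP is disabled, which is why the callbacks themselves also need the #ifdef guard (demo_* names are hypothetical):

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int demo_suspend(struct device *dev)
{
	/* quiesce the controller during the noirq suspend phase */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* reprogram the controller during the noirq resume phase */
	return 0;
}
#endif

/* Fills in .suspend_noirq/.resume_noirq only when CONFIG_PM_SLEEP=y. */
static const struct dev_pm_ops demo_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
};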
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index cabb92a514ac..d4a779761472 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -153,11 +153,11 @@ static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn,
* reserved bits, this makes the code simpler and is OK as the bits
* are not affected by writing zeros to them.
*/
-static u32 thunder_pem_bridge_w1c_bits(int where)
+static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned)
{
u32 w1c_bits = 0;
- switch (where & ~3) {
+ switch (where_aligned) {
case 0x04: /* Command/Status */
case 0x1c: /* Base and I/O Limit/Secondary Status */
w1c_bits = 0xff000000;
@@ -184,12 +184,34 @@ static u32 thunder_pem_bridge_w1c_bits(int where)
return w1c_bits;
}
+/* Some bits must be written to one so they appear to be read-only. */
+static u32 thunder_pem_bridge_w1_bits(u64 where_aligned)
+{
+ u32 w1_bits;
+
+ switch (where_aligned) {
+ case 0x1c: /* I/O Base / I/O Limit, Secondary Status */
+ /* Force 32-bit I/O addressing. */
+ w1_bits = 0x0101;
+ break;
+ case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */
+ /* Force 64-bit addressing */
+ w1_bits = 0x00010001;
+ break;
+ default:
+ w1_bits = 0;
+ break;
+ }
+ return w1_bits;
+}
+
static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct gen_pci *pci = bus->sysdata;
struct thunder_pem_pci *pem_pci;
u64 write_val, read_val;
+ u64 where_aligned = where & ~3ull;
u32 mask = 0;
pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci);
@@ -205,8 +227,7 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
*/
switch (size) {
case 1:
- read_val = where & ~3ull;
- writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
read_val >>= 32;
mask = ~(0xff << (8 * (where & 3)));
@@ -215,8 +236,7 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
val |= (u32)read_val;
break;
case 2:
- read_val = where & ~3ull;
- writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
read_val >>= 32;
mask = ~(0xffff << (8 * (where & 3)));
@@ -244,11 +264,17 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
}
/*
+ * Some bits must be read-only with value of one. Since the
+ * access method allows these to be cleared if a zero is
+ * written, force them to one before writing.
+ */
+ val |= thunder_pem_bridge_w1_bits(where_aligned);
+
+ /*
* Low order bits are the config address, the high order 32
* bits are the data to be written.
*/
- write_val = where & ~3ull;
- write_val |= (((u64)val) << 32);
+ write_val = (((u64)val) << 32) | where_aligned;
writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);
return PCIBIOS_SUCCESSFUL;
}
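
A worked example, not from the patch, of the sub-dword merge done in thunder_pem_bridge_write() above, for a 1-byte write of 0xab at config offset 0x05; it is plain user-space C so the arithmetic can be checked standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int where = 0x05;			/* 1-byte write target     */
	uint32_t val = 0xab;				/* byte to be written      */
	uint32_t read_val = 0x11223344;			/* dword read back from HW */
	uint64_t where_aligned = where & ~3ull;		/* 0x04                    */
	uint32_t mask = ~(0xffu << (8 * (where & 3)));	/* 0xffff00ff              */
	uint64_t write_val;

	read_val &= mask;				/* clear the target byte   */
	val = (val & 0xff) << (8 * (where & 3));	/* 0x0000ab00              */
	val |= read_val;				/* merged dword 0x1122ab44 */

	/* low 32 bits: aligned config offset, high 32 bits: data to write */
	write_val = ((uint64_t)val << 32) | where_aligned;
	printf("write_val = %#018llx\n", (unsigned long long)write_val);
	return 0;
}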
diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c
new file mode 100644
index 000000000000..55723567b5d4
--- /dev/null
+++ b/drivers/pci/host/pcie-armada8k.c
@@ -0,0 +1,262 @@
+/*
+ * PCIe host controller driver for Marvell Armada-8K SoCs
+ *
+ * Armada-8K PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2016 Marvell Technology Group Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+
+#include "pcie-designware.h"
+
+struct armada8k_pcie {
+ void __iomem *base;
+ struct clk *clk;
+ struct pcie_port pp;
+};
+
+#define PCIE_VENDOR_REGS_OFFSET 0x8000
+
+#define PCIE_GLOBAL_CONTROL_REG 0x0
+#define PCIE_APP_LTSSM_EN BIT(2)
+#define PCIE_DEVICE_TYPE_SHIFT 4
+#define PCIE_DEVICE_TYPE_MASK 0xF
+#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */
+
+#define PCIE_GLOBAL_STATUS_REG 0x8
+#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1)
+#define PCIE_GLB_STS_PHY_LINK_UP BIT(9)
+
+#define PCIE_GLOBAL_INT_CAUSE1_REG 0x1C
+#define PCIE_GLOBAL_INT_MASK1_REG 0x20
+#define PCIE_INT_A_ASSERT_MASK BIT(9)
+#define PCIE_INT_B_ASSERT_MASK BIT(10)
+#define PCIE_INT_C_ASSERT_MASK BIT(11)
+#define PCIE_INT_D_ASSERT_MASK BIT(12)
+
+#define PCIE_ARCACHE_TRC_REG 0x50
+#define PCIE_AWCACHE_TRC_REG 0x54
+#define PCIE_ARUSER_REG 0x5C
+#define PCIE_AWUSER_REG 0x60
+/*
+ * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
+ * allocate
+ */
+#define ARCACHE_DEFAULT_VALUE 0x3511
+#define AWCACHE_DEFAULT_VALUE 0x5311
+
+#define DOMAIN_OUTER_SHAREABLE 0x2
+#define AX_USER_DOMAIN_MASK 0x3
+#define AX_USER_DOMAIN_SHIFT 4
+
+#define to_armada8k_pcie(x) container_of(x, struct armada8k_pcie, pp)
+
+static int armada8k_pcie_link_up(struct pcie_port *pp)
+{
+ struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+ u32 reg;
+ u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
+
+ reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG);
+
+ if ((reg & mask) == mask)
+ return 1;
+
+ dev_dbg(pp->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
+ return 0;
+}
+
+static void armada8k_pcie_establish_link(struct pcie_port *pp)
+{
+ struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+ void __iomem *base = pcie->base;
+ u32 reg;
+
+ if (!dw_pcie_link_up(pp)) {
+ /* Disable LTSSM state machine to enable configuration */
+ reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg &= ~(PCIE_APP_LTSSM_EN);
+ writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+ }
+
+ /* Set the device to root complex mode */
+ reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
+ reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
+ writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+
+ /* Set the PCIe master AxCache attributes */
+ writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG);
+ writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG);
+
+ /* Set the PCIe master AxDomain attributes */
+ reg = readl(base + PCIE_ARUSER_REG);
+ reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+ reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+ writel(reg, base + PCIE_ARUSER_REG);
+
+ reg = readl(base + PCIE_AWUSER_REG);
+ reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+ reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+ writel(reg, base + PCIE_AWUSER_REG);
+
+ /* Enable INT A-D interrupts */
+ reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG);
+ reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
+ PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
+ writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG);
+
+ if (!dw_pcie_link_up(pp)) {
+ /* Configuration done. Start LTSSM */
+ reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg |= PCIE_APP_LTSSM_EN;
+ writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+ }
+
+ /* Wait until the link becomes active again */
+ if (dw_pcie_wait_for_link(pp))
+ dev_err(pp->dev, "Link not up after reconfiguration\n");
+}
+
+static void armada8k_pcie_host_init(struct pcie_port *pp)
+{
+ dw_pcie_setup_rc(pp);
+ armada8k_pcie_establish_link(pp);
+}
+
+static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+ struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+ void __iomem *base = pcie->base;
+ u32 val;
+
+ /*
+ * Interrupts are directly handled by the device driver of the
+ * PCI device. However, they are also latched into the PCIe
+ * controller, so we simply discard them.
+ */
+ val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG);
+ writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG);
+
+ return IRQ_HANDLED;
+}
+
+static struct pcie_host_ops armada8k_pcie_host_ops = {
+ .link_up = armada8k_pcie_link_up,
+ .host_init = armada8k_pcie_host_init,
+};
+
+static int armada8k_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->root_bus_nr = -1;
+ pp->ops = &armada8k_pcie_host_ops;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (!pp->irq) {
+ dev_err(dev, "failed to get irq for port\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
+ IRQF_SHARED, "armada8k-pcie", pp);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", pp->irq);
+ return ret;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int armada8k_pcie_probe(struct platform_device *pdev)
+{
+ struct armada8k_pcie *pcie;
+ struct pcie_port *pp;
+ struct device *dev = &pdev->dev;
+ struct resource *base;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ clk_prepare_enable(pcie->clk);
+
+ pp = &pcie->pp;
+ pp->dev = dev;
+ platform_set_drvdata(pdev, pcie);
+
+ /* Get the dw-pcie unit configuration/control registers base. */
+ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+ pp->dbi_base = devm_ioremap_resource(dev, base);
+ if (IS_ERR(pp->dbi_base)) {
+ dev_err(dev, "couldn't remap regs base %p\n", base);
+ ret = PTR_ERR(pp->dbi_base);
+ goto fail;
+ }
+
+ pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET;
+
+ ret = armada8k_add_pcie_port(pp, pdev);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ if (!IS_ERR(pcie->clk))
+ clk_disable_unprepare(pcie->clk);
+
+ return ret;
+}
+
+static const struct of_device_id armada8k_pcie_of_match[] = {
+ { .compatible = "marvell,armada8k-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
+
+static struct platform_driver armada8k_pcie_driver = {
+ .probe = armada8k_pcie_probe,
+ .driver = {
+ .name = "armada8k-pcie",
+ .of_match_table = of_match_ptr(armada8k_pcie_of_match),
+ },
+};
+
+module_platform_driver(armada8k_pcie_driver);
+
+MODULE_DESCRIPTION("Armada 8k PCIe host controller driver");
+MODULE_AUTHOR("Yehuda Yitshak <yehuday@marvell.com>");
+MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index a4cccd356304..aafd766546f3 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -434,7 +434,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct platform_device *pdev = to_platform_device(pp->dev);
struct pci_bus *bus, *child;
struct resource *cfg_res;
- u32 val;
int i, ret;
LIST_HEAD(res);
struct resource_entry *win;
@@ -544,25 +543,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (pp->ops->host_init)
pp->ops->host_init(pp);
- /*
- * If the platform provides ->rd_other_conf, it means the platform
- * uses its own address translation component rather than ATU, so
- * we should not program the ATU here.
- */
- if (!pp->ops->rd_other_conf)
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_MEM, pp->mem_base,
- pp->mem_bus_addr, pp->mem_size);
-
- dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
-
- /* program correct class for RC */
- dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
-
- dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
- val |= PORT_LOGIC_SPEED_CHANGE;
- dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
-
pp->root_bus_nr = pp->busn->start;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
@@ -728,8 +708,6 @@ static struct pci_ops dw_pcie_ops = {
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
- u32 membase;
- u32 memlimit;
/* set the number of lanes */
dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
@@ -788,18 +766,31 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= 0x00010100;
dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
- /* setup memory base, memory limit */
- membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
- memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
- val = memlimit | membase;
- dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
-
/* setup command register */
dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_rc(pp, val, PCI_COMMAND);
+
+ /*
+ * If the platform provides ->rd_other_conf, it means the platform
+ * uses its own address translation component rather than ATU, so
+ * we should not program the ATU here.
+ */
+ if (!pp->ops->rd_other_conf)
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_TYPE_MEM, pp->mem_base,
+ pp->mem_bus_addr, pp->mem_size);
+
+ dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+
+ /* program correct class for RC */
+ dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+
+ dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 5139e6443bbd..3479d30e2be8 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -819,7 +819,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_bridge_init(pcie);
if (err) {
- dev_err(pcie->dev, "HW Initalization failed\n");
+ dev_err(pcie->dev, "HW Initialization failed\n");
return err;
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 2f6d3a1c1726..f6221d739f59 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -138,6 +138,8 @@ static union apci_descriptor *ibm_slot_from_id(int id)
char *table;
size = ibm_get_table_from_acpi(&table);
+ if (size < 0)
+ return NULL;
des = (union apci_descriptor *)table;
if (memcmp(des->header.sig, "aPCI", 4) != 0)
goto ibm_slot_done;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index e982010f0ed1..cbb13be1ba28 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1008,6 +1008,9 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
+ if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
+ return -EINVAL;
+
if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
@@ -1024,10 +1027,6 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
pci_resource_to_user(pdev, i, res, &start, &end);
vma->vm_pgoff += start >> PAGE_SHIFT;
mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
-
- if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start))
- return -EINVAL;
-
return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 25e0327d4429..7542001de9b0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2389,7 +2389,7 @@ out:
return offset + ent_size;
}
-/* Enhanced Allocation Initalization */
+/* Enhanced Allocation Initialization */
void pci_ea_init(struct pci_dev *dev)
{
int ea;
@@ -2547,7 +2547,7 @@ void pci_request_acs(void)
* pci_std_enable_acs - enable ACS on devices using standard ACS capabilites
* @dev: the PCI device
*/
-static int pci_std_enable_acs(struct pci_dev *dev)
+static void pci_std_enable_acs(struct pci_dev *dev)
{
int pos;
u16 cap;
@@ -2555,7 +2555,7 @@ static int pci_std_enable_acs(struct pci_dev *dev)
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
if (!pos)
- return -ENODEV;
+ return;
pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
@@ -2573,8 +2573,6 @@ static int pci_std_enable_acs(struct pci_dev *dev)
ctrl |= (cap & PCI_ACS_UF);
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
-
- return 0;
}
/**
@@ -2586,10 +2584,10 @@ void pci_enable_acs(struct pci_dev *dev)
if (!pci_acs_enable)
return;
- if (!pci_std_enable_acs(dev))
+ if (!pci_dev_specific_enable_acs(dev))
return;
- pci_dev_specific_enable_acs(dev);
+ pci_std_enable_acs(dev);
}
static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
@@ -4578,6 +4576,37 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
return 0;
}
+/**
+ * pci_add_dma_alias - Add a DMA devfn alias for a device
+ * @dev: the PCI device for which alias is added
+ * @devfn: alias slot and function
+ *
+ * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
+ * It should be called early, preferably as PCI fixup header quirk.
+ */
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
+{
+ if (!dev->dma_alias_mask)
+ dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
+ sizeof(long), GFP_KERNEL);
+ if (!dev->dma_alias_mask) {
+ dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
+ return;
+ }
+
+ set_bit(devfn, dev->dma_alias_mask);
+ dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+}
+
+bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
+{
+ return (dev1->dma_alias_mask &&
+ test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
+ (dev2->dma_alias_mask &&
+ test_bit(dev1->devfn, dev2->dma_alias_mask));
+}
+
bool pci_device_is_present(struct pci_dev *pdev)
{
u32 v;
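
A minimal usage sketch of the new pci_add_dma_alias() helper, assuming the matching prototype lands in <linux/pci.h> as part of this series; the vendor/device IDs are hypothetical, and the real callers are the quirks.c hunks further down in this diff:

#include <linux/pci.h>

/*
 * Declare that this device also masters DMA as function 1 of its own
 * slot; pci_for_each_dma_alias() will then visit that extra requester
 * ID so the IOMMU layers can map it alongside the real one.
 */
static void demo_dma_alias_fixup(struct pci_dev *pdev)
{
	pci_add_dma_alias(pdev, PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, demo_dma_alias_fixup);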
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 7d82f6d47e68..587aef36030d 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -67,17 +67,14 @@ static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
#ifdef CONFIG_ACPI
-int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+void pcie_port_acpi_setup(struct pci_dev *port, int *mask);
-static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask)
{
- return pcie_port_acpi_setup(port, mask);
+ pcie_port_acpi_setup(port, mask);
}
#else /* !CONFIG_ACPI */
-static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
-{
- return 0;
-}
+static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask){}
#endif /* !CONFIG_ACPI */
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index 44296eb729d3..6b8c2f1d0e71 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -32,22 +32,22 @@
* NOTE: It turns out that we cannot do that for individual port services
* separately, because that would make some systems work incorrectly.
*/
-int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
+void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
{
struct acpi_pci_root *root;
acpi_handle handle;
u32 flags;
if (acpi_pci_disabled)
- return 0;
+ return;
handle = acpi_find_root_bridge_handle(port);
if (!handle)
- return -EINVAL;
+ return;
root = acpi_pci_find_root(handle);
if (!root)
- return -ENODEV;
+ return;
flags = root->osc_control_set;
@@ -58,6 +58,4 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
*srv_mask |= PCIE_PORT_SERVICE_PME;
if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
*srv_mask |= PCIE_PORT_SERVICE_AER;
-
- return 0;
}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 2ab0f424a378..050069f32f77 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -256,7 +256,6 @@ static int get_port_device_capability(struct pci_dev *dev)
int services = 0;
u32 reg32;
int cap_mask = 0;
- int err;
if (pcie_ports_disabled)
return 0;
@@ -266,11 +265,8 @@ static int get_port_device_capability(struct pci_dev *dev)
if (pci_aer_available())
cap_mask |= PCIE_PORT_SERVICE_AER;
- if (pcie_ports_auto) {
- err = pcie_port_platform_notify(dev, &cap_mask);
- if (err)
- return 0;
- }
+ if (pcie_ports_auto)
+ pcie_port_platform_notify(dev, &cap_mask);
/* Hot-Plug Capable */
if ((cap_mask & PCIE_PORT_SERVICE_HP) &&
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8004f67c57ec..ae7daeb83e21 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1537,6 +1537,7 @@ static void pci_release_dev(struct device *dev)
pcibios_release_device(pci_dev);
pci_bus_put(pci_dev->bus);
kfree(pci_dev->driver_override);
+ kfree(pci_dev->dma_alias_mask);
kfree(pci_dev);
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8e678027b900..ee72ebe18f4b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3150,6 +3150,39 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
quirk_broken_intx_masking);
+/*
+ * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
+ * DisINTx can be set but the interrupt status bit is non-functional.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2,
+ quirk_broken_intx_masking);
+
static void quirk_no_bus_reset(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
@@ -3185,6 +3218,29 @@ static void quirk_no_pm_reset(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
+/*
+ * Thunderbolt controllers with broken MSI hotplug signaling:
+ * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part
+ * of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge).
+ */
+static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
+{
+ if (pdev->is_hotplug_bridge &&
+ (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
+ pdev->revision <= 1))
+ pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
+ quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
+ quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
+ quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+ quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
+ quirk_thunderbolt_hotplug_msi);
+
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
@@ -3232,7 +3288,8 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
acpi_execute_simple_method(SXIO, NULL, 0);
acpi_execute_simple_method(SXLV, NULL, 0);
}
-DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1547,
+DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
quirk_apple_poweroff_thunderbolt);
/*
@@ -3266,9 +3323,11 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
if (!nhi)
goto out;
if (nhi->vendor != PCI_VENDOR_ID_INTEL
- || (nhi->device != 0x1547 && nhi->device != 0x156c)
- || nhi->subsystem_vendor != 0x2222
- || nhi->subsystem_device != 0x1111)
+ || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
+ nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+ nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
+ || nhi->subsystem_vendor != 0x2222
+ || nhi->subsystem_device != 0x1111)
goto out;
dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
device_pm_wait_for_dev(&dev->dev, &nhi->dev);
@@ -3276,9 +3335,14 @@ out:
pci_dev_put(nhi);
pci_dev_put(sibling);
}
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1547,
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
quirk_apple_wait_for_thunderbolt);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x156d,
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+ quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
quirk_apple_wait_for_thunderbolt);
#endif
@@ -3610,10 +3674,8 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
static void quirk_dma_func0_alias(struct pci_dev *dev)
{
- if (PCI_FUNC(dev->devfn) != 0) {
- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
- }
+ if (PCI_FUNC(dev->devfn) != 0)
+ pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}
/*
@@ -3626,10 +3688,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
- if (PCI_FUNC(dev->devfn) != 1) {
- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1);
- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
- }
+ if (PCI_FUNC(dev->devfn) != 1)
+ pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
}
/*
@@ -3695,13 +3755,8 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev)
const struct pci_device_id *id;
id = pci_match_id(fixed_dma_alias_tbl, dev);
- if (id) {
- dev->dma_alias_devfn = id->driver_data;
- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
- dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
- PCI_SLOT(dev->dma_alias_devfn),
- PCI_FUNC(dev->dma_alias_devfn));
- }
+ if (id)
+ pci_add_dma_alias(dev, id->driver_data);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
@@ -3734,6 +3789,21 @@ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
/*
+ * MIC x200 NTB forwards PCIe traffic using multiple alien RIDs. They have to
+ * be added as aliases to the DMA device in order to allow buffer access
+ * when IOMMU is enabled. The following devfns have to match the RIT-LUT
+ * table programmed in the EEPROM.
+ */
+static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
+{
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
+
+/*
* Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
* class code. Fix it.
*/
@@ -3936,6 +4006,55 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
return acs_flags & ~flags ? 0 : 1;
}
+/*
+ * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in
+ * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2,
+ * 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and
+ * control registers whereas the PCIe spec packs them into words (Rev 3.0,
+ * 7.16 ACS Extended Capability). The bit definitions are correct, but the
+ * control register is at offset 8 instead of 6 and we should probably use
+ * dword accesses to them. This applies to the following PCI Device IDs, as
+ * found in volume 1 of the datasheet[2]:
+ *
+ * 0xa110-0xa11f Sunrise Point-H PCI Express Root Port #{0-16}
+ * 0xa167-0xa16a Sunrise Point-H PCI Express Root Port #{17-20}
+ *
+ * N.B. This doesn't fix what lspci shows.
+ *
+ * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
+ * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
+ */
+static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
+{
+ return pci_is_pcie(dev) &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
+ ((dev->device & ~0xf) == 0xa110 ||
+ (dev->device >= 0xa167 && dev->device <= 0xa16a));
+}
+
+#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
+
+static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ int pos;
+ u32 cap, ctrl;
+
+ if (!pci_quirk_intel_spt_pch_acs_match(dev))
+ return -ENOTTY;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return -ENOTTY;
+
+ /* see pci_acs_flags_enabled() */
+ pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
+ acs_flags &= (cap | PCI_ACS_EC);
+
+ pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
+
+ return acs_flags & ~ctrl ? 0 : 1;
+}
+
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
@@ -4024,6 +4143,7 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
/* Intel PCH root ports */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
/* Cavium ThunderX */
@@ -4159,16 +4279,44 @@ static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
return 0;
}
+static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
+{
+ int pos;
+ u32 cap, ctrl;
+
+ if (!pci_quirk_intel_spt_pch_acs_match(dev))
+ return -ENOTTY;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return -ENOTTY;
+
+ pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
+ pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
+
+ ctrl |= (cap & PCI_ACS_SV);
+ ctrl |= (cap & PCI_ACS_RR);
+ ctrl |= (cap & PCI_ACS_CR);
+ ctrl |= (cap & PCI_ACS_UF);
+
+ pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
+
+ dev_info(&dev->dev, "Intel SPT PCH root port ACS workaround enabled\n");
+
+ return 0;
+}
+
static const struct pci_dev_enable_acs {
u16 vendor;
u16 device;
int (*enable_acs)(struct pci_dev *dev);
} pci_dev_enable_acs[] = {
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs },
{ 0 }
};
-void pci_dev_specific_enable_acs(struct pci_dev *dev)
+int pci_dev_specific_enable_acs(struct pci_dev *dev)
{
const struct pci_dev_enable_acs *i;
int ret;
@@ -4180,9 +4328,11 @@ void pci_dev_specific_enable_acs(struct pci_dev *dev)
i->device == (u16)PCI_ANY_ID)) {
ret = i->enable_acs(dev);
if (ret >= 0)
- return;
+ return ret;
}
}
+
+ return -ENOTTY;
}
/*
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index a20ce7d5e2a7..33e0f033a48e 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -40,11 +40,15 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
* If the device is broken and uses an alias requester ID for
* DMA, iterate over that too.
*/
- if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) {
- ret = fn(pdev, PCI_DEVID(pdev->bus->number,
- pdev->dma_alias_devfn), data);
- if (ret)
- return ret;
+ if (unlikely(pdev->dma_alias_mask)) {
+ u8 devfn;
+
+ for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) {
+ ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn),
+ data);
+ if (ret)
+ return ret;
+ }
}
for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {