Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig                      |    2
-rw-r--r--  drivers/pci/host/Kconfig                 |   27
-rw-r--r--  drivers/pci/host/Makefile                |    2
-rw-r--r--  drivers/pci/host/pci-aardvark.c          |   51
-rw-r--r--  drivers/pci/host/pci-dra7xx.c            |  134
-rw-r--r--  drivers/pci/host/pci-exynos.c            |  230
-rw-r--r--  drivers/pci/host/pci-host-common.c       |   15
-rw-r--r--  drivers/pci/host/pci-hyperv.c            |   65
-rw-r--r--  drivers/pci/host/pci-imx6.c              |  259
-rw-r--r--  drivers/pci/host/pci-keystone-dw.c       |  123
-rw-r--r--  drivers/pci/host/pci-keystone.c          |   33
-rw-r--r--  drivers/pci/host/pci-keystone.h          |    9
-rw-r--r--  drivers/pci/host/pci-layerscape.c        |   65
-rw-r--r--  drivers/pci/host/pci-mvebu.c             |   21
-rw-r--r--  drivers/pci/host/pci-rcar-gen2.c         |   46
-rw-r--r--  drivers/pci/host/pci-tegra.c             |  289
-rw-r--r--  drivers/pci/host/pci-versatile.c         |    8
-rw-r--r--  drivers/pci/host/pci-xgene.c             |  151
-rw-r--r--  drivers/pci/host/pcie-altera-msi.c       |   20
-rw-r--r--  drivers/pci/host/pcie-altera.c           |  278
-rw-r--r--  drivers/pci/host/pcie-armada8k.c         |   78
-rw-r--r--  drivers/pci/host/pcie-artpec6.c          |  117
-rw-r--r--  drivers/pci/host/pcie-designware-plat.c  |   28
-rw-r--r--  drivers/pci/host/pcie-designware.c       |  229
-rw-r--r--  drivers/pci/host/pcie-designware.h       |   15
-rw-r--r--  drivers/pci/host/pcie-hisi.c             |   86
-rw-r--r--  drivers/pci/host/pcie-iproc-bcma.c       |   14
-rw-r--r--  drivers/pci/host/pcie-iproc-platform.c   |   27
-rw-r--r--  drivers/pci/host/pcie-iproc.c            |   52
-rw-r--r--  drivers/pci/host/pcie-qcom.c             |   58
-rw-r--r--  drivers/pci/host/pcie-rcar.c             |  276
-rw-r--r--  drivers/pci/host/pcie-rockchip.c         | 1227
-rw-r--r--  drivers/pci/host/pcie-spear13xx.c        |  119
-rw-r--r--  drivers/pci/host/pcie-xilinx-nwl.c       |  198
-rw-r--r--  drivers/pci/host/pcie-xilinx.c           |  152
-rw-r--r--  drivers/pci/host/vmd.c                   |  768
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug.h       |    2
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c  |   10
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c   |   18
-rw-r--r--  drivers/pci/hotplug/pciehp.h             |    3
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c        |   23
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c        |   84
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c         |  121
-rw-r--r--  drivers/pci/hotplug/pnv_php.c            |  283
-rw-r--r--  drivers/pci/iov.c                        |    5
-rw-r--r--  drivers/pci/msi.c                        |  172
-rw-r--r--  drivers/pci/pci-acpi.c                   |   22
-rw-r--r--  drivers/pci/pci-driver.c                 |   17
-rw-r--r--  drivers/pci/pci-mid.c                    |    7
-rw-r--r--  drivers/pci/pci.c                        |   99
-rw-r--r--  drivers/pci/pci.h                        |    9
-rw-r--r--  drivers/pci/pcie/Kconfig                 |   11
-rw-r--r--  drivers/pci/pcie/Makefile                |    1
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c            |   36
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h            |    9
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c       |   61
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c   |    6
-rw-r--r--  drivers/pci/pcie/pcie-dpc.c              |   18
-rw-r--r--  drivers/pci/pcie/pme.c                   |   16
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c           |   17
-rw-r--r--  drivers/pci/pcie/ptm.c                   |  142
-rw-r--r--  drivers/pci/probe.c                      |    6
-rw-r--r--  drivers/pci/quirks.c                     |   29
-rw-r--r--  drivers/pci/remove.c                     |    3
-rw-r--r--  drivers/pci/setup-bus.c                  |    8
65 files changed, 4547 insertions(+), 1963 deletions(-)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 67f9916ff14d..6555eb78d91c 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -25,7 +25,7 @@ config PCI_MSI
If you don't know what to do here, say Y.
config PCI_MSI_IRQ_DOMAIN
- def_bool ARM || ARM64 || X86
+ def_bool ARC || ARM || ARM64 || X86
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 9b485d873b0d..d7e7c0a827c3 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -274,4 +274,31 @@ config PCIE_ARTPEC6
Say Y here to enable PCIe controller support on Axis ARTPEC-6
SoCs. This PCIe controller uses the DesignWare core.
+config PCIE_ROCKCHIP
+ bool "Rockchip PCIe controller"
+ depends on ARCH_ROCKCHIP
+ depends on OF
+ depends on PCI_MSI_IRQ_DOMAIN
+ select MFD_SYSCON
+ help
+ Say Y here if you want internal PCI support on Rockchip SoC.
+ There is 1 internal PCIe port available to support GEN2 with
+ 4 slots.
+
+config VMD
+ depends on PCI_MSI && X86_64
+ tristate "Intel Volume Management Device Driver"
+ default N
+ ---help---
+ Adds support for the Intel Volume Management Device (VMD). VMD is a
+ secondary PCI host bridge that allows PCI Express root ports,
+ and devices attached to them, to be removed from the default
+ PCI domain and placed within the VMD domain. This provides
+ more bus resources than are otherwise possible with a
+ single domain. If you know your system provides one of these and
+ has devices attached to it, say Y; if you are not sure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vmd.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 88434101e4c4..084cb4983645 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -31,3 +31,5 @@ obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_VMD) += vmd.o
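The two new Makefile lines hook the drivers into the kernel's conditional build: with CONFIG_VMD=m the object is built as the vmd module, with =y it is linked into the kernel image. As a rough, illustrative sketch (placeholder names and a hypothetical device ID, not the real vmd.c), a tristate PCI driver of this kind boils down to:

#include <linux/module.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Managed enable: resources are released automatically on unbind. */
	return pcim_enable_device(pdev);
}

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },	/* hypothetical ID */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
};
module_pci_driver(example_driver);

MODULE_LICENSE("GPL v2");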
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index ef9893fa3176..4fce494271cc 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -230,20 +230,20 @@ static int advk_pcie_link_up(struct advk_pcie *pcie)
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
+ struct device *dev = &pcie->pdev->dev;
int retries;
/* check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
if (advk_pcie_link_up(pcie)) {
- dev_info(&pcie->pdev->dev, "link up\n");
+ dev_info(dev, "link up\n");
return 0;
}
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_err(&pcie->pdev->dev, "link never came up\n");
-
+ dev_err(dev, "link never came up\n");
return -ETIMEDOUT;
}
@@ -376,6 +376,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
{
+ struct device *dev = &pcie->pdev->dev;
u32 reg;
unsigned int status;
char *strcomp_status, *str_posted;
@@ -407,12 +408,13 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
else
str_posted = "Posted";
- dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+ dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
}
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
+ struct device *dev = &pcie->pdev->dev;
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
@@ -426,7 +428,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return 0;
}
- dev_err(&pcie->pdev->dev, "config read/write timed out\n");
+ dev_err(dev, "config read/write timed out\n");
return -ETIMEDOUT;
}
@@ -560,10 +562,11 @@ static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
{
+ struct device *dev = &pcie->pdev->dev;
+
mutex_lock(&pcie->msi_used_lock);
if (!test_bit(hwirq, pcie->msi_irq_in_use))
- dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n",
- hwirq);
+ dev_err(dev, "trying to free unused MSI#%d\n", hwirq);
else
clear_bit(hwirq, pcie->msi_irq_in_use);
mutex_unlock(&pcie->msi_used_lock);
@@ -848,7 +851,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
struct device_node *np = dev->of_node;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
resource_size_t iobase;
INIT_LIST_HEAD(&pcie->resources);
@@ -862,7 +865,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
if (err)
goto out_release_res;
- resource_list_for_each_entry(win, &pcie->resources) {
+ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
struct resource *res = win->res;
switch (resource_type(res)) {
@@ -874,9 +877,11 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
lower_32_bits(res->start),
OB_PCIE_IO);
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+ resource_list_destroy_entry(win);
+ }
break;
case IORESOURCE_MEM:
advk_pcie_set_ob_win(pcie, 0,
@@ -908,6 +913,7 @@ out_release_res:
static int advk_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct resource *res;
struct pci_bus *bus, *child;
@@ -915,33 +921,29 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device_node *msi_node;
int ret, irq;
- pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie),
- GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->pdev = pdev;
- platform_set_drvdata(pdev, pcie);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pcie->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pcie->base)) {
- dev_err(&pdev->dev, "Failed to map registers\n");
+ pcie->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- }
irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
+ ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed to register interrupt\n");
+ dev_err(dev, "Failed to register interrupt\n");
return ret;
}
ret = advk_pcie_parse_request_of_pci_ranges(pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed to parse resources\n");
+ dev_err(dev, "Failed to parse resources\n");
return ret;
}
@@ -949,24 +951,24 @@ static int advk_pcie_probe(struct platform_device *pdev)
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed to initialize irq\n");
+ dev_err(dev, "Failed to initialize irq\n");
return ret;
}
ret = advk_pcie_init_msi_irq_domain(pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed to initialize irq\n");
+ dev_err(dev, "Failed to initialize irq\n");
advk_pcie_remove_irq_domain(pcie);
return ret;
}
- msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0);
+ msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
if (msi_node)
msi = of_pci_find_msi_chip_by_node(msi_node);
else
msi = NULL;
- bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops,
+ bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
pcie, &pcie->resources, &pcie->msi);
if (!bus) {
advk_pcie_remove_msi_irq_domain(pcie);
@@ -980,7 +982,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
-
return 0;
}
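The switch from resource_list_for_each_entry() to the _safe variant above is what makes the new error path legal: resource_list_destroy_entry() unlinks and frees the current node, so the iterator must already hold a pointer to the next one. A condensed sketch of the pattern, using the same kernel APIs with err, dev, and iobase as in the probe path above:

struct resource_entry *win, *tmp;

resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
	struct resource *res = win->res;

	if (resource_type(res) != IORESOURCE_IO)
		continue;

	err = pci_remap_iospace(res, iobase);
	if (err) {
		dev_warn(dev, "error %d: failed to map resource %pR\n",
			 err, res);
		resource_list_destroy_entry(win);	/* unlink + free */
	}
}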
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 81b3949a26db..9595fad63f6f 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -15,7 +15,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -64,11 +64,10 @@
#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
struct dra7xx_pcie {
- void __iomem *base;
- struct phy **phy;
- int phy_count;
- struct device *dev;
struct pcie_port pp;
+ void __iomem *base; /* DT ti_conf */
+ int phy_count; /* DT phy-names count */
+ struct phy **phy;
};
#define to_dra7xx_pcie(x) container_of((x), struct dra7xx_pcie, pp)
@@ -84,17 +83,6 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
-static inline u32 dra7xx_pcie_readl_rc(struct pcie_port *pp, u32 offset)
-{
- return readl(pp->dbi_base + offset);
-}
-
-static inline void dra7xx_pcie_writel_rc(struct pcie_port *pp, u32 offset,
- u32 value)
-{
- writel(value, pp->dbi_base + offset);
-}
-
static int dra7xx_pcie_link_up(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
@@ -103,13 +91,14 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp)
return !!(reg & LINK_UP);
}
-static int dra7xx_pcie_establish_link(struct pcie_port *pp)
+static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
{
- struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+ struct pcie_port *pp = &dra7xx->pp;
+ struct device *dev = pp->dev;
u32 reg;
if (dw_pcie_link_up(pp)) {
- dev_err(pp->dev, "link is already up\n");
+ dev_err(dev, "link is already up\n");
return 0;
}
@@ -120,10 +109,8 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp)
return dw_pcie_wait_for_link(pp);
}
-static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
- struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
-
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
~INTERRUPTS);
dra7xx_pcie_writel(dra7xx,
@@ -142,6 +129,8 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
@@ -149,10 +138,10 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
- dra7xx_pcie_establish_link(pp);
+ dra7xx_pcie_establish_link(dra7xx);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
- dra7xx_pcie_enable_interrupts(pp);
+ dra7xx_pcie_enable_interrupts(dra7xx);
}
static struct pcie_host_ops dra7xx_pcie_host_ops = {
@@ -196,8 +185,8 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
- struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+ struct dra7xx_pcie *dra7xx = arg;
+ struct pcie_port *pp = &dra7xx->pp;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
@@ -223,51 +212,51 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
struct dra7xx_pcie *dra7xx = arg;
+ struct device *dev = dra7xx->pp.dev;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
if (reg & ERR_SYS)
- dev_dbg(dra7xx->dev, "System Error\n");
+ dev_dbg(dev, "System Error\n");
if (reg & ERR_FATAL)
- dev_dbg(dra7xx->dev, "Fatal Error\n");
+ dev_dbg(dev, "Fatal Error\n");
if (reg & ERR_NONFATAL)
- dev_dbg(dra7xx->dev, "Non Fatal Error\n");
+ dev_dbg(dev, "Non Fatal Error\n");
if (reg & ERR_COR)
- dev_dbg(dra7xx->dev, "Correctable Error\n");
+ dev_dbg(dev, "Correctable Error\n");
if (reg & ERR_AXI)
- dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n");
+ dev_dbg(dev, "AXI tag lookup fatal Error\n");
if (reg & ERR_ECRC)
- dev_dbg(dra7xx->dev, "ECRC Error\n");
+ dev_dbg(dev, "ECRC Error\n");
if (reg & PME_TURN_OFF)
- dev_dbg(dra7xx->dev,
+ dev_dbg(dev,
"Power Management Event Turn-Off message received\n");
if (reg & PME_TO_ACK)
- dev_dbg(dra7xx->dev,
+ dev_dbg(dev,
"Power Management Turn-Off Ack message received\n");
if (reg & PM_PME)
- dev_dbg(dra7xx->dev,
- "PM Power Management Event message received\n");
+ dev_dbg(dev, "PM Power Management Event message received\n");
if (reg & LINK_REQ_RST)
- dev_dbg(dra7xx->dev, "Link Request Reset\n");
+ dev_dbg(dev, "Link Request Reset\n");
if (reg & LINK_UP_EVT)
- dev_dbg(dra7xx->dev, "Link-up state change\n");
+ dev_dbg(dev, "Link-up state change\n");
if (reg & CFG_BME_EVT)
- dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n");
+ dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
if (reg & CFG_MSE_EVT)
- dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n");
+ dev_dbg(dev, "CFG 'Memory Space Enable' change\n");
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
@@ -278,13 +267,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
struct platform_device *pdev)
{
int ret;
- struct pcie_port *pp;
+ struct pcie_port *pp = &dra7xx->pp;
+ struct device *dev = pp->dev;
struct resource *res;
- struct device *dev = &pdev->dev;
-
- pp = &dra7xx->pp;
- pp->dev = dev;
- pp->ops = &dra7xx_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 1);
if (pp->irq < 0) {
@@ -292,12 +277,11 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
return -EINVAL;
}
- ret = devm_request_irq(&pdev->dev, pp->irq,
- dra7xx_pcie_msi_irq_handler,
+ ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "dra7-pcie-msi", pp);
+ "dra7-pcie-msi", dra7xx);
if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
+ dev_err(dev, "failed to request irq\n");
return ret;
}
@@ -314,7 +298,7 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(dra7xx->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -332,6 +316,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
void __iomem *base;
struct resource *res;
struct dra7xx_pcie *dra7xx;
+ struct pcie_port *pp;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
char name[10];
@@ -343,6 +328,10 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (!dra7xx)
return -ENOMEM;
+ pp = &dra7xx->pp;
+ pp->dev = dev;
+ pp->ops = &dra7xx_pcie_host_ops;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "missing IRQ resource\n");
@@ -390,7 +379,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->base = base;
dra7xx->phy = phy;
- dra7xx->dev = dev;
dra7xx->phy_count = phy_count;
pm_runtime_enable(dev);
@@ -407,7 +395,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags,
"pcie_reset");
if (ret) {
- dev_err(&pdev->dev, "gpio%d request failed, ret %d\n",
+ dev_err(dev, "gpio%d request failed, ret %d\n",
gpio_sel, ret);
goto err_gpio;
}
@@ -420,12 +408,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
reg &= ~LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- platform_set_drvdata(pdev, dra7xx);
-
ret = dra7xx_add_pcie_port(dra7xx, pdev);
if (ret < 0)
goto err_gpio;
+ platform_set_drvdata(pdev, dra7xx);
return 0;
err_gpio:
@@ -443,25 +430,6 @@ err_phy:
return ret;
}
-static int __exit dra7xx_pcie_remove(struct platform_device *pdev)
-{
- struct dra7xx_pcie *dra7xx = platform_get_drvdata(pdev);
- struct pcie_port *pp = &dra7xx->pp;
- struct device *dev = &pdev->dev;
- int count = dra7xx->phy_count;
-
- if (pp->irq_domain)
- irq_domain_remove(pp->irq_domain);
- pm_runtime_put(dev);
- pm_runtime_disable(dev);
- while (count--) {
- phy_power_off(dra7xx->phy[count]);
- phy_exit(dra7xx->phy[count]);
- }
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
@@ -470,9 +438,9 @@ static int dra7xx_pcie_suspend(struct device *dev)
u32 val;
/* clear MSE */
- val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+ val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= ~PCI_COMMAND_MEMORY;
- dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+ dw_pcie_writel_rc(pp, PCI_COMMAND, val);
return 0;
}
@@ -484,9 +452,9 @@ static int dra7xx_pcie_resume(struct device *dev)
u32 val;
/* set MSE */
- val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+ val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val |= PCI_COMMAND_MEMORY;
- dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+ dw_pcie_writel_rc(pp, PCI_COMMAND, val);
return 0;
}
@@ -545,19 +513,13 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
{ .compatible = "ti,dra7-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
static struct platform_driver dra7xx_pcie_driver = {
- .remove = __exit_p(dra7xx_pcie_remove),
.driver = {
.name = "dra7-pcie",
.of_match_table = of_dra7xx_pcie_match,
+ .suppress_bind_attrs = true,
.pm = &dra7xx_pcie_pm_ops,
},
};
-
-module_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
-
-MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
-MODULE_DESCRIPTION("TI PCIe controller driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
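Two things happen in this file: the driver becomes builtin-only (builtin_platform_driver_probe() plus suppress_bind_attrs replace the module boilerplate and the remove path), and internal helpers now take struct dra7xx_pcie directly, reaching the embedded port via &dra7xx->pp. Going the other way, callbacks that only receive the pcie_port use container_of() to recover the wrapper. A self-contained sketch of that idiom with generic names:

#include <linux/kernel.h>	/* container_of() */

struct core_port { int dummy; };	/* stands in for struct pcie_port */

struct wrapper_pcie {
	int private_state;
	struct core_port pp;	/* embedded member handed to core code */
};

#define to_wrapper_pcie(port) container_of((port), struct wrapper_pcie, pp)

/* Core code calls back with only the embedded member... */
static void wrapper_host_init(struct core_port *port)
{
	/* ...and container_of() recovers the full driver object. */
	struct wrapper_pcie *wrapper = to_wrapper_pcie(port);

	wrapper->private_state++;
}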
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index 219976103efc..f1c544bb8b68 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -16,7 +16,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -29,13 +29,13 @@
#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
struct exynos_pcie {
- void __iomem *elbi_base;
- void __iomem *phy_base;
- void __iomem *block_base;
+ struct pcie_port pp;
+ void __iomem *elbi_base; /* DT 0th resource */
+ void __iomem *phy_base; /* DT 1st resource */
+ void __iomem *block_base; /* DT 2nd resource */
int reset_gpio;
struct clk *clk;
struct clk *bus_clk;
- struct pcie_port pp;
};
/* PCIe ELBI registers */
@@ -102,40 +102,40 @@ struct exynos_pcie {
#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
#define PCIE_PHY_TRSV3_LVCC 0x31c
-static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+static void exynos_elb_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
- writel(val, pcie->elbi_base + reg);
+ writel(val, exynos_pcie->elbi_base + reg);
}
-static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg)
+static u32 exynos_elb_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
- return readl(pcie->elbi_base + reg);
+ return readl(exynos_pcie->elbi_base + reg);
}
-static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+static void exynos_phy_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
- writel(val, pcie->phy_base + reg);
+ writel(val, exynos_pcie->phy_base + reg);
}
-static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg)
+static u32 exynos_phy_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
- return readl(pcie->phy_base + reg);
+ return readl(exynos_pcie->phy_base + reg);
}
-static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+static void exynos_blk_writel(struct exynos_pcie *exynos_pcie, u32 val, u32 reg)
{
- writel(val, pcie->block_base + reg);
+ writel(val, exynos_pcie->block_base + reg);
}
-static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg)
+static u32 exynos_blk_readl(struct exynos_pcie *exynos_pcie, u32 reg)
{
- return readl(pcie->block_base + reg);
+ return readl(exynos_pcie->block_base + reg);
}
-static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *exynos_pcie,
+ bool on)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
if (on) {
val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
@@ -148,10 +148,10 @@ static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
}
}
-static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
+static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *exynos_pcie,
+ bool on)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
if (on) {
val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
@@ -164,10 +164,9 @@ static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
}
}
-static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
+static void exynos_pcie_assert_core_reset(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
@@ -177,10 +176,9 @@ static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET);
}
-static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
+static void exynos_pcie_deassert_core_reset(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
@@ -193,18 +191,14 @@ static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET);
}
-static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
+static void exynos_pcie_assert_phy_reset(struct exynos_pcie *exynos_pcie)
{
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
-
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET);
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET);
}
-static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
+static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *exynos_pcie)
{
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
-
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET);
exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
@@ -213,10 +207,9 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
}
-static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+static void exynos_pcie_power_on_phy(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
val &= ~PCIE_PHY_COMMON_PD_CMN;
@@ -239,10 +232,9 @@ static void exynos_pcie_power_on_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
}
-static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+static void exynos_pcie_power_off_phy(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
val |= PCIE_PHY_COMMON_PD_CMN;
@@ -265,10 +257,8 @@ static void exynos_pcie_power_off_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
}
-static void exynos_pcie_init_phy(struct pcie_port *pp)
+static void exynos_pcie_init_phy(struct exynos_pcie *exynos_pcie)
{
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
-
/* DCC feedback control off */
exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK);
@@ -305,51 +295,41 @@ static void exynos_pcie_init_phy(struct pcie_port *pp)
exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC);
}
-static void exynos_pcie_assert_reset(struct pcie_port *pp)
+static void exynos_pcie_assert_reset(struct exynos_pcie *exynos_pcie)
{
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+ struct pcie_port *pp = &exynos_pcie->pp;
+ struct device *dev = pp->dev;
if (exynos_pcie->reset_gpio >= 0)
- devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
+ devm_gpio_request_one(dev, exynos_pcie->reset_gpio,
GPIOF_OUT_INIT_HIGH, "RESET");
}
-static int exynos_pcie_establish_link(struct pcie_port *pp)
+static int exynos_pcie_establish_link(struct exynos_pcie *exynos_pcie)
{
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+ struct pcie_port *pp = &exynos_pcie->pp;
+ struct device *dev = pp->dev;
u32 val;
if (dw_pcie_link_up(pp)) {
- dev_err(pp->dev, "Link already up\n");
+ dev_err(dev, "Link already up\n");
return 0;
}
- /* assert reset signals */
- exynos_pcie_assert_core_reset(pp);
- exynos_pcie_assert_phy_reset(pp);
-
- /* de-assert phy reset */
- exynos_pcie_deassert_phy_reset(pp);
-
- /* power on phy */
- exynos_pcie_power_on_phy(pp);
-
- /* initialize phy */
- exynos_pcie_init_phy(pp);
+ exynos_pcie_assert_core_reset(exynos_pcie);
+ exynos_pcie_assert_phy_reset(exynos_pcie);
+ exynos_pcie_deassert_phy_reset(exynos_pcie);
+ exynos_pcie_power_on_phy(exynos_pcie);
+ exynos_pcie_init_phy(exynos_pcie);
/* pulse for common reset */
exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET);
udelay(500);
exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
- /* de-assert core reset */
- exynos_pcie_deassert_core_reset(pp);
-
- /* setup root complex */
+ exynos_pcie_deassert_core_reset(exynos_pcie);
dw_pcie_setup_rc(pp);
-
- /* assert reset signal */
- exynos_pcie_assert_reset(pp);
+ exynos_pcie_assert_reset(exynos_pcie);
/* assert LTSSM enable */
exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE,
@@ -361,27 +341,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
- dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
+ dev_info(dev, "PLL Locked: 0x%x\n", val);
}
- /* power off phy */
- exynos_pcie_power_off_phy(pp);
-
+ exynos_pcie_power_off_phy(exynos_pcie);
return -ETIMEDOUT;
}
-static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
+static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
}
-static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *exynos_pcie)
{
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
/* enable INTX interrupt */
val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
@@ -391,23 +367,24 @@ static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
+ struct exynos_pcie *exynos_pcie = arg;
- exynos_pcie_clear_irq_pulse(pp);
+ exynos_pcie_clear_irq_pulse(exynos_pcie);
return IRQ_HANDLED;
}
static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
+ struct exynos_pcie *exynos_pcie = arg;
+ struct pcie_port *pp = &exynos_pcie->pp;
return dw_handle_msi_irq(pp);
}
-static void exynos_pcie_msi_init(struct pcie_port *pp)
+static void exynos_pcie_msi_init(struct exynos_pcie *exynos_pcie)
{
+ struct pcie_port *pp = &exynos_pcie->pp;
u32 val;
- struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
dw_pcie_msi_init(pp);
@@ -417,57 +394,64 @@ static void exynos_pcie_msi_init(struct pcie_port *pp)
exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
}
-static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
+static void exynos_pcie_enable_interrupts(struct exynos_pcie *exynos_pcie)
{
- exynos_pcie_enable_irq_pulse(pp);
+ exynos_pcie_enable_irq_pulse(exynos_pcie);
if (IS_ENABLED(CONFIG_PCI_MSI))
- exynos_pcie_msi_init(pp);
+ exynos_pcie_msi_init(exynos_pcie);
}
-static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
- void __iomem *dbi_base, u32 *val)
+static u32 exynos_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
- exynos_pcie_sideband_dbi_r_mode(pp, true);
- *val = readl(dbi_base);
- exynos_pcie_sideband_dbi_r_mode(pp, false);
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+ u32 val;
+
+ exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
+ val = readl(pp->dbi_base + reg);
+ exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
+ return val;
}
-static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
- u32 val, void __iomem *dbi_base)
+static void exynos_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
- exynos_pcie_sideband_dbi_w_mode(pp, true);
- writel(val, dbi_base);
- exynos_pcie_sideband_dbi_w_mode(pp, false);
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
+ writel(val, pp->dbi_base + reg);
+ exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
}
static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
int ret;
- exynos_pcie_sideband_dbi_r_mode(pp, true);
+ exynos_pcie_sideband_dbi_r_mode(exynos_pcie, true);
ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_r_mode(pp, false);
+ exynos_pcie_sideband_dbi_r_mode(exynos_pcie, false);
return ret;
}
static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
u32 val)
{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
int ret;
- exynos_pcie_sideband_dbi_w_mode(pp, true);
+ exynos_pcie_sideband_dbi_w_mode(exynos_pcie, true);
ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_w_mode(pp, false);
+ exynos_pcie_sideband_dbi_w_mode(exynos_pcie, false);
return ret;
}
static int exynos_pcie_link_up(struct pcie_port *pp)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
- u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
+ u32 val;
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
if (val == PCIE_ELBI_LTSSM_ENABLE)
return 1;
@@ -476,8 +460,10 @@ static int exynos_pcie_link_up(struct pcie_port *pp)
static void exynos_pcie_host_init(struct pcie_port *pp)
{
- exynos_pcie_establish_link(pp);
- exynos_pcie_enable_interrupts(pp);
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_pcie_establish_link(exynos_pcie);
+ exynos_pcie_enable_interrupts(exynos_pcie);
}
static struct pcie_host_ops exynos_pcie_host_ops = {
@@ -489,36 +475,38 @@ static struct pcie_host_ops exynos_pcie_host_ops = {
.host_init = exynos_pcie_host_init,
};
-static int __init exynos_add_pcie_port(struct pcie_port *pp,
+static int __init exynos_add_pcie_port(struct exynos_pcie *exynos_pcie,
struct platform_device *pdev)
{
+ struct pcie_port *pp = &exynos_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
pp->irq = platform_get_irq(pdev, 1);
if (!pp->irq) {
- dev_err(&pdev->dev, "failed to get irq\n");
+ dev_err(dev, "failed to get irq\n");
return -ENODEV;
}
- ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
- IRQF_SHARED, "exynos-pcie", pp);
+ ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
+ IRQF_SHARED, "exynos-pcie", exynos_pcie);
if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
+ dev_err(dev, "failed to request irq\n");
return ret;
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq(pdev, 0);
if (!pp->msi_irq) {
- dev_err(&pdev->dev, "failed to get msi irq\n");
+ dev_err(dev, "failed to get msi irq\n");
return -ENODEV;
}
- ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ ret = devm_request_irq(dev, pp->msi_irq,
exynos_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "exynos-pcie", pp);
+ "exynos-pcie", exynos_pcie);
if (ret) {
- dev_err(&pdev->dev, "failed to request msi irq\n");
+ dev_err(dev, "failed to request msi irq\n");
return ret;
}
}
@@ -528,7 +516,7 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -537,37 +525,36 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
static int __init exynos_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct exynos_pcie *exynos_pcie;
struct pcie_port *pp;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct resource *elbi_base;
struct resource *phy_base;
struct resource *block_base;
int ret;
- exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
- GFP_KERNEL);
+ exynos_pcie = devm_kzalloc(dev, sizeof(*exynos_pcie), GFP_KERNEL);
if (!exynos_pcie)
return -ENOMEM;
pp = &exynos_pcie->pp;
-
- pp->dev = &pdev->dev;
+ pp->dev = dev;
exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
- exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ exynos_pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(exynos_pcie->clk)) {
- dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
+ dev_err(dev, "Failed to get pcie rc clock\n");
return PTR_ERR(exynos_pcie->clk);
}
ret = clk_prepare_enable(exynos_pcie->clk);
if (ret)
return ret;
- exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ exynos_pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(exynos_pcie->bus_clk)) {
- dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
+ dev_err(dev, "Failed to get pcie bus clock\n");
ret = PTR_ERR(exynos_pcie->bus_clk);
goto fail_clk;
}
@@ -576,27 +563,27 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
goto fail_clk;
elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
+ exynos_pcie->elbi_base = devm_ioremap_resource(dev, elbi_base);
if (IS_ERR(exynos_pcie->elbi_base)) {
ret = PTR_ERR(exynos_pcie->elbi_base);
goto fail_bus_clk;
}
phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+ exynos_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
if (IS_ERR(exynos_pcie->phy_base)) {
ret = PTR_ERR(exynos_pcie->phy_base);
goto fail_bus_clk;
}
block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
+ exynos_pcie->block_base = devm_ioremap_resource(dev, block_base);
if (IS_ERR(exynos_pcie->block_base)) {
ret = PTR_ERR(exynos_pcie->block_base);
goto fail_bus_clk;
}
- ret = exynos_add_pcie_port(pp, pdev);
+ ret = exynos_add_pcie_port(exynos_pcie, pdev);
if (ret < 0)
goto fail_bus_clk;
@@ -624,7 +611,6 @@ static const struct of_device_id exynos_pcie_of_match[] = {
{ .compatible = "samsung,exynos5440-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
static struct platform_driver exynos_pcie_driver = {
.remove = __exit_p(exynos_pcie_remove),
@@ -641,7 +627,3 @@ static int __init exynos_pcie_init(void)
return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
}
subsys_initcall(exynos_pcie_init);
-
-MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Samsung PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
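The accessor rework above aligns exynos with the dw_pcie_readl_rc()/dw_pcie_writel_rc() calling convention: take (pp, reg), return or accept a u32, and bracket the raw access with the ELBI sideband mode so DBI traffic is routed correctly. Callers can then use plain read-modify-write sequences; a minimal sketch (EXYNOS_EXAMPLE_REG and EXYNOS_EXAMPLE_BIT are placeholders, not real registers):

	u32 val;

	val = exynos_pcie_readl_rc(pp, EXYNOS_EXAMPLE_REG);
	val |= EXYNOS_EXAMPLE_BIT;
	exynos_pcie_writel_rc(pp, EXYNOS_EXAMPLE_REG, val);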
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 9d9d34e959b6..e3c48b5deb93 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -1,4 +1,6 @@
/*
+ * Generic PCI host driver common code
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -17,7 +19,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
@@ -29,7 +30,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
int err, res_valid = 0;
struct device_node *np = dev->of_node;
resource_size_t iobase;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
if (err)
@@ -39,15 +40,17 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
return err;
- resource_list_for_each_entry(win, resources) {
+ resource_list_for_each_entry_safe(win, tmp, resources) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+ resource_list_destroy_entry(win);
+ }
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
@@ -162,7 +165,3 @@ int pci_host_common_probe(struct platform_device *pdev,
pci_bus_add_devices(bus);
return 0;
}
-
-MODULE_DESCRIPTION("Generic PCI host driver common code");
-MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 6955ffdb89f3..763ff8745828 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -200,11 +200,11 @@ struct tran_int_desc {
*/
struct pci_message {
- u32 message_type;
+ u32 type;
} __packed;
struct pci_child_message {
- u32 message_type;
+ struct pci_message message_type;
union win_slot_encoding wslot;
} __packed;
@@ -222,7 +222,8 @@ struct pci_packet {
void (*completion_func)(void *context, struct pci_response *resp,
int resp_packet_size);
void *compl_ctxt;
- struct pci_message message;
+
+ struct pci_message message[0];
};
/*
@@ -258,7 +259,7 @@ struct pci_bus_d0_entry {
struct pci_bus_relations {
struct pci_incoming_message incoming;
u32 device_count;
- struct pci_function_description func[1];
+ struct pci_function_description func[0];
} __packed;
struct pci_q_res_req_response {
@@ -314,7 +315,7 @@ struct pci_dev_incoming {
} __packed;
struct pci_eject_response {
- u32 message_type;
+ struct pci_message message_type;
union win_slot_encoding wslot;
u32 status;
} __packed;
@@ -373,7 +374,6 @@ struct hv_pcibus_device {
struct list_head children;
struct list_head dr_list;
- struct work_struct wrk;
struct msi_domain_info msi_info;
struct msi_controller msi_chip;
@@ -393,7 +393,7 @@ struct hv_dr_work {
struct hv_dr_state {
struct list_head list_entry;
u32 device_count;
- struct pci_function_description func[1];
+ struct pci_function_description func[0];
};
enum hv_pcichild_state {
@@ -447,15 +447,16 @@ struct hv_pci_compl {
* for any message for which the completion packet contains a
* status and nothing else.
*/
-static
-void
-hv_pci_generic_compl(void *context, struct pci_response *resp,
- int resp_packet_size)
+static void hv_pci_generic_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
{
struct hv_pci_compl *comp_pkt = context;
if (resp_packet_size >= offsetofend(struct pci_response, status))
comp_pkt->completion_status = resp->status;
+ else
+ comp_pkt->completion_status = -1;
+
complete(&comp_pkt->host_event);
}
@@ -694,13 +695,12 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
struct pci_delete_interrupt *int_pkt;
struct {
struct pci_packet pkt;
- u8 buffer[sizeof(struct pci_delete_interrupt) -
- sizeof(struct pci_message)];
+ u8 buffer[sizeof(struct pci_delete_interrupt)];
} ctxt;
memset(&ctxt, 0, sizeof(ctxt));
int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
- int_pkt->message_type.message_type =
+ int_pkt->message_type.type =
PCI_DELETE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
int_pkt->int_desc = *int_desc;
@@ -847,8 +847,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct cpumask *affinity;
struct {
struct pci_packet pkt;
- u8 buffer[sizeof(struct pci_create_interrupt) -
- sizeof(struct pci_message)];
+ u8 buffer[sizeof(struct pci_create_interrupt)];
} ctxt;
int cpu;
int ret;
@@ -876,7 +875,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
ctxt.pkt.completion_func = hv_pci_compose_compl;
ctxt.pkt.compl_ctxt = &comp;
int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message;
- int_pkt->message_type.message_type = PCI_CREATE_INTERRUPT_MESSAGE;
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
int_pkt->int_desc.vector = cfg->vector;
int_pkt->int_desc.vector_count = 1;
@@ -897,8 +896,10 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
sizeof(*int_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (!ret)
- wait_for_completion(&comp.comp_pkt.host_event);
+ if (ret)
+ goto free_int_desc;
+
+ wait_for_completion(&comp.comp_pkt.host_event);
if (comp.comp_pkt.completion_status < 0) {
dev_err(&hbus->hdev->device,
@@ -1289,7 +1290,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
pkt.init_packet.compl_ctxt = &comp_pkt;
pkt.init_packet.completion_func = q_resource_requirements;
res_req = (struct pci_child_message *)&pkt.init_packet.message;
- res_req->message_type = PCI_QUERY_RESOURCE_REQUIREMENTS;
+ res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
res_req->wslot.slot = desc->win_slot.slot;
ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
@@ -1466,8 +1467,7 @@ static void pci_devices_present_work(struct work_struct *work)
if (hpdev->reported_missing) {
found = true;
put_pcichild(hpdev, hv_pcidev_ref_childlist);
- list_del(&hpdev->list_entry);
- list_add_tail(&hpdev->list_entry, &removed);
+ list_move_tail(&hpdev->list_entry, &removed);
break;
}
}
@@ -1558,8 +1558,7 @@ static void hv_eject_device_work(struct work_struct *work)
int wslot;
struct {
struct pci_packet pkt;
- u8 buffer[sizeof(struct pci_eject_response) -
- sizeof(struct pci_message)];
+ u8 buffer[sizeof(struct pci_eject_response)];
} ctxt;
hpdev = container_of(work, struct hv_pci_dev, wrk);
@@ -1585,7 +1584,7 @@ static void hv_eject_device_work(struct work_struct *work)
memset(&ctxt, 0, sizeof(ctxt));
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
- ejct_pkt->message_type = PCI_EJECTION_COMPLETE;
+ ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
@@ -1688,7 +1687,7 @@ static void hv_pci_onchannelcallback(void *context)
case VM_PKT_DATA_INBAND:
new_message = (struct pci_incoming_message *)buffer;
- switch (new_message->message_type.message_type) {
+ switch (new_message->message_type.type) {
case PCI_BUS_RELATIONS:
bus_rel = (struct pci_bus_relations *)buffer;
@@ -1719,7 +1718,7 @@ static void hv_pci_onchannelcallback(void *context)
default:
dev_warn(&hbus->hdev->device,
"Unimplemented protocol message %x\n",
- new_message->message_type.message_type);
+ new_message->message_type.type);
break;
}
break;
@@ -1772,7 +1771,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
version_req = (struct pci_version_request *)&pkt->message;
- version_req->message_type.message_type = PCI_QUERY_PROTOCOL_VERSION;
+ version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT;
ret = vmbus_sendpacket(hdev->channel, version_req,
@@ -1973,7 +1972,7 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
- d0_entry->message_type.message_type = PCI_BUS_D0ENTRY;
+ d0_entry->message_type.type = PCI_BUS_D0ENTRY;
d0_entry->mmio_base = hbus->mem_config->start;
ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
@@ -2019,7 +2018,7 @@ static int hv_pci_query_relations(struct hv_device *hdev)
return -ENOTEMPTY;
memset(&message, 0, sizeof(message));
- message.message_type = PCI_QUERY_BUS_RELATIONS;
+ message.type = PCI_QUERY_BUS_RELATIONS;
ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
0, VM_PKT_DATA_INBAND, 0);
@@ -2072,8 +2071,8 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- pkt->message.message_type = PCI_RESOURCES_ASSIGNED;
res_assigned = (struct pci_resources_assigned *)&pkt->message;
+ res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED;
res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
put_pcichild(hpdev, hv_pcidev_ref_by_slot);
@@ -2123,7 +2122,7 @@ static int hv_send_resources_released(struct hv_device *hdev)
continue;
memset(&pkt, 0, sizeof(pkt));
- pkt.message_type = PCI_RESOURCES_RELEASED;
+ pkt.message_type.type = PCI_RESOURCES_RELEASED;
pkt.wslot.slot = hpdev->desc.win_slot.slot;
put_pcichild(hpdev, hv_pcidev_ref_by_slot);
@@ -2290,7 +2289,7 @@ static int hv_pci_remove(struct hv_device *hdev)
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
pkt.teardown_packet.compl_ctxt = &comp_pkt;
- pkt.teardown_packet.message.message_type = PCI_BUS_D0EXIT;
+ pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
sizeof(struct pci_message),
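The recurring change in this file replaces the fixed struct pci_message member at the end of struct pci_packet with a zero-length array, so each message type overlays its own layout directly after the header and the on-stack buffers no longer need the "- sizeof(struct pci_message)" correction. A simplified, self-contained sketch of that layout trick (illustrative types, not the real Hyper-V definitions):

#include <linux/types.h>

struct msg_hdr { u32 type; };

struct packet {
	void *compl_ctxt;
	struct msg_hdr message[0];	/* zero size: payload starts here */
};

struct create_interrupt {
	struct msg_hdr message_type;	/* must stay first */
	u32 vector;
};

static void compose_example(void)
{
	struct {
		struct packet pkt;
		u8 buffer[sizeof(struct create_interrupt)];	/* full payload */
	} ctxt;
	struct create_interrupt *int_pkt;

	int_pkt = (struct create_interrupt *)&ctxt.pkt.message;
	int_pkt->message_type.type = 0x1;	/* e.g. CREATE_INTERRUPT */
	int_pkt->vector = 42;
}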
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index b741a36a67f3..c8cefb078218 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -39,16 +39,15 @@ enum imx6_pcie_variants {
};
struct imx6_pcie {
+ struct pcie_port pp; /* pp.dbi_base is DT 0th resource */
int reset_gpio;
bool gpio_active_high;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie_inbound_axi;
struct clk *pcie;
- struct pcie_port pp;
struct regmap *iomuxc_gpr;
enum imx6_pcie_variants variant;
- void __iomem *mem_base;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@@ -96,14 +95,15 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
-static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
+static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
u32 val;
u32 max_iterations = 10;
u32 wait_counter = 0;
do {
- val = readl(dbi_base + PCIE_PHY_STAT);
+ val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
wait_counter++;
@@ -116,123 +116,126 @@ static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
return -ETIMEDOUT;
}
-static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
+static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
u32 val;
int ret;
val = addr << PCIE_PHY_CTRL_DATA_LOC;
- writel(val, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
- writel(val, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(dbi_base, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
val = addr << PCIE_PHY_CTRL_DATA_LOC;
- writel(val, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(dbi_base, 0);
+ return pcie_phy_poll_ack(imx6_pcie, 0);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
+static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
u32 val, phy_ctl;
int ret;
- ret = pcie_phy_wait_ack(dbi_base, addr);
+ ret = pcie_phy_wait_ack(imx6_pcie, addr);
if (ret)
return ret;
/* assert Read signal */
phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
- writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(dbi_base, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
- val = readl(dbi_base + PCIE_PHY_STAT);
+ val = dw_pcie_readl_rc(pp, PCIE_PHY_STAT);
*data = val & 0xffff;
/* deassert Read signal */
- writel(0x00, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(dbi_base, 0);
+ return pcie_phy_poll_ack(imx6_pcie, 0);
}
-static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
+static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
u32 var;
int ret;
/* write addr */
/* cap addr */
- ret = pcie_phy_wait_ack(dbi_base, addr);
+ ret = pcie_phy_wait_ack(imx6_pcie, addr);
if (ret)
return ret;
var = data << PCIE_PHY_CTRL_DATA_LOC;
- writel(var, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* capture data */
var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
- writel(var, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(dbi_base, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
/* deassert cap data */
var = data << PCIE_PHY_CTRL_DATA_LOC;
- writel(var, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(dbi_base, 0);
+ ret = pcie_phy_poll_ack(imx6_pcie, 0);
if (ret)
return ret;
/* assert wr signal */
var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
- writel(var, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(dbi_base, 1);
+ ret = pcie_phy_poll_ack(imx6_pcie, 1);
if (ret)
return ret;
/* deassert wr signal */
var = data << PCIE_PHY_CTRL_DATA_LOC;
- writel(var, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(dbi_base, 0);
+ ret = pcie_phy_poll_ack(imx6_pcie, 0);
if (ret)
return ret;
- writel(0x0, dbi_base + PCIE_PHY_CTRL);
+ dw_pcie_writel_rc(pp, PCIE_PHY_CTRL, 0x0);
return 0;
}
-static void imx6_pcie_reset_phy(struct pcie_port *pp)
+static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
u32 tmp;
- pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
- pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
/* Added for PCI abort handling */
@@ -242,9 +245,9 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
return 0;
}
-static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ struct pcie_port *pp = &imx6_pcie->pp;
u32 val, gpr1, gpr12;
switch (imx6_pcie->variant) {
@@ -281,10 +284,10 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
(gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
- val = readl(pp->dbi_base + PCIE_PL_PFLR);
+ val = dw_pcie_readl_rc(pp, PCIE_PL_PFLR);
val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
val |= PCIE_PL_PFLR_FORCE_LINK;
- writel(val, pp->dbi_base + PCIE_PL_PFLR);
+ dw_pcie_writel_rc(pp, PCIE_PL_PFLR, val);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@@ -296,20 +299,19 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
}
-
- return 0;
}
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
int ret = 0;
switch (imx6_pcie->variant) {
case IMX6SX:
ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie_axi clock\n");
+ dev_err(dev, "unable to enable pcie_axi clock\n");
break;
}
@@ -336,32 +338,33 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
return ret;
}
-static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie_phy clock\n");
- goto err_pcie_phy;
+ dev_err(dev, "unable to enable pcie_phy clock\n");
+ return;
}
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie_bus clock\n");
+ dev_err(dev, "unable to enable pcie_bus clock\n");
goto err_pcie_bus;
}
ret = clk_prepare_enable(imx6_pcie->pcie);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie clock\n");
+ dev_err(dev, "unable to enable pcie clock\n");
goto err_pcie;
}
ret = imx6_pcie_enable_ref_clk(imx6_pcie);
if (ret) {
- dev_err(pp->dev, "unable to enable pcie ref clock\n");
+ dev_err(dev, "unable to enable pcie ref clock\n");
goto err_ref_clk;
}
@@ -392,7 +395,7 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
break;
}
- return 0;
+ return;
err_ref_clk:
clk_disable_unprepare(imx6_pcie->pcie);
@@ -400,14 +403,10 @@ err_pcie:
clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
clk_disable_unprepare(imx6_pcie->pcie_phy);
-err_pcie_phy:
- return ret;
}
-static void imx6_pcie_init_phy(struct pcie_port *pp)
+static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
-
if (imx6_pcie->variant == IMX6SX)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
@@ -439,45 +438,52 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
imx6_pcie->tx_swing_low << 25);
}
-static int imx6_pcie_wait_for_link(struct pcie_port *pp)
+static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
+
/* check if the link is up or not */
if (!dw_pcie_wait_for_link(pp))
return 0;
- dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
return -ETIMEDOUT;
}
-static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
+static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
u32 tmp;
unsigned int retries;
for (retries = 0; retries < 200; retries++) {
- tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
/* Test if the speed change finished. */
if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
return 0;
usleep_range(100, 1000);
}
- dev_err(pp->dev, "Speed change timeout\n");
+ dev_err(dev, "Speed change timeout\n");
return -EINVAL;
}
static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
+ struct imx6_pcie *imx6_pcie = arg;
+ struct pcie_port *pp = &imx6_pcie->pp;
return dw_handle_msi_irq(pp);
}
-static int imx6_pcie_establish_link(struct pcie_port *pp)
+static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
u32 tmp;
int ret;
@@ -486,76 +492,73 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
- tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
- writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+ dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
/* Start LTSSM. */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
- ret = imx6_pcie_wait_for_link(pp);
+ ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret) {
- dev_info(pp->dev, "Link never came up\n");
+ dev_info(dev, "Link never came up\n");
goto err_reset_phy;
}
if (imx6_pcie->link_gen == 2) {
/* Allow Gen2 mode after the link is up. */
- tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
- writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+ dw_pcie_writel_rc(pp, PCIE_RC_LCR, tmp);
} else {
- dev_info(pp->dev, "Link: Gen2 disabled\n");
+ dev_info(dev, "Link: Gen2 disabled\n");
}
/*
* Start Directed Speed Change so the best possible speed both link
* partners support can be negotiated.
*/
- tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp |= PORT_LOGIC_SPEED_CHANGE;
- writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
- ret = imx6_pcie_wait_for_speed_change(pp);
+ ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
if (ret) {
- dev_err(pp->dev, "Failed to bring link up!\n");
+ dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
}
/* Make sure link training is finished as well! */
- ret = imx6_pcie_wait_for_link(pp);
+ ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret) {
- dev_err(pp->dev, "Failed to bring link up!\n");
+ dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
}
- tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
- dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
+ tmp = dw_pcie_readl_rc(pp, PCIE_RC_LCSR);
+ dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
return 0;
err_reset_phy:
- dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
- imx6_pcie_reset_phy(pp);
-
+ dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
+ imx6_pcie_reset_phy(imx6_pcie);
return ret;
}
static void imx6_pcie_host_init(struct pcie_port *pp)
{
- imx6_pcie_assert_core_reset(pp);
-
- imx6_pcie_init_phy(pp);
-
- imx6_pcie_deassert_core_reset(pp);
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ imx6_pcie_assert_core_reset(imx6_pcie);
+ imx6_pcie_init_phy(imx6_pcie);
+ imx6_pcie_deassert_core_reset(imx6_pcie);
dw_pcie_setup_rc(pp);
-
- imx6_pcie_establish_link(pp);
+ imx6_pcie_establish_link(imx6_pcie);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
@@ -563,7 +566,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
static int imx6_pcie_link_up(struct pcie_port *pp)
{
- return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) &
+ return dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1) &
PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}
@@ -572,24 +575,26 @@ static struct pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
-static int __init imx6_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
+static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+ struct platform_device *pdev)
{
+ struct pcie_port *pp = &imx6_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq <= 0) {
- dev_err(&pdev->dev, "failed to get MSI irq\n");
+ dev_err(dev, "failed to get MSI irq\n");
return -ENODEV;
}
- ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ ret = devm_request_irq(dev, pp->msi_irq,
imx6_pcie_msi_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "mx6-pcie-msi", pp);
+ "mx6-pcie-msi", imx6_pcie);
if (ret) {
- dev_err(&pdev->dev, "failed to request MSI irq\n");
+ dev_err(dev, "failed to request MSI irq\n");
return ret;
}
}
@@ -599,7 +604,7 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -608,75 +613,72 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
- struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node = dev->of_node;
int ret;
- imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+ imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
return -ENOMEM;
pp = &imx6_pcie->pp;
- pp->dev = &pdev->dev;
+ pp->dev = dev;
imx6_pcie->variant =
- (enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);
+ (enum imx6_pcie_variants)of_device_get_match_data(dev);
/* Added for PCI abort handling */
hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+ pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
- imx6_pcie->gpio_active_high = of_property_read_bool(np,
+ imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
+ imx6_pcie->gpio_active_high = of_property_read_bool(node,
"reset-gpio-active-high");
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+ ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
imx6_pcie->gpio_active_high ?
GPIOF_OUT_INIT_HIGH :
GPIOF_OUT_INIT_LOW,
"PCIe reset");
if (ret) {
- dev_err(&pdev->dev, "unable to get reset gpio\n");
+ dev_err(dev, "unable to get reset gpio\n");
return ret;
}
}
/* Fetch clocks */
- imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
+ imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
if (IS_ERR(imx6_pcie->pcie_phy)) {
- dev_err(&pdev->dev,
- "pcie_phy clock source missing or invalid\n");
+ dev_err(dev, "pcie_phy clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_phy);
}
- imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
+ imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(imx6_pcie->pcie_bus)) {
- dev_err(&pdev->dev,
- "pcie_bus clock source missing or invalid\n");
+ dev_err(dev, "pcie_bus clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_bus);
}
- imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
+ imx6_pcie->pcie = devm_clk_get(dev, "pcie");
if (IS_ERR(imx6_pcie->pcie)) {
- dev_err(&pdev->dev,
- "pcie clock source missing or invalid\n");
+ dev_err(dev, "pcie clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie);
}
if (imx6_pcie->variant == IMX6SX) {
- imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
+ imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"pcie_incbound_axi clock missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_inbound_axi);
}
@@ -686,7 +688,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
- dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+ dev_err(dev, "unable to find iomuxc registers\n");
return PTR_ERR(imx6_pcie->iomuxc_gpr);
}
@@ -712,12 +714,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->tx_swing_low = 127;
/* Limit link speed */
- ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
+ ret = of_property_read_u32(node, "fsl,max-link-speed",
&imx6_pcie->link_gen);
if (ret)
imx6_pcie->link_gen = 1;
- ret = imx6_add_pcie_port(pp, pdev);
+ ret = imx6_add_pcie_port(imx6_pcie, pdev);
if (ret < 0)
return ret;
@@ -730,7 +732,7 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
- imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+ imx6_pcie_assert_core_reset(imx6_pcie);
}
static const struct of_device_id imx6_pcie_of_match[] = {
@@ -739,7 +741,6 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
{},
};
-MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
static struct platform_driver imx6_pcie_driver = {
.driver = {
@@ -749,14 +750,8 @@ static struct platform_driver imx6_pcie_driver = {
.shutdown = imx6_pcie_shutdown,
};
-/* Freescale PCIe driver does not allow module unload */
-
static int __init imx6_pcie_init(void)
{
return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
-module_init(imx6_pcie_init);
-
-MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
-MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
+device_initcall(imx6_pcie_init);
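
Note: the common thread in the pci-imx6.c hunks above is that each helper now takes the driver-private struct imx6_pcie instead of struct pcie_port, and all DBI register traffic goes through the DesignWare accessors rather than open-coded readl()/writel() on pp->dbi_base. A minimal sketch of what those accessors reduce to, assuming the signatures used throughout this series (the authoritative definitions live in pcie-designware.c):

#include <linux/io.h>

/* Sketch only: DBI accessors as used above (assumed signatures). */
static inline u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
	return readl(pp->dbi_base + reg);	/* read from DBI space */
}

static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
	writel(val, pp->dbi_base + reg);	/* write to DBI space */
}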
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index 41515092eb0d..9397c4667106 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -88,13 +88,24 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
return ks_pcie->app.start + MSI_IRQ;
}
+static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+ return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
+{
+ writel(val, ks_pcie->va_app_base + offset);
+}
+
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
u32 pending, vector;
int src, virq;
- pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
+ pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
/*
* MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
@@ -104,7 +115,7 @@ void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
if (BIT(src) & pending) {
vector = offset + (src << 3);
virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
+ dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
src, vector, virq);
generic_handle_irq(virq);
}
@@ -124,9 +135,9 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
- writel(BIT(bit_pos),
- ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
- writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
+ ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
+ BIT(bit_pos));
+ ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
@@ -135,8 +146,8 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- writel(BIT(bit_pos),
- ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
+ ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+ BIT(bit_pos));
}
void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
@@ -145,8 +156,8 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- writel(BIT(bit_pos),
- ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
+ ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+ BIT(bit_pos));
}
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
@@ -215,6 +226,7 @@ static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+ struct device *dev = pp->dev;
int i;
pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
@@ -222,7 +234,7 @@ int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
&ks_dw_pcie_msi_domain_ops,
chip);
if (!pp->irq_domain) {
- dev_err(pp->dev, "irq domain init failed\n");
+ dev_err(dev, "irq domain init failed\n");
return -ENXIO;
}
@@ -237,47 +249,47 @@ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
int i;
for (i = 0; i < MAX_LEGACY_IRQS; i++)
- writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
+ ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
}
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
u32 pending;
int virq;
- pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
+ pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
if (BIT(0) & pending) {
virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
- virq);
+ dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
generic_handle_irq(virq);
}
/* EOI the INTx interrupt */
- writel(offset, ks_pcie->va_app_base + IRQ_EOI);
+ ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
}
-void ks_dw_pcie_enable_error_irq(void __iomem *reg_base)
+void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
{
- writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET);
+ ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
}
-irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
- void __iomem *reg_base)
+irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
{
u32 status;
- status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
+ status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
if (!status)
return IRQ_NONE;
if (status & ERR_FATAL_IRQ)
- dev_err(dev, "fatal error (status %#010x)\n", status);
+ dev_err(ks_pcie->pp.dev, "fatal error (status %#010x)\n",
+ status);
/* Ack the IRQ; status bits are RW1C */
- writel(status, reg_base + ERR_IRQ_STATUS);
+ ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
return IRQ_HANDLED;
}
@@ -322,15 +334,15 @@ static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
* Since modification of dbi_cs2 involves a different clock domain, read the
* status back to ensure the transition is complete.
*/
-static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
+static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
u32 val;
- writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
- reg_virt + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
do {
- val = readl(reg_virt + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
} while (!(val & DBI_CS2_EN_VAL));
}
@@ -340,15 +352,15 @@ static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
* Since modification of dbi_cs2 involves a different clock domain, read the
* status back to ensure the transition is complete.
*/
-static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
+static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
u32 val;
- writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
- reg_virt + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
do {
- val = readl(reg_virt + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
} while (val & DBI_CS2_EN_VAL);
}
@@ -357,28 +369,29 @@ void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
struct pcie_port *pp = &ks_pcie->pp;
u32 start = pp->mem->start, end = pp->mem->end;
int i, tr_size;
+ u32 val;
/* Disable BARs for inbound access */
- ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
- writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
- writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
- ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+ ks_dw_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0);
+ ks_dw_pcie_clear_dbi_mode(ks_pcie);
/* Set outbound translation size per window division */
- writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
+ ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
/* Using Direct 1:1 mapping of RC <-> PCI memory space */
for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
- writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
- writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
+ ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
+ ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
start += tr_size;
}
/* Enable OB translation */
- writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
- ks_pcie->va_app_base + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
}
/**
@@ -418,7 +431,7 @@ static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
if (bus != 1)
regval |= BIT(24);
- writel(regval, ks_pcie->va_app_base + CFG_SETUP);
+ ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
return pp->va_cfg0_base;
}
@@ -456,19 +469,19 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
/* Configure and set up BAR0 */
- ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+ ks_dw_pcie_set_dbi_mode(ks_pcie);
/* Enable BAR0 */
- writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
- writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, SZ_4K - 1);
- ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+ ks_dw_pcie_clear_dbi_mode(ks_pcie);
/*
* For BAR0, just setting bus address for inbound writes (MSI) should
* be sufficient. Use physical address to avoid any conflicts.
*/
- writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
}
/**
@@ -476,8 +489,9 @@ void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
*/
int ks_dw_pcie_link_up(struct pcie_port *pp)
{
- u32 val = readl(pp->dbi_base + DEBUG0);
+ u32 val;
+ val = dw_pcie_readl_rc(pp, DEBUG0);
return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}
@@ -486,13 +500,13 @@ void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
u32 val;
/* Disable Link training */
- val = readl(ks_pcie->va_app_base + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
- writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, val);
/* Initiate Link Training */
- val = readl(ks_pcie->va_app_base + CMD_STATUS);
- writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+ val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
}
/**
@@ -506,12 +520,13 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np)
{
struct pcie_port *pp = &ks_pcie->pp;
- struct platform_device *pdev = to_platform_device(pp->dev);
+ struct device *dev = pp->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
/* Index 0 is the config reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pp->dbi_base = devm_ioremap_resource(pp->dev, res);
+ pp->dbi_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
@@ -524,7 +539,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
/* Index 1 is the application reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ks_pcie->va_app_base))
return PTR_ERR(ks_pcie->va_app_base);
@@ -537,7 +552,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
&ks_dw_pcie_legacy_irq_domain_ops,
NULL);
if (!ks_pcie->legacy_irq_domain) {
- dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
return -EINVAL;
}
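
Note: the new ks_dw_app_readl()/ks_dw_app_writel() helpers defined above centralize all application-register access behind va_app_base. The dbi_cs2 routines then become read-modify-write sequences that poll the value back, because the write lands in a different clock domain. A hedged sketch of that poll, where ks_dw_poll_bit() is a hypothetical name and not part of the driver:

/* Hypothetical helper showing the read-back poll performed by
 * ks_dw_pcie_set_dbi_mode()/ks_dw_pcie_clear_dbi_mode(): keep reading
 * CMD_STATUS until the other clock domain reflects the new bit state.
 */
static void ks_dw_poll_bit(struct keystone_pcie *ks_pcie, u32 mask, bool set)
{
	u32 val;

	do {
		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
	} while (set ? !(val & mask) : (val & mask));
}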
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 8ba28834d470..043c19a05da1 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -89,12 +89,13 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
unsigned int retries;
dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pp)) {
- dev_err(pp->dev, "Link already up\n");
+ dev_err(dev, "Link already up\n");
return 0;
}
@@ -105,7 +106,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
return 0;
}
- dev_err(pp->dev, "phy link never came up\n");
+ dev_err(dev, "phy link never came up\n");
return -ETIMEDOUT;
}
@@ -115,9 +116,10 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irqs[0];
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
- dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
+ dev_dbg(dev, "%s, irq %d\n", __func__, irq);
/*
* The chained irq handler installation would have replaced normal
@@ -142,10 +144,11 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
- dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
+ dev_dbg(dev, ": Handling legacy irq %d\n", irq);
/*
* The chained irq handler installation would have replaced normal
@@ -234,7 +237,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
}
if (ks_pcie->error_irq > 0)
- ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base);
+ ks_dw_pcie_enable_error_irq(ks_pcie);
}
/*
@@ -302,14 +305,14 @@ static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
- return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev,
- ks_pcie->va_app_base);
+ return ks_dw_pcie_handle_error_irq(ks_pcie);
}
static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &ks_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
ret = ks_pcie_get_irq_controller_info(ks_pcie,
@@ -332,11 +335,12 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
*/
ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
if (ks_pcie->error_irq <= 0)
- dev_info(&pdev->dev, "no error IRQ defined\n");
+ dev_info(dev, "no error IRQ defined\n");
else {
- if (request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
- IRQF_SHARED, "pcie-error-irq", ks_pcie) < 0) {
- dev_err(&pdev->dev, "failed to request error IRQ %d\n",
+ ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
+ IRQF_SHARED, "pcie-error-irq", ks_pcie);
+ if (ret < 0) {
+ dev_err(dev, "failed to request error IRQ %d\n",
ks_pcie->error_irq);
return ret;
}
@@ -346,7 +350,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -380,12 +384,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct phy *phy;
int ret;
- ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
- GFP_KERNEL);
+ ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
return -ENOMEM;
pp = &ks_pcie->pp;
+ pp->dev = dev;
/* initialize SerDes Phy if present */
phy = devm_phy_get(dev, "pcie-phy");
@@ -407,7 +411,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
devm_iounmap(dev, reg_p);
devm_release_mem_region(dev, res->start, resource_size(res));
- pp->dev = dev;
ks_pcie->np = dev->of_node;
platform_set_drvdata(pdev, ks_pcie);
ks_pcie->clk = devm_clk_get(dev, "pcie");
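
Note: besides the dev-pointer cleanup, the error-IRQ hunk above fixes a latent bug: the old code called request_irq() inside the if condition and then returned ret, which still held the value of an earlier, unrelated call. The corrected idiom, shown here as a sketch using the names from this file:

/* Capture the return value so the failure path propagates the real errno. */
ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
		  IRQF_SHARED, "pcie-error-irq", ks_pcie);
if (ret < 0) {
	dev_err(dev, "failed to request error IRQ %d\n", ks_pcie->error_irq);
	return ret;
}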
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
index a5b0cb2ba4d7..bc54bafda068 100644
--- a/drivers/pci/host/pci-keystone.h
+++ b/drivers/pci/host/pci-keystone.h
@@ -17,8 +17,8 @@
#define MAX_LEGACY_HOST_IRQS 4
struct keystone_pcie {
+ struct pcie_port pp; /* pp.dbi_base is DT 0th resource */
struct clk *clk;
- struct pcie_port pp;
/* PCI Device ID */
u32 device_id;
int num_legacy_host_irqs;
@@ -34,7 +34,7 @@ struct keystone_pcie {
int error_irq;
/* Application register space */
- void __iomem *va_app_base;
+ void __iomem *va_app_base; /* DT 1st resource */
struct resource app;
};
@@ -45,9 +45,8 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
-void ks_dw_pcie_enable_error_irq(void __iomem *reg_base);
-irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
- void __iomem *reg_base);
+void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie);
+irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie);
int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np);
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
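
Note: moving struct pcie_port to the head of struct keystone_pcie (and of struct ls_pcie below) pairs with the to_*_pcie() conversion macros, which are container_of() over the pp member; with pp first, the conversion is effectively a type-safe cast. A sketch of the pattern, matching the macro this header is assumed to define:

#include <linux/kernel.h>

/* pcie_port -> driver-private conversion used by the host_init/link_up
 * callbacks, which only receive a struct pcie_port pointer. */
#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)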
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 114ba819277a..2cb7315e26d0 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -45,10 +45,9 @@ struct ls_pcie_drvdata {
};
struct ls_pcie {
- void __iomem *dbi;
+ struct pcie_port pp; /* pp.dbi_base is DT regs */
void __iomem *lut;
struct regmap *scfg;
- struct pcie_port pp;
const struct ls_pcie_drvdata *drvdata;
int index;
};
@@ -59,7 +58,7 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
{
u32 header_type;
- header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE);
+ header_type = ioread8(pcie->pp.dbi_base + PCI_HEADER_TYPE);
header_type &= 0x7f;
return header_type == PCI_HEADER_TYPE_BRIDGE;
@@ -68,13 +67,13 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
/* Clear multi-function bit */
static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
{
- iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
+ iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->pp.dbi_base + PCI_HEADER_TYPE);
}
/* Fix class value */
static void ls_pcie_fix_class(struct ls_pcie *pcie)
{
- iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
+ iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->pp.dbi_base + PCI_CLASS_DEVICE);
}
/* Drop MSG TLP except for Vendor MSG */
@@ -82,9 +81,9 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
{
u32 val;
- val = ioread32(pcie->dbi + PCIE_STRFMR1);
+ val = ioread32(pcie->pp.dbi_base + PCIE_STRFMR1);
val &= 0xDFFFFFFF;
- iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+ iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1);
}
static int ls1021_pcie_link_up(struct pcie_port *pp)
@@ -106,18 +105,19 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
static void ls1021_pcie_host_init(struct pcie_port *pp)
{
+ struct device *dev = pp->dev;
struct ls_pcie *pcie = to_ls_pcie(pp);
u32 index[2];
- pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
+ pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
"fsl,pcie-scfg");
if (IS_ERR(pcie->scfg)) {
- dev_err(pp->dev, "No syscfg phandle specified\n");
+ dev_err(dev, "No syscfg phandle specified\n");
pcie->scfg = NULL;
return;
}
- if (of_property_read_u32_array(pp->dev->of_node,
+ if (of_property_read_u32_array(dev->of_node,
"fsl,pcie-scfg", index, 2)) {
pcie->scfg = NULL;
return;
@@ -148,18 +148,19 @@ static void ls_pcie_host_init(struct pcie_port *pp)
{
struct ls_pcie *pcie = to_ls_pcie(pp);
- iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
+ iowrite32(1, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
ls_pcie_fix_class(pcie);
ls_pcie_clear_multifunction(pcie);
ls_pcie_drop_msg_tlp(pcie);
- iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
+ iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
}
static int ls_pcie_msi_host_init(struct pcie_port *pp,
struct msi_controller *chip)
{
+ struct device *dev = pp->dev;
+ struct device_node *np = dev->of_node;
struct device_node *msi_node;
- struct device_node *np = pp->dev->of_node;
/*
* The MSI domain is set by the generic of_msi_configure(). This
@@ -169,7 +170,7 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp,
*/
msi_node = of_parse_phandle(np, "msi-parent", 0);
if (!msi_node) {
- dev_err(pp->dev, "failed to find msi-parent\n");
+ dev_err(dev, "failed to find msi-parent\n");
return -EINVAL;
}
@@ -212,19 +213,15 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ },
};
-static int __init ls_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
+static int __init ls_add_pcie_port(struct ls_pcie *pcie)
{
+ struct pcie_port *pp = &pcie->pp;
+ struct device *dev = pp->dev;
int ret;
- struct ls_pcie *pcie = to_ls_pcie(pp);
-
- pp->dev = &pdev->dev;
- pp->dbi_base = pcie->dbi;
- pp->ops = pcie->drvdata->ops;
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(pp->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -233,38 +230,42 @@ static int __init ls_add_pcie_port(struct pcie_port *pp,
static int __init ls_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct of_device_id *match;
struct ls_pcie *pcie;
+ struct pcie_port *pp;
struct resource *dbi_base;
int ret;
- match = of_match_device(ls_pcie_of_match, &pdev->dev);
+ match = of_match_device(ls_pcie_of_match, dev);
if (!match)
return -ENODEV;
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
+ pp = &pcie->pp;
+ pp->dev = dev;
+
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
- if (IS_ERR(pcie->dbi)) {
- dev_err(&pdev->dev, "missing *regs* space\n");
- return PTR_ERR(pcie->dbi);
+ pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base);
+ if (IS_ERR(pcie->pp.dbi_base)) {
+ dev_err(dev, "missing *regs* space\n");
+ return PTR_ERR(pcie->pp.dbi_base);
}
pcie->drvdata = match->data;
- pcie->lut = pcie->dbi + pcie->drvdata->lut_offset;
+ pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
+ pp->ops = pcie->drvdata->ops;
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
- ret = ls_add_pcie_port(&pcie->pp, pdev);
+ ret = ls_add_pcie_port(pcie);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, pcie);
-
return 0;
}
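
Note: in ls_pcie_probe() the ops assignment must follow the OF match-data lookup, since pp->ops is derived from pcie->drvdata; the hunk above has been adjusted accordingly. Sketch of the required ordering:

pcie->drvdata = match->data;		/* must be set first */
pp->ops = pcie->drvdata->ops;		/* derived from drvdata */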
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 307f81d6b479..45a89d969700 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1190,13 +1190,13 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
static int mvebu_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct mvebu_pcie *pcie;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct device_node *child;
int num, i, ret;
- pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
- GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@@ -1206,7 +1206,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
/* Get the PCIe memory and I/O aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
- dev_err(&pdev->dev, "invalid memory aperture size\n");
+ dev_err(dev, "invalid memory aperture size\n");
return -EINVAL;
}
@@ -1224,20 +1224,18 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
/* Get the bus range */
ret = of_pci_parse_bus_range(np, &pcie->busn);
if (ret) {
- dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
- ret);
+ dev_err(dev, "failed to parse bus-range property: %d\n", ret);
return ret;
}
- num = of_get_available_child_count(pdev->dev.of_node);
+ num = of_get_available_child_count(np);
- pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports),
- GFP_KERNEL);
+ pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
if (!pcie->ports)
return -ENOMEM;
i = 0;
- for_each_available_child_of_node(pdev->dev.of_node, child) {
+ for_each_available_child_of_node(np, child) {
struct mvebu_pcie_port *port = &pcie->ports[i];
ret = mvebu_pcie_parse_port(pcie, port, child);
@@ -1266,8 +1264,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
port->base = mvebu_pcie_map_registers(pdev, child, port);
if (IS_ERR(port->base)) {
- dev_err(&pdev->dev, "%s: cannot map registers\n",
- port->name);
+ dev_err(dev, "%s: cannot map registers\n", port->name);
port->base = NULL;
mvebu_pcie_powerdown(port);
continue;
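
Note: the pci-mvebu.c hunk above and the rcar-gen2 and tegra hunks below are the same mechanical cleanup: hoist &pdev->dev (or priv->dev / pcie->dev) into a local struct device *dev once per function, then use it for every devm_* call and log message. A minimal sketch, with example_probe() as a hypothetical function name:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;	/* hoisted once */
	struct device_node *np = dev->of_node;	/* derived locals follow */

	if (!np)
		return -ENODEV;

	dev_info(dev, "probed\n");
	return 0;
}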
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 597566f96f5e..1eeefa4df64c 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -154,10 +154,11 @@ static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
{
struct rcar_pci_priv *priv = pw;
+ struct device *dev = priv->dev;
u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
if (status & RCAR_PCI_INT_ALLERRORS) {
- dev_err(priv->dev, "error irq: status %08x\n", status);
+ dev_err(dev, "error irq: status %08x\n", status);
/* clear the error(s) */
iowrite32(status & RCAR_PCI_INT_ALLERRORS,
@@ -170,13 +171,14 @@ static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
{
+ struct device *dev = priv->dev;
int ret;
u32 val;
- ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
+ ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq,
IRQF_SHARED, "error irq", priv);
if (ret) {
- dev_err(priv->dev, "cannot claim IRQ for error handling\n");
+ dev_err(dev, "cannot claim IRQ for error handling\n");
return;
}
@@ -192,15 +194,16 @@ static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
{
struct rcar_pci_priv *priv = sys->private_data;
+ struct device *dev = priv->dev;
void __iomem *reg = priv->reg;
u32 val;
int ret;
- pm_runtime_enable(priv->dev);
- pm_runtime_get_sync(priv->dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
- dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
+ dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val);
/* Disable Direct Power Down State and assert reset */
val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
@@ -275,7 +278,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
/* Add PCI resources */
pci_add_resource(&sys->resources, &priv->mem_res);
- ret = devm_request_pci_bus_resources(priv->dev, &sys->resources);
+ ret = devm_request_pci_bus_resources(dev, &sys->resources);
if (ret < 0)
return ret;
@@ -311,6 +314,7 @@ static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
struct device_node *np)
{
+ struct device *dev = pci->dev;
struct of_pci_range range;
struct of_pci_range_parser parser;
int index = 0;
@@ -331,14 +335,14 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
/* Catch HW limitations */
if (!(range.flags & IORESOURCE_PREFETCH)) {
- dev_err(pci->dev, "window must be prefetchable\n");
+ dev_err(dev, "window must be prefetchable\n");
return -EINVAL;
}
if (pci->window_addr) {
u32 lowaddr = 1 << (ffs(pci->window_addr) - 1);
if (lowaddr < pci->window_size) {
- dev_err(pci->dev, "invalid window size/addr\n");
+ dev_err(dev, "invalid window size/addr\n");
return -EINVAL;
}
}
@@ -350,6 +354,7 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
static int rcar_pci_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct resource *cfg_res, *mem_res;
struct rcar_pci_priv *priv;
void __iomem *reg;
@@ -357,7 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
void *hw_private[1];
cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, cfg_res);
+ reg = devm_ioremap_resource(dev, cfg_res);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -368,8 +373,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
if (mem_res->start & 0xFFFF)
return -EINVAL;
- priv = devm_kzalloc(&pdev->dev,
- sizeof(struct rcar_pci_priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -378,10 +382,10 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->irq = platform_get_irq(pdev, 0);
priv->reg = reg;
- priv->dev = &pdev->dev;
+ priv->dev = dev;
if (priv->irq < 0) {
- dev_err(&pdev->dev, "no valid irq found\n");
+ dev_err(dev, "no valid irq found\n");
return priv->irq;
}
@@ -390,23 +394,23 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv->window_pci = 0x40000000;
priv->window_size = SZ_1G;
- if (pdev->dev.of_node) {
+ if (dev->of_node) {
struct resource busnr;
int ret;
- ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr);
+ ret = of_pci_parse_bus_range(dev->of_node, &busnr);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to parse bus-range\n");
+ dev_err(dev, "failed to parse bus-range\n");
return ret;
}
priv->busnr = busnr.start;
if (busnr.end != busnr.start)
- dev_warn(&pdev->dev, "only one bus number supported\n");
+ dev_warn(dev, "only one bus number supported\n");
- ret = rcar_pci_parse_map_dma_ranges(priv, pdev->dev.of_node);
+ ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to parse dma-range\n");
+ dev_err(dev, "failed to parse dma-range\n");
return ret;
}
} else {
@@ -421,7 +425,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
hw.map_irq = rcar_pci_map_irq;
hw.ops = &rcar_pci_ops;
hw.setup = rcar_pci_setup;
- pci_common_init_dev(&pdev->dev, &hw);
+ pci_common_init_dev(dev, &hw);
return 0;
}
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 6de0757b11e4..8dfccf733241 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -240,7 +240,7 @@ struct tegra_msi {
};
/* used to differentiate between Tegra SoC generations */
-struct tegra_pcie_soc_data {
+struct tegra_pcie_soc {
unsigned int num_ports;
unsigned int msi_base_shift;
u32 pads_pll_ctl;
@@ -300,7 +300,7 @@ struct tegra_pcie {
struct regulator_bulk_data *supplies;
unsigned int num_supplies;
- const struct tegra_pcie_soc_data *soc_data;
+ const struct tegra_pcie_soc *soc;
struct dentry *debugfs;
};
@@ -384,6 +384,7 @@ static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
+ struct device *dev = pcie->dev;
pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
phys_addr_t cs = pcie->cs->start;
@@ -413,8 +414,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
if (err < 0) {
- dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
- err);
+ dev_err(dev, "ioremap_page_range() failed: %d\n", err);
goto unmap;
}
}
@@ -462,6 +462,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
int where)
{
struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct device *dev = pcie->dev;
void __iomem *addr = NULL;
if (bus->number == 0) {
@@ -482,8 +483,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
addr = (void __iomem *)b->area->addr;
if (!addr) {
- dev_err(pcie->dev,
- "failed to map cfg. space for bus %u\n",
+ dev_err(dev, "failed to map cfg. space for bus %u\n",
bus->number);
return NULL;
}
@@ -542,8 +542,8 @@ static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
- const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
unsigned long value;
/* enable reference clock */
@@ -562,8 +562,8 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
- const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ const struct tegra_pcie_soc *soc = port->pcie->soc;
unsigned long value;
/* assert port reset */
@@ -584,12 +584,13 @@ static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
struct tegra_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
- devm_iounmap(pcie->dev, port->base);
- devm_release_mem_region(pcie->dev, port->regs.start,
+ devm_iounmap(dev, port->base);
+ devm_release_mem_region(dev, port->regs.start,
resource_size(&port->regs));
list_del(&port->list);
- devm_kfree(pcie->dev, port);
+ devm_kfree(dev, port);
}
/* Tegra PCIE root complex wrongly reports device class */
@@ -612,26 +613,30 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct tegra_pcie *pcie = sys_to_pcie(sys);
+ struct device *dev = pcie->dev;
int err;
sys->mem_offset = pcie->offset.mem;
sys->io_offset = pcie->offset.io;
- err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io);
+ err = devm_request_resource(dev, &iomem_resource, &pcie->io);
if (err < 0)
return err;
- pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset);
+ err = pci_remap_iospace(&pcie->pio, pcie->io.start);
+ if (!err)
+ pci_add_resource_offset(&sys->resources, &pcie->pio,
+ sys->io_offset);
+
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource_offset(&sys->resources, &pcie->prefetch,
sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
- err = devm_request_pci_bus_resources(pcie->dev, &sys->resources);
+ err = devm_request_pci_bus_resources(dev, &sys->resources);
if (err < 0)
return err;
- pci_remap_iospace(&pcie->pio, pcie->io.start);
return 1;
}
@@ -669,6 +674,7 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
"Peer2Peer error",
};
struct tegra_pcie *pcie = arg;
+ struct device *dev = pcie->dev;
u32 code, signature;
code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
@@ -686,11 +692,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
* happen a lot during enumeration
*/
if (code == AFI_INTR_MASTER_ABORT)
- dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
- signature);
+ dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
else
- dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
- signature);
+ dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
code == AFI_INTR_FPCI_DECODE_ERROR) {
@@ -698,9 +702,9 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
if (code == AFI_INTR_MASTER_ABORT)
- dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
+ dev_dbg(dev, " FPCI address: %10llx\n", address);
else
- dev_err(pcie->dev, " FPCI address: %10llx\n", address);
+ dev_err(dev, " FPCI address: %10llx\n", address);
}
return IRQ_HANDLED;
@@ -774,7 +778,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
@@ -790,7 +794,8 @@ static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
u32 value;
int err;
@@ -826,7 +831,7 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
/* wait for the PLL to lock */
err = tegra_pcie_pll_wait(pcie, 500);
if (err < 0) {
- dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
+ dev_err(dev, "PLL failed to lock: %d\n", err);
return err;
}
@@ -845,7 +850,7 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
u32 value;
/* disable TX/RX data */
@@ -856,7 +861,7 @@ static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
/* override IDDQ */
value = pads_readl(pcie, PADS_CTL);
value |= PADS_CTL_IDDQ_1L;
- pads_writel(pcie, PADS_CTL, value);
+ pads_writel(pcie, value, PADS_CTL);
/* reset PLL */
value = pads_readl(pcie, soc->pads_pll_ctl);
@@ -877,8 +882,7 @@ static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
for (i = 0; i < port->lanes; i++) {
err = phy_power_on(port->phys[i]);
if (err < 0) {
- dev_err(dev, "failed to power on PHY#%u: %d\n", i,
- err);
+ dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
return err;
}
}
@@ -906,7 +910,8 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
int err;
@@ -917,7 +922,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
err = tegra_pcie_phy_enable(pcie);
if (err < 0)
- dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+ dev_err(dev, "failed to power on PHY: %d\n", err);
return err;
}
@@ -925,7 +930,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
err = tegra_pcie_port_phy_power_on(port);
if (err < 0) {
- dev_err(pcie->dev,
+ dev_err(dev,
"failed to power on PCIe port %u PHY: %d\n",
port->index, err);
return err;
@@ -943,6 +948,7 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
struct tegra_pcie_port *port;
int err;
@@ -953,8 +959,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
err = tegra_pcie_phy_disable(pcie);
if (err < 0)
- dev_err(pcie->dev, "failed to power off PHY: %d\n",
- err);
+ dev_err(dev, "failed to power off PHY: %d\n", err);
return err;
}
@@ -962,7 +967,7 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
list_for_each_entry(port, &pcie->ports, list) {
err = tegra_pcie_port_phy_power_off(port);
if (err < 0) {
- dev_err(pcie->dev,
+ dev_err(dev,
"failed to power off PCIe port %u PHY: %d\n",
port->index, err);
return err;
@@ -974,7 +979,8 @@ static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_pcie_port *port;
unsigned long value;
int err;
@@ -1013,7 +1019,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
err = tegra_pcie_phy_power_on(pcie);
if (err < 0) {
- dev_err(pcie->dev, "failed to power on PHY(s): %d\n", err);
+ dev_err(dev, "failed to power on PHY(s): %d\n", err);
return err;
}
@@ -1046,13 +1052,14 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
int err;
/* TODO: disable and unprepare clocks? */
err = tegra_pcie_phy_power_off(pcie);
if (err < 0)
- dev_err(pcie->dev, "failed to power off PHY(s): %d\n", err);
+ dev_err(dev, "failed to power off PHY(s): %d\n", err);
reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
@@ -1062,12 +1069,13 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
if (err < 0)
- dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
+ dev_warn(dev, "failed to disable regulators: %d\n", err);
}
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
reset_control_assert(pcie->pcie_xrst);
@@ -1079,13 +1087,13 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
/* enable regulators */
err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
if (err < 0)
- dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
+ dev_err(dev, "failed to enable regulators: %d\n", err);
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
pcie->pex_clk,
pcie->pex_rst);
if (err) {
- dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
+ dev_err(dev, "powerup sequence failed: %d\n", err);
return err;
}
@@ -1093,22 +1101,21 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
err = clk_prepare_enable(pcie->afi_clk);
if (err < 0) {
- dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
+ dev_err(dev, "failed to enable AFI clock: %d\n", err);
return err;
}
if (soc->has_cml_clk) {
err = clk_prepare_enable(pcie->cml_clk);
if (err < 0) {
- dev_err(pcie->dev, "failed to enable CML clock: %d\n",
- err);
+ dev_err(dev, "failed to enable CML clock: %d\n", err);
return err;
}
}
err = clk_prepare_enable(pcie->pll_e);
if (err < 0) {
- dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
+ dev_err(dev, "failed to enable PLLE clock: %d\n", err);
return err;
}
@@ -1117,22 +1124,23 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
- pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
+ pcie->pex_clk = devm_clk_get(dev, "pex");
if (IS_ERR(pcie->pex_clk))
return PTR_ERR(pcie->pex_clk);
- pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
+ pcie->afi_clk = devm_clk_get(dev, "afi");
if (IS_ERR(pcie->afi_clk))
return PTR_ERR(pcie->afi_clk);
- pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
+ pcie->pll_e = devm_clk_get(dev, "pll_e");
if (IS_ERR(pcie->pll_e))
return PTR_ERR(pcie->pll_e);
if (soc->has_cml_clk) {
- pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
+ pcie->cml_clk = devm_clk_get(dev, "cml");
if (IS_ERR(pcie->cml_clk))
return PTR_ERR(pcie->cml_clk);
}
@@ -1142,15 +1150,17 @@ static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
- pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
+ struct device *dev = pcie->dev;
+
+ pcie->pex_rst = devm_reset_control_get(dev, "pex");
if (IS_ERR(pcie->pex_rst))
return PTR_ERR(pcie->pex_rst);
- pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
+ pcie->afi_rst = devm_reset_control_get(dev, "afi");
if (IS_ERR(pcie->afi_rst))
return PTR_ERR(pcie->afi_rst);
- pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
+ pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
if (IS_ERR(pcie->pcie_xrst))
return PTR_ERR(pcie->pcie_xrst);
@@ -1159,18 +1169,19 @@ static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
int err;
- pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
+ pcie->phy = devm_phy_optional_get(dev, "pcie");
if (IS_ERR(pcie->phy)) {
err = PTR_ERR(pcie->phy);
- dev_err(pcie->dev, "failed to get PHY: %d\n", err);
+ dev_err(dev, "failed to get PHY: %d\n", err);
return err;
}
err = phy_init(pcie->phy);
if (err < 0) {
- dev_err(pcie->dev, "failed to initialize PHY: %d\n", err);
+ dev_err(dev, "failed to initialize PHY: %d\n", err);
return err;
}
@@ -1234,7 +1245,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ const struct tegra_pcie_soc *soc = pcie->soc;
struct device_node *np = pcie->dev->of_node;
struct tegra_pcie_port *port;
int err;
@@ -1253,43 +1264,44 @@ static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct resource *pads, *afi, *res;
int err;
err = tegra_pcie_clocks_get(pcie);
if (err) {
- dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
+ dev_err(dev, "failed to get clocks: %d\n", err);
return err;
}
err = tegra_pcie_resets_get(pcie);
if (err) {
- dev_err(&pdev->dev, "failed to get resets: %d\n", err);
+ dev_err(dev, "failed to get resets: %d\n", err);
return err;
}
err = tegra_pcie_phys_get(pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to get PHYs: %d\n", err);
+ dev_err(dev, "failed to get PHYs: %d\n", err);
return err;
}
err = tegra_pcie_power_on(pcie);
if (err) {
- dev_err(&pdev->dev, "failed to power up: %d\n", err);
+ dev_err(dev, "failed to power up: %d\n", err);
return err;
}
pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
- pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
+ pcie->pads = devm_ioremap_resource(dev, pads);
if (IS_ERR(pcie->pads)) {
err = PTR_ERR(pcie->pads);
goto poweroff;
}
afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
- pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
+ pcie->afi = devm_ioremap_resource(dev, afi);
if (IS_ERR(pcie->afi)) {
err = PTR_ERR(pcie->afi);
goto poweroff;
@@ -1302,7 +1314,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
goto poweroff;
}
- pcie->cs = devm_request_mem_region(pcie->dev, res->start,
+ pcie->cs = devm_request_mem_region(dev, res->start,
resource_size(res), res->name);
if (!pcie->cs) {
err = -EADDRNOTAVAIL;
@@ -1312,7 +1324,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
/* request interrupt */
err = platform_get_irq_byname(pdev, "intr");
if (err < 0) {
- dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ dev_err(dev, "failed to get IRQ: %d\n", err);
goto poweroff;
}
@@ -1320,7 +1332,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
if (err) {
- dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
+ dev_err(dev, "failed to register IRQ: %d\n", err);
goto poweroff;
}
@@ -1333,6 +1345,7 @@ poweroff:
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
int err;
if (pcie->irq > 0)
@@ -1342,7 +1355,7 @@ static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
err = phy_exit(pcie->phy);
if (err < 0)
- dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
+ dev_err(dev, "failed to teardown PHY: %d\n", err);
return 0;
}
@@ -1381,6 +1394,7 @@ static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
struct tegra_pcie *pcie = data;
+ struct device *dev = pcie->dev;
struct tegra_msi *msi = &pcie->msi;
unsigned int i, processed = 0;
@@ -1400,13 +1414,13 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
if (test_bit(index, msi->used))
generic_handle_irq(irq);
else
- dev_info(pcie->dev, "unhandled MSI\n");
+ dev_info(dev, "unhandled MSI\n");
} else {
/*
* that's weird; who triggered this?
* just clear it
*/
- dev_info(pcie->dev, "unexpected MSI\n");
+ dev_info(dev, "unexpected MSI\n");
}
/* see if there's any more pending in this vector */
@@ -1485,8 +1499,9 @@ static const struct irq_domain_ops msi_domain_ops = {
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
unsigned long base;
int err;
@@ -1494,20 +1509,20 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
mutex_init(&msi->lock);
- msi->chip.dev = pcie->dev;
+ msi->chip.dev = dev;
msi->chip.setup_irq = tegra_msi_setup_irq;
msi->chip.teardown_irq = tegra_msi_teardown_irq;
- msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+ msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
&msi_domain_ops, &msi->chip);
if (!msi->domain) {
- dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
err = platform_get_irq_byname(pdev, "msi");
if (err < 0) {
- dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ dev_err(dev, "failed to get IRQ: %d\n", err);
goto err;
}
@@ -1516,7 +1531,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
tegra_msi_irq_chip.name, pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
@@ -1591,46 +1606,47 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
u32 *xbar)
{
- struct device_node *np = pcie->dev->of_node;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
switch (lanes) {
case 0x0000104:
- dev_info(pcie->dev, "4x1, 1x1 configuration\n");
+ dev_info(dev, "4x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
return 0;
case 0x0000102:
- dev_info(pcie->dev, "2x1, 1x1 configuration\n");
+ dev_info(dev, "2x1, 1x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
switch (lanes) {
case 0x00000204:
- dev_info(pcie->dev, "4x1, 2x1 configuration\n");
+ dev_info(dev, "4x1, 2x1 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
return 0;
case 0x00020202:
- dev_info(pcie->dev, "2x3 configuration\n");
+ dev_info(dev, "2x3 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
return 0;
case 0x00010104:
- dev_info(pcie->dev, "4x1, 1x2 configuration\n");
+ dev_info(dev, "4x1, 1x2 configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
return 0;
}
} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
switch (lanes) {
case 0x00000004:
- dev_info(pcie->dev, "single-mode configuration\n");
+ dev_info(dev, "single-mode configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
return 0;
case 0x00000202:
- dev_info(pcie->dev, "dual-mode configuration\n");
+ dev_info(dev, "dual-mode configuration\n");
*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
return 0;
}
@@ -1670,7 +1686,8 @@ static bool of_regulator_bulk_available(struct device_node *np,
*/
static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
{
- struct device_node *np = pcie->dev->of_node;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
pcie->num_supplies = 3;
@@ -1678,12 +1695,12 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
pcie->num_supplies = 2;
if (pcie->num_supplies == 0) {
- dev_err(pcie->dev, "device %s not supported in legacy mode\n",
+ dev_err(dev, "device %s not supported in legacy mode\n",
np->full_name);
return -ENODEV;
}
- pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@@ -1695,8 +1712,7 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
if (pcie->num_supplies > 2)
pcie->supplies[2].supply = "avdd";
- return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
- pcie->supplies);
+ return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
}
/*
@@ -1710,13 +1726,14 @@ static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
*/
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
- struct device_node *np = pcie->dev->of_node;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
unsigned int i = 0;
if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
pcie->num_supplies = 7;
- pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@@ -1743,7 +1760,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
(need_pexb ? 2 : 0);
- pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@@ -1766,7 +1783,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
pcie->num_supplies = 5;
- pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
sizeof(*pcie->supplies),
GFP_KERNEL);
if (!pcie->supplies)
@@ -1779,9 +1796,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
pcie->supplies[4].supply = "vddio-pex-clk";
}
- if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
+ if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
pcie->num_supplies))
- return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
+ return devm_regulator_bulk_get(dev, pcie->num_supplies,
pcie->supplies);
/*
@@ -1789,9 +1806,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
* that the device tree complies with an older version of the device
* tree binding.
*/
- dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
+ dev_info(dev, "using legacy DT binding for power supplies\n");
- devm_kfree(pcie->dev, pcie->supplies);
+ devm_kfree(dev, pcie->supplies);
pcie->num_supplies = 0;
return tegra_pcie_get_legacy_regulators(pcie);
@@ -1799,8 +1816,9 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
- struct device_node *np = pcie->dev->of_node, *port;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node, *port;
+ const struct tegra_pcie_soc *soc = pcie->soc;
struct of_pci_range_parser parser;
struct of_pci_range range;
u32 lanes = 0, mask = 0;
@@ -1809,7 +1827,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
int err;
if (of_pci_range_parser_init(&parser, np)) {
- dev_err(pcie->dev, "missing \"ranges\" property\n");
+ dev_err(dev, "missing \"ranges\" property\n");
return -EINVAL;
}
@@ -1864,8 +1882,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_parse_bus_range(np, &pcie->busn);
if (err < 0) {
- dev_err(pcie->dev, "failed to parse ranges property: %d\n",
- err);
+ dev_err(dev, "failed to parse ranges property: %d\n", err);
pcie->busn.name = np->name;
pcie->busn.start = 0;
pcie->busn.end = 0xff;
@@ -1880,15 +1897,14 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_get_devfn(port);
if (err < 0) {
- dev_err(pcie->dev, "failed to parse address: %d\n",
- err);
+ dev_err(dev, "failed to parse address: %d\n", err);
return err;
}
index = PCI_SLOT(err);
if (index < 1 || index > soc->num_ports) {
- dev_err(pcie->dev, "invalid port number: %d\n", index);
+ dev_err(dev, "invalid port number: %d\n", index);
return -EINVAL;
}
@@ -1896,13 +1912,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
if (err < 0) {
- dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
+ dev_err(dev, "failed to parse # of lanes: %d\n",
err);
return err;
}
if (value > 16) {
- dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
+ dev_err(dev, "invalid # of lanes: %u\n", value);
return -EINVAL;
}
@@ -1916,14 +1932,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
mask |= ((1 << value) - 1) << lane;
lane += value;
- rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
+ rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
if (!rp)
return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
if (err < 0) {
- dev_err(pcie->dev, "failed to parse address: %d\n",
- err);
+ dev_err(dev, "failed to parse address: %d\n", err);
return err;
}
@@ -1933,7 +1948,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->pcie = pcie;
rp->np = port;
- rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
+ rp->base = devm_ioremap_resource(dev, &rp->regs);
if (IS_ERR(rp->base))
return PTR_ERR(rp->base);
@@ -1942,7 +1957,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
if (err < 0) {
- dev_err(pcie->dev, "invalid lane configuration\n");
+ dev_err(dev, "invalid lane configuration\n");
return err;
}
@@ -1961,6 +1976,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
+ struct device *dev = port->pcie->dev;
unsigned int retries = 3;
unsigned long value;
@@ -1983,8 +1999,7 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
} while (--timeout);
if (!timeout) {
- dev_err(port->pcie->dev, "link %u down, retrying\n",
- port->index);
+ dev_err(dev, "link %u down, retrying\n", port->index);
goto retry;
}
@@ -2008,11 +2023,12 @@ retry:
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
+ struct device *dev = pcie->dev;
struct tegra_pcie_port *port, *tmp;
struct hw_pci hw;
list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
- dev_info(pcie->dev, "probing port %u, using %u lanes\n",
+ dev_info(dev, "probing port %u, using %u lanes\n",
port->index, port->lanes);
tegra_pcie_port_enable(port);
@@ -2020,7 +2036,7 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
if (tegra_pcie_port_check_link(port))
continue;
- dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
+ dev_info(dev, "link %u down, ignoring\n", port->index);
tegra_pcie_port_disable(port);
tegra_pcie_port_free(port);
@@ -2038,12 +2054,11 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
hw.map_irq = tegra_pcie_map_irq;
hw.ops = &tegra_pcie_ops;
- pci_common_init_dev(pcie->dev, &hw);
-
+ pci_common_init_dev(dev, &hw);
return 0;
}
-static const struct tegra_pcie_soc_data tegra20_pcie_data = {
+static const struct tegra_pcie_soc tegra20_pcie = {
.num_ports = 2,
.msi_base_shift = 0,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
@@ -2056,7 +2071,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
.has_gen2 = false,
};
-static const struct tegra_pcie_soc_data tegra30_pcie_data = {
+static const struct tegra_pcie_soc tegra30_pcie = {
.num_ports = 3,
.msi_base_shift = 8,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
@@ -2070,7 +2085,7 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
.has_gen2 = false,
};
-static const struct tegra_pcie_soc_data tegra124_pcie_data = {
+static const struct tegra_pcie_soc tegra124_pcie = {
.num_ports = 2,
.msi_base_shift = 8,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
@@ -2084,9 +2099,9 @@ static const struct tegra_pcie_soc_data tegra124_pcie_data = {
};
static const struct of_device_id tegra_pcie_of_match[] = {
- { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
- { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
- { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
+ { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
+ { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
+ { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
{ },
};
@@ -2201,22 +2216,18 @@ remove:
static int tegra_pcie_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
struct tegra_pcie *pcie;
int err;
- match = of_match_device(tegra_pcie_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
-
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
+ pcie->soc = of_device_get_match_data(dev);
INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
- pcie->soc_data = match->data;
- pcie->dev = &pdev->dev;
+ pcie->dev = dev;
err = tegra_pcie_parse_dt(pcie);
if (err < 0)
@@ -2224,7 +2235,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = tegra_pcie_get_resources(pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+ dev_err(dev, "failed to request resources: %d\n", err);
return err;
}
@@ -2238,27 +2249,23 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = tegra_pcie_enable_msi(pcie);
if (err < 0) {
- dev_err(&pdev->dev,
- "failed to enable MSI support: %d\n",
- err);
+ dev_err(dev, "failed to enable MSI support: %d\n", err);
goto put_resources;
}
}
err = tegra_pcie_enable(pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
+ dev_err(dev, "failed to enable PCIe ports: %d\n", err);
goto disable_msi;
}
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_pcie_debugfs_init(pcie);
if (err < 0)
- dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
- err);
+ dev_err(dev, "failed to setup debugfs: %d\n", err);
}
- platform_set_drvdata(pdev, pcie);
return 0;
disable_msi:
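The pci-tegra.c hunks above apply one mechanical refactor throughout the file: cache a local struct device *dev at the top of each function and use it for every dev_*() and devm_*() call, and let of_device_get_match_data() replace the open-coded of_match_device() lookup. A minimal sketch of the probe-side idiom (the foo_* names are illustrative, not from the driver):

#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_pcie {
	struct device *dev;
	const void *soc;		/* per-SoC match data */
};

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;	/* cache once, use everywhere */
	struct foo_pcie *pcie;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	/*
	 * of_device_get_match_data() returns the .data pointer of the
	 * matching of_device_id entry (or NULL), so the explicit
	 * of_match_device() + match->data dance becomes unnecessary.
	 */
	pcie->soc = of_device_get_match_data(dev);
	pcie->dev = dev;

	return 0;
}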
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index f234405770ab..b7dc07002f13 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -74,7 +74,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
int err, mem = 1, res_valid = 0;
struct device_node *np = dev->of_node;
resource_size_t iobase;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
if (err)
@@ -84,15 +84,17 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
goto out_release_res;
- resource_list_for_each_entry(win, res) {
+ resource_list_for_each_entry_safe(win, tmp, res) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+ resource_list_destroy_entry(win);
+ }
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
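The pci-versatile.c change is a small robustness fix: when pci_remap_iospace() fails, the I/O window must be dropped from the resource list instead of being handed half-configured to the PCI core, and destroying an entry mid-walk requires the _safe iterator. A condensed sketch of the idiom, assuming an already-populated host-bridge resource list (foo_map_io_windows is a made-up helper name):

#include <linux/pci.h>
#include <linux/resource_ext.h>

static void foo_map_io_windows(struct list_head *res, resource_size_t iobase)
{
	struct resource_entry *win, *tmp;

	resource_list_for_each_entry_safe(win, tmp, res) {
		struct resource *r = win->res;

		if (resource_type(r) != IORESOURCE_IO)
			continue;
		/* drop windows that fail to map; _safe keeps the walk valid */
		if (pci_remap_iospace(r, iobase))
			resource_list_destroy_entry(win);
	}
}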
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index a81273c23341..1de23d74783f 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -76,6 +76,16 @@ struct xgene_pcie_port {
u32 version;
};
+static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
+{
+ return readl(port->csr_base + reg);
+}
+
+static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
+{
+ writel(val, port->csr_base + reg);
+}
+
static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
{
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
@@ -112,9 +122,9 @@ static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
if (!pci_is_root_bus(bus))
rtdid_val = (b << 8) | (d << 3) | f;
- writel(rtdid_val, port->csr_base + RTDID);
+ xgene_pcie_writel(port, RTDID, rtdid_val);
/* read the register back to ensure flush */
- readl(port->csr_base + RTDID);
+ xgene_pcie_readl(port, RTDID);
}
/*
@@ -179,28 +189,28 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
-static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
+static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
u32 flags, u64 size)
{
u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
u32 val32 = 0;
u32 val;
- val32 = readl(csr_base + addr);
+ val32 = xgene_pcie_readl(port, addr);
val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
- writel(val, csr_base + addr);
+ xgene_pcie_writel(port, addr, val);
- val32 = readl(csr_base + addr + 0x04);
+ val32 = xgene_pcie_readl(port, addr + 0x04);
val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
- writel(val, csr_base + addr + 0x04);
+ xgene_pcie_writel(port, addr + 0x04, val);
- val32 = readl(csr_base + addr + 0x04);
+ val32 = xgene_pcie_readl(port, addr + 0x04);
val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
- writel(val, csr_base + addr + 0x04);
+ xgene_pcie_writel(port, addr + 0x04, val);
- val32 = readl(csr_base + addr + 0x08);
+ val32 = xgene_pcie_readl(port, addr + 0x08);
val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
- writel(val, csr_base + addr + 0x08);
+ xgene_pcie_writel(port, addr + 0x08, val);
return mask;
}
@@ -208,32 +218,32 @@ static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
static void xgene_pcie_linkup(struct xgene_pcie_port *port,
u32 *lanes, u32 *speed)
{
- void __iomem *csr_base = port->csr_base;
u32 val32;
port->link_up = false;
- val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
+ val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS);
if (val32 & LINK_UP_MASK) {
port->link_up = true;
*speed = PIPE_PHY_RATE_RD(val32);
- val32 = readl(csr_base + BRIDGE_STATUS_0);
+ val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0);
*lanes = val32 >> 26;
}
}
static int xgene_pcie_init_port(struct xgene_pcie_port *port)
{
+ struct device *dev = port->dev;
int rc;
- port->clk = clk_get(port->dev, NULL);
+ port->clk = clk_get(dev, NULL);
if (IS_ERR(port->clk)) {
- dev_err(port->dev, "clock not available\n");
+ dev_err(dev, "clock not available\n");
return -ENODEV;
}
rc = clk_prepare_enable(port->clk);
if (rc) {
- dev_err(port->dev, "clock enable failed\n");
+ dev_err(dev, "clock enable failed\n");
return rc;
}
@@ -243,15 +253,16 @@ static int xgene_pcie_init_port(struct xgene_pcie_port *port)
static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
struct platform_device *pdev)
{
+ struct device *dev = port->dev;
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
- port->csr_base = devm_ioremap_resource(port->dev, res);
+ port->csr_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
- port->cfg_base = devm_ioremap_resource(port->dev, res);
+ port->cfg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(port->cfg_base))
return PTR_ERR(port->cfg_base);
port->cfg_addr = res->start;
@@ -263,7 +274,7 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
struct resource *res, u32 offset,
u64 cpu_addr, u64 pci_addr)
{
- void __iomem *base = port->csr_base + offset;
+ struct device *dev = port->dev;
resource_size_t size = resource_size(res);
u64 restype = resource_type(res);
u64 mask = 0;
@@ -280,22 +291,24 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
if (size >= min_size)
mask = ~(size - 1) | flag;
else
- dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
+ dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n",
(u64)size, min_size);
- writel(lower_32_bits(cpu_addr), base);
- writel(upper_32_bits(cpu_addr), base + 0x04);
- writel(lower_32_bits(mask), base + 0x08);
- writel(upper_32_bits(mask), base + 0x0c);
- writel(lower_32_bits(pci_addr), base + 0x10);
- writel(upper_32_bits(pci_addr), base + 0x14);
+ xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr));
+ xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr));
+ xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask));
+ xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask));
+ xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr));
+ xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
}
-static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
+static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
{
- writel(lower_32_bits(addr), csr_base + CFGBARL);
- writel(upper_32_bits(addr), csr_base + CFGBARH);
- writel(EN_REG, csr_base + CFGCTL);
+ u64 addr = port->cfg_addr;
+
+ xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr));
+ xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr));
+ xgene_pcie_writel(port, CFGCTL, EN_REG);
}
static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
@@ -310,7 +323,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
struct resource *res = window->res;
u64 restype = resource_type(res);
- dev_dbg(port->dev, "%pR\n", res);
+ dev_dbg(dev, "%pR\n", res);
switch (restype) {
case IORESOURCE_IO:
@@ -339,17 +352,18 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
return -EINVAL;
}
}
- xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
-
+ xgene_pcie_setup_cfg_reg(port);
return 0;
}
-static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
+static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
+ u64 pim, u64 size)
{
- writel(lower_32_bits(pim), addr);
- writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
- writel(lower_32_bits(size), addr + 0x10);
- writel(upper_32_bits(size), addr + 0x14);
+ xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
+ xgene_pcie_writel(port, pim_reg + 0x04,
+ upper_32_bits(pim) | EN_COHERENCY);
+ xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size));
+ xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size));
}
/*
@@ -379,10 +393,10 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
struct of_pci_range *range, u8 *ib_reg_mask)
{
- void __iomem *csr_base = port->csr_base;
void __iomem *cfg_base = port->cfg_base;
+ struct device *dev = port->dev;
void *bar_addr;
- void *pim_addr;
+ u32 pim_reg;
u64 cpu_addr = range->cpu_addr;
u64 pci_addr = range->pci_addr;
u64 size = range->size;
@@ -393,7 +407,7 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
if (region < 0) {
- dev_warn(port->dev, "invalid pcie dma-range config\n");
+ dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
@@ -403,29 +417,27 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
switch (region) {
case 0:
- xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
+ xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size);
bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
writel(bar_low, bar_addr);
writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
- pim_addr = csr_base + PIM1_1L;
+ pim_reg = PIM1_1L;
break;
case 1:
- bar_addr = csr_base + IBAR2;
- writel(bar_low, bar_addr);
- writel(lower_32_bits(mask), csr_base + IR2MSK);
- pim_addr = csr_base + PIM2_1L;
+ xgene_pcie_writel(port, IBAR2, bar_low);
+ xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask));
+ pim_reg = PIM2_1L;
break;
case 2:
- bar_addr = csr_base + IBAR3L;
- writel(bar_low, bar_addr);
- writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
- writel(lower_32_bits(mask), csr_base + IR3MSKL);
- writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
- pim_addr = csr_base + PIM3_1L;
+ xgene_pcie_writel(port, IBAR3L, bar_low);
+ xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr));
+ xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask));
+ xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask));
+ pim_reg = PIM3_1L;
break;
}
- xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
+ xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}
static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
@@ -463,7 +475,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
- dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+ dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
}
@@ -476,13 +488,14 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
int i;
for (i = PIM1_1L; i <= CFGCTL; i += 4)
- writel(0x0, port->csr_base + i);
+ xgene_pcie_writel(port, i, 0);
}
static int xgene_pcie_setup(struct xgene_pcie_port *port,
struct list_head *res,
resource_size_t io_base)
{
+ struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
int ret;
@@ -490,7 +503,7 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
/* setup the vendor and device IDs correctly */
val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
- writel(val, port->csr_base + BRIDGE_CFG_0);
+ xgene_pcie_writel(port, BRIDGE_CFG_0, val);
ret = xgene_pcie_map_ranges(port, res, io_base);
if (ret)
@@ -502,27 +515,28 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
xgene_pcie_linkup(port, &lanes, &speed);
if (!port->link_up)
- dev_info(port->dev, "(rc) link down\n");
+ dev_info(dev, "(rc) link down\n");
else
- dev_info(port->dev, "(rc) x%d gen-%d link up\n",
- lanes, speed + 1);
+ dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1);
return 0;
}
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
- struct device_node *dn = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
struct xgene_pcie_port *port;
resource_size_t iobase = 0;
struct pci_bus *bus;
int ret;
LIST_HEAD(res);
- port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
- port->node = of_node_get(pdev->dev.of_node);
- port->dev = &pdev->dev;
+
+ port->node = of_node_get(dn);
+ port->dev = dev;
port->version = XGENE_PCIE_IP_VER_UNKN;
if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
@@ -540,7 +554,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_request_pci_bus_resources(&pdev->dev, &res);
+ ret = devm_request_pci_bus_resources(dev, &res);
if (ret)
goto error;
@@ -548,8 +562,7 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
goto error;
- bus = pci_create_root_bus(&pdev->dev, 0,
- &xgene_pcie_ops, port, &res);
+ bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res);
if (!bus) {
ret = -ENOMEM;
goto error;
@@ -558,8 +571,6 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
pci_bus_add_devices(bus);
-
- platform_set_drvdata(pdev, port);
return 0;
error:
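Most of the pci-xgene.c diff is the introduction of xgene_pcie_readl()/xgene_pcie_writel(), after which callers name registers by offset and the helpers that previously took iomapped addresses (setup_pims, setup_cfg_reg, set_ib_mask) take the port plus a u32 register offset instead. The shape of the pattern, reduced to essentials (foo_* names are illustrative):

#include <linux/io.h>
#include <linux/kernel.h>

struct foo_port {
	void __iomem *csr_base;		/* controller CSR window */
};

static u32 foo_readl(struct foo_port *port, u32 reg)
{
	return readl(port->csr_base + reg);
}

static void foo_writel(struct foo_port *port, u32 reg, u32 val)
{
	writel(val, port->csr_base + reg);
}

/* Callers pass register offsets; the base pointer stays private. */
static void foo_setup_window(struct foo_port *port, u32 reg, u64 pim, u64 size)
{
	foo_writel(port, reg, lower_32_bits(pim));
	foo_writel(port, reg + 0x04, upper_32_bits(pim));
	foo_writel(port, reg + 0x10, lower_32_bits(size));
	foo_writel(port, reg + 0x14, upper_32_bits(size));
}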
diff --git a/drivers/pci/host/pcie-altera-msi.c b/drivers/pci/host/pcie-altera-msi.c
index 99177f4ccde2..4e5d628e8cd4 100644
--- a/drivers/pci/host/pcie-altera-msi.c
+++ b/drivers/pci/host/pcie-altera-msi.c
@@ -1,4 +1,8 @@
/*
+ * Altera PCIe MSI support
+ *
+ * Author: Ley Foon Tan <lftan@altera.com>
+ *
* Copyright Altera Corporation (C) 2013-2015. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
@@ -16,7 +20,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -237,11 +241,6 @@ static int altera_msi_probe(struct platform_device *pdev)
msi->pdev = pdev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
- if (!res) {
- dev_err(&pdev->dev, "no csr memory resource defined\n");
- return -ENODEV;
- }
-
msi->csr_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(msi->csr_base)) {
dev_err(&pdev->dev, "failed to map csr memory\n");
@@ -250,11 +249,6 @@ static int altera_msi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"vector_slave");
- if (!res) {
- dev_err(&pdev->dev, "no vector_slave memory resource defined\n");
- return -ENODEV;
- }
-
msi->vector_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(msi->vector_base)) {
dev_err(&pdev->dev, "failed to map vector_slave memory\n");
@@ -308,7 +302,3 @@ static int __init altera_msi_init(void)
return platform_driver_register(&altera_msi_driver);
}
subsys_initcall(altera_msi_init);
-
-MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
-MODULE_DESCRIPTION("Altera PCIe MSI support");
-MODULE_LICENSE("GPL v2");
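The deleted !res checks above are redundant rather than lost error handling: devm_ioremap_resource() rejects a NULL resource itself, returning ERR_PTR(-EINVAL) and printing its own diagnostic, so a single IS_ERR() test covers both the missing-resource and failed-mapping cases. Roughly (a sketch, not the driver's exact code):

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *foo_map_named(struct platform_device *pdev,
				   const char *name)
{
	struct resource *res;

	/* res may be NULL here; devm_ioremap_resource() handles that */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}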
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index 2b7837650db8..b0ac4dfafa0b 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -1,6 +1,9 @@
/*
* Copyright Altera Corporation (C) 2013-2015. All rights reserved
*
+ * Author: Ley Foon Tan <lftan@altera.com>
+ * Description: Altera PCIe host controller driver
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -17,7 +20,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
@@ -43,6 +46,7 @@
#define RP_LTSSM_MASK 0x1f
#define LTSSM_L0 0xf
+#define PCIE_CAP_OFFSET 0x80
/* TLP configuration type 0 and 1 */
#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */
@@ -51,17 +55,22 @@
#define TLP_PAYLOAD_SIZE 0x01
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
-#define TLP_CFG_DW0(fmttype) (((fmttype) << 24) | TLP_PAYLOAD_SIZE)
-#define TLP_CFG_DW1(reqid, tag, be) (((reqid) << 16) | (tag << 8) | (be))
+#define RP_DEVFN 0
+#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
+#define TLP_CFG_DW0(pcie, bus) \
+ ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \
+ : TLP_FMTTYPE_CFGRD1) << 24) | \
+ TLP_PAYLOAD_SIZE)
+#define TLP_CFG_DW1(pcie, tag, be) \
+ (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
-#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_COMP_STATUS(s) (((s) >> 12) & 7)
#define TLP_HDR_SIZE 3
#define TLP_LOOP 500
-#define RP_DEVFN 0
-#define LINK_UP_TIMEOUT 5000
+#define LINK_UP_TIMEOUT HZ
+#define LINK_RETRAIN_TIMEOUT HZ
#define INTX_NUM 4
@@ -69,7 +78,7 @@
struct altera_pcie {
struct platform_device *pdev;
- void __iomem *cra_base;
+ void __iomem *cra_base; /* DT Cra */
int irq;
u8 root_bus_nr;
struct irq_domain *irq_domain;
@@ -99,38 +108,6 @@ static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
}
-static void altera_pcie_retrain(struct pci_dev *dev)
-{
- u16 linkcap, linkstat;
- struct altera_pcie *pcie = dev->bus->sysdata;
- int timeout = 0;
-
- if (!altera_pcie_link_is_up(pcie))
- return;
-
- /*
- * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
- * current speed is 2.5 GB/s.
- */
- pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap);
-
- if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
- return;
-
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
- if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
- pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_RL);
- while (!altera_pcie_link_is_up(pcie)) {
- timeout++;
- if (timeout > LINK_UP_TIMEOUT)
- break;
- udelay(5);
- }
- }
-}
-DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);
-
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
* from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
@@ -158,7 +135,7 @@ static void tlp_write_tx(struct altera_pcie *pcie,
cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}
-static bool altera_pcie_valid_config(struct altera_pcie *pcie,
+static bool altera_pcie_valid_device(struct altera_pcie *pcie,
struct pci_bus *bus, int dev)
{
/* If there is no link, then there is no device */
@@ -171,13 +148,6 @@ static bool altera_pcie_valid_config(struct altera_pcie *pcie,
if (bus->number == pcie->root_bus_nr && dev > 0)
return false;
- /*
- * Do not read more than one device on the bus directly attached
- * to root port, root port can only attach to one downstream port.
- */
- if (bus->primary == pcie->root_bus_nr && dev > 0)
- return false;
-
return true;
}
@@ -252,13 +222,8 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
{
u32 headers[TLP_HDR_SIZE];
- if (bus == pcie->root_bus_nr)
- headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD0);
- else
- headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
-
- headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
- TLP_READ_TAG, byte_en);
+ headers[0] = TLP_CFG_DW0(pcie, bus);
+ headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
tlp_write_packet(pcie, headers, 0, false);
@@ -272,13 +237,8 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
u32 headers[TLP_HDR_SIZE];
int ret;
- if (bus == pcie->root_bus_nr)
- headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR0);
- else
- headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
-
- headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
- TLP_WRITE_TAG, byte_en);
+ headers[0] = TLP_CFG_DW0(pcie, bus);
+ headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en);
headers[2] = TLP_CFG_DW2(bus, devfn, where);
/* check alignment to Qword */
@@ -301,22 +261,14 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
}
-static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *value)
+static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size,
+ u32 *value)
{
- struct altera_pcie *pcie = bus->sysdata;
int ret;
u32 data;
u8 byte_en;
- if (altera_pcie_hide_rc_bar(bus, devfn, where))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
- *value = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -329,7 +281,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
break;
}
- ret = tlp_cfg_dword_read(pcie, bus->number, devfn,
+ ret = tlp_cfg_dword_read(pcie, busno, devfn,
(where & ~DWORD_MASK), byte_en, &data);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
@@ -349,20 +301,14 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
-static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
+static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size,
+ u32 value)
{
- struct altera_pcie *pcie = bus->sysdata;
u32 data32;
u32 shift = 8 * (where & 3);
u8 byte_en;
- if (altera_pcie_hide_rc_bar(bus, devfn, where))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -378,8 +324,40 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
break;
}
- return tlp_cfg_dword_write(pcie, bus->number, devfn,
- (where & ~DWORD_MASK), byte_en, data32);
+ return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK),
+ byte_en, data32);
+}
+
+static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
+ *value = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
+ value);
+}
+
+static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
+ value);
}
static struct pci_ops altera_pcie_ops = {
@@ -387,12 +365,96 @@ static struct pci_ops altera_pcie_ops = {
.write = altera_pcie_cfg_write,
};
+static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int offset, u16 *value)
+{
+ u32 data;
+ int ret;
+
+ ret = _altera_pcie_cfg_read(pcie, busno, devfn,
+ PCIE_CAP_OFFSET + offset, sizeof(*value),
+ &data);
+ *value = data;
+ return ret;
+}
+
+static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int offset, u16 value)
+{
+ return _altera_pcie_cfg_write(pcie, busno, devfn,
+ PCIE_CAP_OFFSET + offset, sizeof(value),
+ value);
+}
+
+static void altera_wait_link_retrain(struct altera_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ u16 reg16;
+ unsigned long start_jiffies;
+
+ /* Wait for link training to end. */
+ start_jiffies = jiffies;
+ for (;;) {
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+
+ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
+ dev_err(dev, "link retrain timeout\n");
+ break;
+ }
+ udelay(100);
+ }
+
+ /* Wait for the link to come up */
+ start_jiffies = jiffies;
+ for (;;) {
+ if (altera_pcie_link_is_up(pcie))
+ break;
+
+ if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
+ dev_err(dev, "link up timeout\n");
+ break;
+ }
+ udelay(100);
+ }
+}
+
+static void altera_pcie_retrain(struct altera_pcie *pcie)
+{
+ u16 linkcap, linkstat, linkctl;
+
+ if (!altera_pcie_link_is_up(pcie))
+ return;
+
+ /*
+	 * Set the retrain bit if the PCIe root port supports > 2.5 GB/s but
+	 * the current speed is 2.5 GB/s.
+ */
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP,
+ &linkcap);
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return;
+
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA,
+ &linkstat);
+ if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKCTL, &linkctl);
+ linkctl |= PCI_EXP_LNKCTL_RL;
+ altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKCTL, linkctl);
+
+ altera_wait_link_retrain(pcie);
+ }
+}
+
static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
-
return 0;
}
@@ -404,12 +466,14 @@ static void altera_pcie_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct altera_pcie *pcie;
+ struct device *dev;
unsigned long status;
u32 bit;
u32 virq;
chained_irq_enter(chip, desc);
pcie = irq_desc_get_handler_data(desc);
+ dev = &pcie->pdev->dev;
while ((status = cra_readl(pcie, P2A_INT_STATUS)
& P2A_INT_STS_ALL) != 0) {
@@ -421,8 +485,7 @@ static void altera_pcie_isr(struct irq_desc *desc)
if (virq)
generic_handle_irq(virq);
else
- dev_err(&pcie->pdev->dev,
- "unexpected IRQ, INT%d\n", bit);
+ dev_err(dev, "unexpected IRQ, INT%d\n", bit);
}
}
@@ -481,41 +544,42 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
static int altera_pcie_parse_dt(struct altera_pcie *pcie)
{
- struct resource *cra;
+ struct device *dev = &pcie->pdev->dev;
struct platform_device *pdev = pcie->pdev;
+ struct resource *cra;
cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
- if (!cra) {
- dev_err(&pdev->dev, "no Cra memory resource defined\n");
- return -ENODEV;
- }
-
- pcie->cra_base = devm_ioremap_resource(&pdev->dev, cra);
+ pcie->cra_base = devm_ioremap_resource(dev, cra);
if (IS_ERR(pcie->cra_base)) {
- dev_err(&pdev->dev, "failed to map cra memory\n");
+ dev_err(dev, "failed to map cra memory\n");
return PTR_ERR(pcie->cra_base);
}
/* setup IRQ */
pcie->irq = platform_get_irq(pdev, 0);
if (pcie->irq <= 0) {
- dev_err(&pdev->dev, "failed to get IRQ: %d\n", pcie->irq);
+ dev_err(dev, "failed to get IRQ: %d\n", pcie->irq);
return -EINVAL;
}
irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
-
return 0;
}
+static void altera_pcie_host_init(struct altera_pcie *pcie)
+{
+ altera_pcie_retrain(pcie);
+}
+
static int altera_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct altera_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
int ret;
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@@ -523,7 +587,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
ret = altera_pcie_parse_dt(pcie);
if (ret) {
- dev_err(&pdev->dev, "Parsing DT failed\n");
+ dev_err(dev, "Parsing DT failed\n");
return ret;
}
@@ -531,13 +595,13 @@ static int altera_pcie_probe(struct platform_device *pdev)
ret = altera_pcie_parse_request_of_pci_ranges(pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed add resources\n");
+ dev_err(dev, "Failed add resources\n");
return ret;
}
ret = altera_pcie_init_irq_domain(pcie);
if (ret) {
- dev_err(&pdev->dev, "Failed creating IRQ Domain\n");
+ dev_err(dev, "Failed creating IRQ Domain\n");
return ret;
}
@@ -545,8 +609,9 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
/* enable all interrupts */
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
- bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
+ bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops,
pcie, &pcie->resources);
if (!bus)
return -ENOMEM;
@@ -559,8 +624,6 @@ static int altera_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
-
- platform_set_drvdata(pdev, pcie);
return ret;
}
@@ -568,7 +631,6 @@ static const struct of_device_id altera_pcie_of_match[] = {
{ .compatible = "altr,pcie-root-port-1.0", },
{},
};
-MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
static struct platform_driver altera_pcie_driver = {
.probe = altera_pcie_probe,
@@ -583,8 +645,4 @@ static int altera_pcie_init(void)
{
return platform_driver_register(&altera_pcie_driver);
}
-module_init(altera_pcie_init);
-
-MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
-MODULE_DESCRIPTION("Altera PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
+device_initcall(altera_pcie_init);
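The retrain logic moves out of a PCI early fixup and into the host driver, where it reads the root port's PCIe capability through the driver's own config-TLP accessors and polls with jiffies deadlines instead of a bare udelay counter. The deadline idiom used by altera_wait_link_retrain(), as a generic sketch (the foo_wait_for() helper shape is illustrative):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll a condition until it holds or a jiffies deadline passes. */
static int foo_wait_for(bool (*done)(void *ctx), void *ctx,
			unsigned long timeout)
{
	unsigned long deadline = jiffies + timeout;

	for (;;) {
		if (done(ctx))
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		udelay(100);	/* same poll interval as the driver */
	}
}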
diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c
index 0f4f570068e3..0ac0f18690f2 100644
--- a/drivers/pci/host/pcie-armada8k.c
+++ b/drivers/pci/host/pcie-armada8k.c
@@ -29,34 +29,33 @@
#include "pcie-designware.h"
struct armada8k_pcie {
- void __iomem *base;
+ struct pcie_port pp; /* pp.dbi_base is DT ctrl */
struct clk *clk;
- struct pcie_port pp;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
-#define PCIE_GLOBAL_CONTROL_REG 0x0
+#define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0)
#define PCIE_APP_LTSSM_EN BIT(2)
#define PCIE_DEVICE_TYPE_SHIFT 4
#define PCIE_DEVICE_TYPE_MASK 0xF
#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */
-#define PCIE_GLOBAL_STATUS_REG 0x8
+#define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8)
#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1)
#define PCIE_GLB_STS_PHY_LINK_UP BIT(9)
-#define PCIE_GLOBAL_INT_CAUSE1_REG 0x1C
-#define PCIE_GLOBAL_INT_MASK1_REG 0x20
+#define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C)
+#define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20)
#define PCIE_INT_A_ASSERT_MASK BIT(9)
#define PCIE_INT_B_ASSERT_MASK BIT(10)
#define PCIE_INT_C_ASSERT_MASK BIT(11)
#define PCIE_INT_D_ASSERT_MASK BIT(12)
-#define PCIE_ARCACHE_TRC_REG 0x50
-#define PCIE_AWCACHE_TRC_REG 0x54
-#define PCIE_ARUSER_REG 0x5C
-#define PCIE_AWUSER_REG 0x60
+#define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50)
+#define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54)
+#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C)
+#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60)
/*
* AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
* allocate
@@ -72,11 +71,10 @@ struct armada8k_pcie {
static int armada8k_pcie_link_up(struct pcie_port *pp)
{
- struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
- reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
return 1;
@@ -85,51 +83,50 @@ static int armada8k_pcie_link_up(struct pcie_port *pp)
return 0;
}
-static void armada8k_pcie_establish_link(struct pcie_port *pp)
+static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
{
- struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
- void __iomem *base = pcie->base;
+ struct pcie_port *pp = &pcie->pp;
u32 reg;
if (!dw_pcie_link_up(pp)) {
/* Disable LTSSM state machine to enable configuration */
- reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg &= ~(PCIE_APP_LTSSM_EN);
- writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+ dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
}
/* Set the device to root complex mode */
- reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
- writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+ dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
/* Set the PCIe master AxCache attributes */
- writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG);
- writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG);
+ dw_pcie_writel_rc(pp, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE);
+ dw_pcie_writel_rc(pp, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE);
/* Set the PCIe master AxDomain attributes */
- reg = readl(base + PCIE_ARUSER_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_ARUSER_REG);
reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
- writel(reg, base + PCIE_ARUSER_REG);
+ dw_pcie_writel_rc(pp, PCIE_ARUSER_REG, reg);
- reg = readl(base + PCIE_AWUSER_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_AWUSER_REG);
reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
- writel(reg, base + PCIE_AWUSER_REG);
+ dw_pcie_writel_rc(pp, PCIE_AWUSER_REG, reg);
/* Enable INT A-D interrupts */
- reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_MASK1_REG);
reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
- writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG);
+ dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_MASK1_REG, reg);
if (!dw_pcie_link_up(pp)) {
/* Configuration done. Start LTSSM */
- reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg = dw_pcie_readl_rc(pp, PCIE_GLOBAL_CONTROL_REG);
reg |= PCIE_APP_LTSSM_EN;
- writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+ dw_pcie_writel_rc(pp, PCIE_GLOBAL_CONTROL_REG, reg);
}
/* Wait until the link becomes active again */
@@ -139,15 +136,16 @@ static void armada8k_pcie_establish_link(struct pcie_port *pp)
static void armada8k_pcie_host_init(struct pcie_port *pp)
{
+ struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+
dw_pcie_setup_rc(pp);
- armada8k_pcie_establish_link(pp);
+ armada8k_pcie_establish_link(pcie);
}
static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
- struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
- void __iomem *base = pcie->base;
+ struct armada8k_pcie *pcie = arg;
+ struct pcie_port *pp = &pcie->pp;
u32 val;
/*
@@ -155,8 +153,8 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
* PCI device. However, they are also latched into the PCIe
* controller, so we simply discard them.
*/
- val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG);
- writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG);
+ val = dw_pcie_readl_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG);
+ dw_pcie_writel_rc(pp, PCIE_GLOBAL_INT_CAUSE1_REG, val);
return IRQ_HANDLED;
}
@@ -166,9 +164,10 @@ static struct pcie_host_ops armada8k_pcie_host_ops = {
.host_init = armada8k_pcie_host_init,
};
-static int armada8k_add_pcie_port(struct pcie_port *pp,
+static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct platform_device *pdev)
{
+ struct pcie_port *pp = &pcie->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -182,7 +181,7 @@ static int armada8k_add_pcie_port(struct pcie_port *pp,
}
ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
- IRQF_SHARED, "armada8k-pcie", pp);
+ IRQF_SHARED, "armada8k-pcie", pcie);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
@@ -217,7 +216,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
pp = &pcie->pp;
pp->dev = dev;
- platform_set_drvdata(pdev, pcie);
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
@@ -228,9 +226,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
goto fail;
}
- pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET;
-
- ret = armada8k_add_pcie_port(pp, pdev);
+ ret = armada8k_add_pcie_port(pcie, pdev);
if (ret)
goto fail;
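pcie-armada8k.c drops its private pcie->base pointer (dbi_base + PCIE_VENDOR_REGS_OFFSET) by folding the vendor-register offset into each register macro, after which every access goes through the generic dw_pcie_readl_rc()/dw_pcie_writel_rc() helpers, with the (pp, reg) signatures as used in this series. In outline (FOO_* macro and function names are illustrative):

#include <linux/bitops.h>
#include "pcie-designware.h"

#define FOO_VENDOR_REGS_OFFSET	0x8000
/* offsets are absolute within dbi space, not relative to a sub-base */
#define FOO_GLOBAL_CONTROL_REG	(FOO_VENDOR_REGS_OFFSET + 0x0)
#define FOO_APP_LTSSM_EN	BIT(2)

static void foo_start_ltssm(struct pcie_port *pp)
{
	u32 reg;

	reg = dw_pcie_readl_rc(pp, FOO_GLOBAL_CONTROL_REG);
	reg |= FOO_APP_LTSSM_EN;
	dw_pcie_writel_rc(pp, FOO_GLOBAL_CONTROL_REG, reg);
}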
diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/host/pcie-artpec6.c
index 16ba70b7ec65..212786b27f1a 100644
--- a/drivers/pci/host/pcie-artpec6.c
+++ b/drivers/pci/host/pcie-artpec6.c
@@ -27,9 +27,9 @@
#define to_artpec6_pcie(x) container_of(x, struct artpec6_pcie, pp)
struct artpec6_pcie {
- struct pcie_port pp;
- struct regmap *regmap;
- void __iomem *phy_base;
+ struct pcie_port pp; /* pp.dbi_base is DT dbi */
+ struct regmap *regmap; /* DT axis,syscon-pcie */
+ void __iomem *phy_base; /* DT phy */
};
/* PCIe Port Logic registers (memory-mapped) */
@@ -65,18 +65,31 @@ struct artpec6_pcie {
#define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff
-static int artpec6_pcie_establish_link(struct pcie_port *pp)
+static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
{
- struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
+ u32 val;
+
+ regmap_read(artpec6_pcie->regmap, offset, &val);
+ return val;
+}
+
+static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val)
+{
+ regmap_write(artpec6_pcie->regmap, offset, val);
+}
+
+static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
+{
+ struct pcie_port *pp = &artpec6_pcie->pp;
u32 val;
unsigned int retries;
/* Hold DW core in reset */
- regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_CORE_RESET_REQ;
- regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
- regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
PCIECFG_MODE_TX_DRV_EN |
PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */
@@ -84,27 +97,27 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
val |= PCIECFG_REFCLK_ENABLE;
val &= ~PCIECFG_DBG_OEN;
val &= ~PCIECFG_CLKREQ_B;
- regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(5000, 6000);
- regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val |= NOCCFG_ENABLE_CLK_PCIE;
- regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
usleep_range(20, 30);
- regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
- regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(6000, 7000);
- regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
- regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+ artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
retries = 50;
do {
usleep_range(1000, 2000);
- regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
retries--;
} while (retries &&
(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
@@ -117,16 +130,16 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
} while (retries && !(val & PHY_COSPLLLOCK));
/* Take DW core out of reset */
- regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_CORE_RESET_REQ;
- regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(100, 200);
/*
* Enable writing to config regs. This is required as the Synopsys
* driver changes the class code. That register needs DBI write enable.
*/
- writel(DBI_RO_WR_EN, pp->dbi_base + MISC_CONTROL_1_OFF);
+ dw_pcie_writel_rc(pp, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
@@ -137,78 +150,69 @@ static int artpec6_pcie_establish_link(struct pcie_port *pp)
dw_pcie_setup_rc(pp);
/* assert LTSSM enable */
- regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_LTSSM_ENABLE;
- regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
/* check if the link is up or not */
if (!dw_pcie_wait_for_link(pp))
return 0;
dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
- readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R0),
+ dw_pcie_readl_rc(pp, PCIE_PHY_DEBUG_R1));
return -ETIMEDOUT;
}
-static void artpec6_pcie_enable_interrupts(struct pcie_port *pp)
+static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
{
+ struct pcie_port *pp = &artpec6_pcie->pp;
+
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
}
static void artpec6_pcie_host_init(struct pcie_port *pp)
{
- artpec6_pcie_establish_link(pp);
- artpec6_pcie_enable_interrupts(pp);
-}
-
-static int artpec6_pcie_link_up(struct pcie_port *pp)
-{
- u32 rc;
-
- /*
- * Get status from Synopsys IP
- * link is debug bit 36, debug register 1 starts at bit 32
- */
- rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
- if (rc)
- return 1;
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
- return 0;
+ artpec6_pcie_establish_link(artpec6_pcie);
+ artpec6_pcie_enable_interrupts(artpec6_pcie);
}
static struct pcie_host_ops artpec6_pcie_host_ops = {
- .link_up = artpec6_pcie_link_up,
.host_init = artpec6_pcie_host_init,
};
static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
+ struct artpec6_pcie *artpec6_pcie = arg;
+ struct pcie_port *pp = &artpec6_pcie->pp;
return dw_handle_msi_irq(pp);
}
-static int __init artpec6_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
+static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
+ struct platform_device *pdev)
{
+ struct pcie_port *pp = &artpec6_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq <= 0) {
- dev_err(&pdev->dev, "failed to get MSI irq\n");
+ dev_err(dev, "failed to get MSI irq\n");
return -ENODEV;
}
- ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ ret = devm_request_irq(dev, pp->msi_irq,
artpec6_pcie_msi_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "artpec6-pcie-msi", pp);
+ "artpec6-pcie-msi", artpec6_pcie);
if (ret) {
- dev_err(&pdev->dev, "failed to request MSI irq\n");
+ dev_err(dev, "failed to request MSI irq\n");
return ret;
}
}
@@ -218,7 +222,7 @@ static int __init artpec6_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -227,41 +231,40 @@ static int __init artpec6_add_pcie_port(struct pcie_port *pp,
static int artpec6_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct artpec6_pcie *artpec6_pcie;
struct pcie_port *pp;
struct resource *dbi_base;
struct resource *phy_base;
int ret;
- artpec6_pcie = devm_kzalloc(&pdev->dev, sizeof(*artpec6_pcie),
- GFP_KERNEL);
+ artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
if (!artpec6_pcie)
return -ENOMEM;
pp = &artpec6_pcie->pp;
- pp->dev = &pdev->dev;
+ pp->dev = dev;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+ pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- artpec6_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+ artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
if (IS_ERR(artpec6_pcie->phy_base))
return PTR_ERR(artpec6_pcie->phy_base);
artpec6_pcie->regmap =
- syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ syscon_regmap_lookup_by_phandle(dev->of_node,
"axis,syscon-pcie");
if (IS_ERR(artpec6_pcie->regmap))
return PTR_ERR(artpec6_pcie->regmap);
- ret = artpec6_add_pcie_port(pp, pdev);
+ ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, artpec6_pcie);
return 0;
}
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index c8079dc81c10..537f58a664fa 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -25,8 +25,7 @@
#include "pcie-designware.h"
struct dw_plat_pcie {
- void __iomem *mem_base;
- struct pcie_port pp;
+ struct pcie_port pp; /* pp.dbi_base is DT 0th resource */
};
static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
@@ -52,6 +51,7 @@ static struct pcie_host_ops dw_plat_pcie_host_ops = {
static int dw_plat_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
{
+ struct device *dev = pp->dev;
int ret;
pp->irq = platform_get_irq(pdev, 1);
@@ -63,11 +63,11 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
if (pp->msi_irq < 0)
return pp->msi_irq;
- ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ ret = devm_request_irq(dev, pp->msi_irq,
dw_plat_pcie_msi_irq_handler,
IRQF_SHARED, "dw-plat-pcie-msi", pp);
if (ret) {
- dev_err(&pdev->dev, "failed to request MSI IRQ\n");
+ dev_err(dev, "failed to request MSI IRQ\n");
return ret;
}
}
@@ -77,7 +77,7 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -86,34 +86,28 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct dw_plat_pcie *dw_plat_pcie;
struct pcie_port *pp;
struct resource *res; /* Resource from DT */
int ret;
- dw_plat_pcie = devm_kzalloc(&pdev->dev, sizeof(*dw_plat_pcie),
- GFP_KERNEL);
+ dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
if (!dw_plat_pcie)
return -ENOMEM;
pp = &dw_plat_pcie->pp;
- pp->dev = &pdev->dev;
+ pp->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- dw_plat_pcie->mem_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dw_plat_pcie->mem_base))
- return PTR_ERR(dw_plat_pcie->mem_base);
-
- pp->dbi_base = dw_plat_pcie->mem_base;
+ pp->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
ret = dw_plat_add_pcie_port(pp, pdev);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, dw_plat_pcie);
return 0;
}
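Dropping the explicit `if (!res)` check is safe because devm_ioremap_resource() validates its resource argument itself and returns an ERR_PTR for a NULL resource, so the IS_ERR() test already covers the missing-resource case. The resulting idiom, for reference:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(dev, res);	/* handles res == NULL */
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);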
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 12afce19890b..035f50c03281 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -14,7 +14,6 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -26,7 +25,17 @@
#include "pcie-designware.h"
-/* Synopsis specific PCIE configuration registers */
+/* Parameters for the link-up polling loop */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+/* Parameters for the iATU-enable polling loop */
+#define LINK_WAIT_MAX_IATU_RETRIES 5
+#define LINK_WAIT_IATU_MIN 9000
+#define LINK_WAIT_IATU_MAX 10000
+
+/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL 0x710
#define PORT_LINK_MODE_MASK (0x3f << 16)
#define PORT_LINK_MODE_1_LANES (0x1 << 16)
@@ -51,6 +60,7 @@
#define PCIE_ATU_VIEWPORT 0x900
#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
+#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
#define PCIE_ATU_CR1 0x904
@@ -70,10 +80,26 @@
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C
+/*
+ * iATU Unroll-specific register definitions
+ * From core version 4.80, address translation is done through the iATU unroll registers
+ */
+#define PCIE_ATU_UNR_REGION_CTRL1 0x00
+#define PCIE_ATU_UNR_REGION_CTRL2 0x04
+#define PCIE_ATU_UNR_LOWER_BASE 0x08
+#define PCIE_ATU_UNR_UPPER_BASE 0x0C
+#define PCIE_ATU_UNR_LIMIT 0x10
+#define PCIE_ATU_UNR_LOWER_TARGET 0x14
+#define PCIE_ATU_UNR_UPPER_TARGET 0x18
+
+/* Register address builder */
+#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) ((0x3 << 20) | (region << 9))
+
/* PCIe Port Logic registers */
#define PLR_OFFSET 0x700
#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_LINK_UP 0x00000010
+#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4)
+#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29)
static struct pci_ops dw_pcie_ops;
@@ -115,22 +141,37 @@ int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
+u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
if (pp->ops->readl_rc)
- pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
- else
- *val = readl(pp->dbi_base + reg);
+ return pp->ops->readl_rc(pp, reg);
+
+ return readl(pp->dbi_base + reg);
}
-static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
+void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
if (pp->ops->writel_rc)
- pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
+ pp->ops->writel_rc(pp, reg, val);
else
writel(val, pp->dbi_base + reg);
}
+static u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+ return dw_pcie_readl_rc(pp, offset + reg);
+}
+
+static void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index, u32 reg,
+ u32 val)
+{
+ u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+
+ dw_pcie_writel_rc(pp, offset + reg, val);
+}
+
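A worked example of the PCIE_GET_ATU_OUTB_UNR_REG_OFFSET() arithmetic these helpers rely on: each outbound region gets its own 0x200-byte register block starting at 0x300000.

	/* (0x3 << 20) | (region << 9):
	 *   region 0, PCIE_ATU_UNR_LOWER_BASE   (0x08) -> 0x300008
	 *   region 1, PCIE_ATU_UNR_REGION_CTRL2 (0x04) -> 0x300204
	 */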
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
@@ -152,24 +193,57 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
- u32 val;
-
- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index,
- PCIE_ATU_VIEWPORT);
- dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE);
- dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE);
- dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1),
- PCIE_ATU_LIMIT);
- dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET);
- dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET);
- dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1);
- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ u32 retries, val;
+
+ if (pp->iatu_unroll_enabled) {
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_BASE,
+ upper_32_bits(cpu_addr));
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LIMIT,
+ lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_TARGET,
+ upper_32_bits(pci_addr));
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL1,
+ type);
+ dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL2,
+ PCIE_ATU_ENABLE);
+ } else {
+ dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT,
+ PCIE_ATU_REGION_OUTBOUND | index);
+ dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(cpu_addr));
+ dw_pcie_writel_rc(pp, PCIE_ATU_LIMIT,
+ lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(pci_addr));
+ dw_pcie_writel_rc(pp, PCIE_ATU_CR1, type);
+ dw_pcie_writel_rc(pp, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+ }
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
- dw_pcie_readl_rc(pp, PCIE_ATU_CR2, &val);
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ if (pp->iatu_unroll_enabled)
+ val = dw_pcie_readl_unroll(pp, index,
+ PCIE_ATU_UNR_REGION_CTRL2);
+ else
+ val = dw_pcie_readl_rc(pp, PCIE_ATU_CR2);
+
+ if (val == PCIE_ATU_ENABLE)
+ return;
+
+ usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ }
+ dev_err(pp->dev, "iATU is not being enabled\n");
}
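For readers new to the two programming models this function now supports, a compressed comparison, drawn only from the register definitions above:

	/* Viewport model: one shared window -- select a region by writing
	 * PCIE_ATU_VIEWPORT, then program base/limit/target through the
	 * single PCIE_ATU_* register set.
	 * Unroll model (core >= 4.80): every region has a dedicated block
	 * at 0x300000 + region * 0x200, so there is no viewport select;
	 * REGION_CTRL1/CTRL2 take the roles of CR1/CR2.
	 */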
static struct irq_chip dw_msi_irq_chip = {
@@ -412,7 +486,8 @@ int dw_pcie_link_up(struct pcie_port *pp)
return pp->ops->link_up(pp);
val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
- return val & PCIE_PHY_DEBUG_R1_LINK_UP;
+ return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
+ (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
}
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
@@ -428,6 +503,17 @@ static const struct irq_domain_ops msi_domain_ops = {
.map = dw_pcie_msi_map,
};
+static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp)
+{
+ u32 val;
+
+ val = dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT);
+ if (val == 0xffffffff)
+ return 1;
+
+ return 0;
+}
+
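dw_pcie_iatu_unroll_enabled() detects the scheme by probing for the viewport register; the assumption behind the all-ones test, spelled out:

	/* Assumed behaviour: PCIE_ATU_VIEWPORT (0x900) is not implemented
	 * on iATU-unroll-only cores and reads back as 0xffffffff there,
	 * while viewport-capable cores return the current (reset: zero)
	 * region selection.  The probe must therefore run before any
	 * outbound region is programmed, as dw_pcie_host_init() does below.
	 */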
int dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
@@ -436,7 +522,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct resource *cfg_res;
int i, ret;
LIST_HEAD(res);
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
@@ -457,17 +543,20 @@ int dw_pcie_host_init(struct pcie_port *pp)
goto error;
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
+ resource_list_for_each_entry_safe(win, tmp, &res) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
- pp->io = win->res;
- pp->io->name = "I/O";
- pp->io_size = resource_size(pp->io);
- pp->io_bus_addr = pp->io->start - win->offset;
- ret = pci_remap_iospace(pp->io, pp->io_base);
- if (ret)
+ ret = pci_remap_iospace(win->res, pp->io_base);
+ if (ret) {
dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
- ret, pp->io);
+ ret, win->res);
+ resource_list_destroy_entry(win);
+ } else {
+ pp->io = win->res;
+ pp->io->name = "I/O";
+ pp->io_size = resource_size(pp->io);
+ pp->io_bus_addr = pp->io->start - win->offset;
+ }
break;
case IORESOURCE_MEM:
pp->mem = win->res;
@@ -524,6 +613,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
pp->lanes = 0;
+ ret = of_property_read_u32(np, "num-viewport", &pp->num_viewport);
+ if (ret)
+ pp->num_viewport = 2;
+
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (!pp->ops->msi_host_init) {
pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
@@ -544,6 +637,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
+ pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+
if (pp->ops->host_init)
pp->ops->host_init(pp);
@@ -609,13 +704,14 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
va_cfg_base = pp->va_cfg1_base;
}
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
type, cpu_addr,
busdev, cfg_size);
ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->num_viewport <= 2)
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
return ret;
}
@@ -646,19 +742,20 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
va_cfg_base = pp->va_cfg1_base;
}
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
type, cpu_addr,
busdev, cfg_size);
ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->num_viewport <= 2)
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
return ret;
}
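The `num_viewport <= 2` guards in the two functions above, together with the dw_pcie_setup_rc() changes further down, imply the following viewport assignment (a summary, not code from the patch):

	/* num_viewport == 2: index 0 = MEM (programmed once in
	 *                    dw_pcie_setup_rc), index 1 = CFG during each
	 *                    access, restored to I/O immediately after.
	 * num_viewport >= 3: index 0 = MEM, index 1 = CFG, index 2 = I/O;
	 *                    the I/O window is permanent, so no restore.
	 */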
-static int dw_pcie_valid_config(struct pcie_port *pp,
- struct pci_bus *bus, int dev)
+static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
+ int dev)
{
/* If there is no link, then there is no device */
if (bus->number != pp->root_bus_nr) {
@@ -670,13 +767,6 @@ static int dw_pcie_valid_config(struct pcie_port *pp,
if (bus->number == pp->root_bus_nr && dev > 0)
return 0;
- /*
- * do not read more than one device on the bus directly attached
- * to RC's (Virtual Bridge's) DS side.
- */
- if (bus->primary == pp->root_bus_nr && dev > 0)
- return 0;
-
return 1;
}
@@ -685,7 +775,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
{
struct pcie_port *pp = bus->sysdata;
- if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
+ if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -701,7 +791,7 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
{
struct pcie_port *pp = bus->sysdata;
- if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
+ if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
if (bus->number == pp->root_bus_nr)
@@ -720,7 +810,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
u32 val;
/* set the number of lanes */
- dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
+ val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
switch (pp->lanes) {
case 1:
@@ -739,10 +829,10 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
return;
}
- dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
+ dw_pcie_writel_rc(pp, PCIE_PORT_LINK_CONTROL, val);
/* set link width speed control register */
- dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
+ val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
switch (pp->lanes) {
case 1:
@@ -758,40 +848,45 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
break;
}
- dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
/* setup RC BARs */
- dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
- dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0x00000004);
+ dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0x00000000);
/* setup interrupt pins */
- dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
+ val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
val |= 0x00000100;
- dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
+ dw_pcie_writel_rc(pp, PCI_INTERRUPT_LINE, val);
/* setup bus numbers */
- dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
+ val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
val &= 0xff000000;
val |= 0x00010100;
- dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
+ dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val);
/* setup command register */
- dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
+ val = dw_pcie_readl_rc(pp, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
- dw_pcie_writel_rc(pp, val, PCI_COMMAND);
+ dw_pcie_writel_rc(pp, PCI_COMMAND, val);
/*
* If the platform provides ->rd_other_conf, it means the platform
* uses its own address translation component rather than ATU, so
* we should not program the ATU here.
*/
- if (!pp->ops->rd_other_conf)
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
+ if (!pp->ops->rd_other_conf) {
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
+ if (pp->num_viewport > 2)
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX2,
+ PCIE_ATU_TYPE_IO, pp->io_base,
+ pp->io_bus_addr, pp->io_size);
+ }
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
@@ -802,7 +897,3 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
-
-MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Designware PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index f437f9b5be04..a567ea288ee2 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -22,11 +22,6 @@
#define MAX_MSI_IRQS 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_USLEEP_MIN 90000
-#define LINK_WAIT_USLEEP_MAX 100000
-
struct pcie_port {
struct device *dev;
u8 root_bus_nr;
@@ -49,18 +44,18 @@ struct pcie_port {
struct resource *busn;
int irq;
u32 lanes;
+ u32 num_viewport;
struct pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
unsigned long msi_data;
+ u8 iatu_unroll_enabled;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
struct pcie_host_ops {
- void (*readl_rc)(struct pcie_port *pp,
- void __iomem *dbi_base, u32 *val);
- void (*writel_rc)(struct pcie_port *pp,
- u32 val, void __iomem *dbi_base);
+ u32 (*readl_rc)(struct pcie_port *pp, u32 reg);
+ void (*writel_rc)(struct pcie_port *pp, u32 reg, u32 val);
int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
@@ -77,6 +72,8 @@ struct pcie_host_ops {
int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
};
+u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg);
+void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val);
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
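The readl_rc/writel_rc hooks now take a register offset instead of a pre-computed dbi address, and the read hook returns the value directly. A hypothetical platform override against the new signatures (names are illustrative, not from the patch):

	static u32 my_pcie_readl_rc(struct pcie_port *pp, u32 reg)
	{
		/* a platform-specific fixup would go here */
		return readl(pp->dbi_base + reg);
	}

	static void my_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
	{
		writel(val, pp->dbi_base + reg);
	}

	static struct pcie_host_ops my_pcie_host_ops = {
		.readl_rc	= my_pcie_readl_rc,
		.writel_rc	= my_pcie_writel_rc,
	};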
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 7ee9dfcc45fb..56154c25980c 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -22,51 +22,38 @@
#include "pcie-designware.h"
-#define PCIE_LTSSM_LINKUP_STATE 0x11
-#define PCIE_LTSSM_STATE_MASK 0x3F
-#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
-#define PCIE_SYS_STATE4 0x31c
-#define PCIE_HIP06_CTRL_OFF 0x1000
+#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
+#define PCIE_HIP06_CTRL_OFF 0x1000
+#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c)
+#define PCIE_LTSSM_LINKUP_STATE 0x11
+#define PCIE_LTSSM_STATE_MASK 0x3F
#define to_hisi_pcie(x) container_of(x, struct hisi_pcie, pp)
struct hisi_pcie;
struct pcie_soc_ops {
- int (*hisi_pcie_link_up)(struct hisi_pcie *pcie);
+ int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie);
};
struct hisi_pcie {
+ struct pcie_port pp; /* pp.dbi_base is DT rc_dbi */
struct regmap *subctrl;
- void __iomem *reg_base;
u32 port_id;
- struct pcie_port pp;
struct pcie_soc_ops *soc_ops;
};
-static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie,
- u32 val, u32 reg)
-{
- writel(val, pcie->reg_base + reg);
-}
-
-static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg)
-{
- return readl(pcie->reg_base + reg);
-}
-
/* HipXX PCIe host only supports 32-bit config access */
static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
u32 *val)
{
u32 reg;
u32 reg_val;
- struct hisi_pcie *pcie = to_hisi_pcie(pp);
void *walker = &reg_val;
walker += (where & 0x3);
reg = where & ~0x3;
- reg_val = hisi_pcie_apb_readl(pcie, reg);
+ reg_val = dw_pcie_readl_rc(pp, reg);
if (size == 1)
*val = *(u8 __force *) walker;
@@ -86,21 +73,20 @@ static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
{
u32 reg_val;
u32 reg;
- struct hisi_pcie *pcie = to_hisi_pcie(pp);
void *walker = &reg_val;
walker += (where & 0x3);
reg = where & ~0x3;
if (size == 4)
- hisi_pcie_apb_writel(pcie, val, reg);
+ dw_pcie_writel_rc(pp, reg, val);
else if (size == 2) {
- reg_val = hisi_pcie_apb_readl(pcie, reg);
+ reg_val = dw_pcie_readl_rc(pp, reg);
*(u16 __force *) walker = val;
- hisi_pcie_apb_writel(pcie, reg_val, reg);
+ dw_pcie_writel_rc(pp, reg, reg_val);
} else if (size == 1) {
- reg_val = hisi_pcie_apb_readl(pcie, reg);
+ reg_val = dw_pcie_readl_rc(pp, reg);
*(u8 __force *) walker = val;
- hisi_pcie_apb_writel(pcie, reg_val, reg);
+ dw_pcie_writel_rc(pp, reg, reg_val);
} else
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -119,10 +105,10 @@ static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie)
static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
{
+ struct pcie_port *pp = &hisi_pcie->pp;
u32 val;
- val = hisi_pcie_apb_readl(hisi_pcie, PCIE_HIP06_CTRL_OFF +
- PCIE_SYS_STATE4);
+ val = dw_pcie_readl_rc(pp, PCIE_SYS_STATE4);
return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
}
@@ -140,19 +126,20 @@ static struct pcie_host_ops hisi_pcie_host_ops = {
.link_up = hisi_pcie_link_up,
};
-static int hisi_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
+static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
+ struct platform_device *pdev)
{
+ struct pcie_port *pp = &hisi_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
u32 port_id;
- struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp);
- if (of_property_read_u32(pdev->dev.of_node, "port-id", &port_id)) {
- dev_err(&pdev->dev, "failed to read port-id\n");
+ if (of_property_read_u32(dev->of_node, "port-id", &port_id)) {
+ dev_err(dev, "failed to read port-id\n");
return -EINVAL;
}
if (port_id > 3) {
- dev_err(&pdev->dev, "Invalid port-id: %d\n", port_id);
+ dev_err(dev, "Invalid port-id: %d\n", port_id);
return -EINVAL;
}
hisi_pcie->port_id = port_id;
@@ -161,7 +148,7 @@ static int hisi_add_pcie_port(struct pcie_port *pp,
ret = dw_pcie_host_init(pp);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize host\n");
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
@@ -170,6 +157,7 @@ static int hisi_add_pcie_port(struct pcie_port *pp,
static int hisi_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct hisi_pcie *hisi_pcie;
struct pcie_port *pp;
const struct of_device_id *match;
@@ -177,40 +165,36 @@ static int hisi_pcie_probe(struct platform_device *pdev)
struct device_driver *driver;
int ret;
- hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL);
+ hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
if (!hisi_pcie)
return -ENOMEM;
pp = &hisi_pcie->pp;
- pp->dev = &pdev->dev;
- driver = (pdev->dev).driver;
+ pp->dev = dev;
+ driver = dev->driver;
- match = of_match_device(driver->of_match_table, &pdev->dev);
+ match = of_match_device(driver->of_match_table, dev);
hisi_pcie->soc_ops = (struct pcie_soc_ops *) match->data;
hisi_pcie->subctrl =
syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
if (IS_ERR(hisi_pcie->subctrl)) {
- dev_err(pp->dev, "cannot get subctrl base\n");
+ dev_err(dev, "cannot get subctrl base\n");
return PTR_ERR(hisi_pcie->subctrl);
}
reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
- hisi_pcie->reg_base = devm_ioremap_resource(&pdev->dev, reg);
- if (IS_ERR(hisi_pcie->reg_base)) {
- dev_err(pp->dev, "cannot get rc_dbi base\n");
- return PTR_ERR(hisi_pcie->reg_base);
+ pp->dbi_base = devm_ioremap_resource(dev, reg);
+ if (IS_ERR(pp->dbi_base)) {
+ dev_err(dev, "cannot get rc_dbi base\n");
+ return PTR_ERR(pp->dbi_base);
}
- hisi_pcie->pp.dbi_base = hisi_pcie->reg_base;
-
- ret = hisi_add_pcie_port(pp, pdev);
+ ret = hisi_add_pcie_port(hisi_pcie, pdev);
if (ret)
return ret;
- platform_set_drvdata(pdev, hisi_pcie);
-
- dev_warn(pp->dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
+ dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
return 0;
}
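To make the dev_warn() above concrete, a sketch of the hazard inherent in hisi_pcie_cfg_write()'s read-modify-write widening:

	/* A 1-byte write to PCI_COMMAND (0x04) becomes: read the dword at
	 * 0x04, patch one byte, write the dword back.  That dword also
	 * contains PCI_STATUS (0x06), whose error bits are RW1C: any
	 * status bit that happened to read as 1 is written back as 1 and
	 * thereby cleared as a side effect.
	 */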
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 0d7bee4a0d26..8ce089043a27 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -42,19 +42,24 @@ static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
{
+ struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
LIST_HEAD(res);
struct resource res_mem;
int ret;
- pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->dev = &bdev->dev;
- bcma_set_drvdata(bdev, pcie);
+ pcie->dev = dev;
pcie->base = bdev->io_addr;
+ if (!pcie->base) {
+ dev_err(dev, "no controller registers\n");
+ return -ENOMEM;
+ }
+
pcie->base_addr = bdev->addr;
res_mem.start = bdev->addr_s[0];
@@ -67,10 +72,11 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
ret = iproc_pcie_setup(pcie, &res);
if (ret)
- dev_err(pcie->dev, "PCIe controller setup failed\n");
+ dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&res);
+ bcma_set_drvdata(bdev, pcie);
return ret;
}
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 1738c5288eb6..a3de087976b3 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -40,35 +40,35 @@ MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
struct iproc_pcie *pcie;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct resource reg;
resource_size_t iobase = 0;
LIST_HEAD(res);
int ret;
- of_id = of_match_device(iproc_pcie_of_match_table, &pdev->dev);
+ of_id = of_match_device(iproc_pcie_of_match_table, dev);
if (!of_id)
return -EINVAL;
- pcie = devm_kzalloc(&pdev->dev, sizeof(struct iproc_pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->dev = &pdev->dev;
+ pcie->dev = dev;
pcie->type = (enum iproc_pcie_type)of_id->data;
- platform_set_drvdata(pdev, pcie);
ret = of_address_to_resource(np, 0, &reg);
if (ret < 0) {
- dev_err(pcie->dev, "unable to obtain controller resources\n");
+ dev_err(dev, "unable to obtain controller resources\n");
return ret;
}
- pcie->base = devm_ioremap(pcie->dev, reg.start, resource_size(&reg));
+ pcie->base = devm_ioremap(dev, reg.start, resource_size(&reg));
if (!pcie->base) {
- dev_err(pcie->dev, "unable to map controller registers\n");
+ dev_err(dev, "unable to map controller registers\n");
return -ENOMEM;
}
pcie->base_addr = reg.start;
@@ -79,7 +79,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset",
&val);
if (ret) {
- dev_err(pcie->dev,
+ dev_err(dev,
"missing brcm,pcie-ob-axi-offset property\n");
return ret;
}
@@ -88,7 +88,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
&val);
if (ret) {
- dev_err(pcie->dev,
+ dev_err(dev,
"missing brcm,pcie-ob-window-size property\n");
return ret;
}
@@ -101,7 +101,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
}
/* PHY use is optional */
- pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy");
+ pcie->phy = devm_phy_get(dev, "pcie-phy");
if (IS_ERR(pcie->phy)) {
if (PTR_ERR(pcie->phy) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@@ -110,7 +110,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
if (ret) {
- dev_err(pcie->dev,
+ dev_err(dev,
"unable to get PCI host bridge resources\n");
return ret;
}
@@ -119,10 +119,11 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
ret = iproc_pcie_setup(pcie, &res);
if (ret)
- dev_err(pcie->dev, "PCIe controller setup failed\n");
+ dev_err(dev, "PCIe controller setup failed\n");
pci_free_resource_list(&res);
+ platform_set_drvdata(pdev, pcie);
return ret;
}
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index e167b2f0098d..0b999a9fb843 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -63,6 +63,8 @@
#define OARR_SIZE_CFG_SHIFT 1
#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
+#define PCI_EXP_CAP 0xac
+
#define MAX_NUM_OB_WINDOWS 2
#define IPROC_PCIE_REG_INVALID 0xffff
@@ -258,9 +260,10 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
{
+ struct device *dev = pcie->dev;
u8 hdr_type;
u32 link_ctrl, class, val;
- u16 pos, link_status;
+ u16 pos = PCI_EXP_CAP, link_status;
bool link_is_active = false;
/*
@@ -272,14 +275,14 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
- dev_err(pcie->dev, "PHY or data link is INACTIVE!\n");
+ dev_err(dev, "PHY or data link is INACTIVE!\n");
return -ENODEV;
}
/* make sure we are not in EP mode */
pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
- dev_err(pcie->dev, "in EP mode, hdr=%#02x\n", hdr_type);
+ dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
return -EFAULT;
}
@@ -293,30 +296,27 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
/* check link status to see if link is active */
- pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
link_is_active = true;
if (!link_is_active) {
/* try GEN 1 link speed */
-#define PCI_LINK_STATUS_CTRL_2_OFFSET 0x0dc
#define PCI_TARGET_LINK_SPEED_MASK 0xf
#define PCI_TARGET_LINK_SPEED_GEN2 0x2
#define PCI_TARGET_LINK_SPEED_GEN1 0x1
pci_bus_read_config_dword(bus, 0,
- PCI_LINK_STATUS_CTRL_2_OFFSET,
+ pos + PCI_EXP_LNKCTL2,
&link_ctrl);
if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
PCI_TARGET_LINK_SPEED_GEN2) {
link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
pci_bus_write_config_dword(bus, 0,
- PCI_LINK_STATUS_CTRL_2_OFFSET,
+ pos + PCI_EXP_LNKCTL2,
link_ctrl);
msleep(100);
- pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
&link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
@@ -324,7 +324,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
}
}
- dev_info(pcie->dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
+ dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
return link_is_active ? 0 : -ENODEV;
}
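Dropping pci_bus_find_capability() in favour of a constant encodes a hardware assumption worth making explicit:

	/* On iProc root complexes the PCIe capability structure evidently
	 * sits at a fixed config offset, so the capability-list walk can
	 * be skipped: */
	u16 pos = PCI_EXP_CAP;		/* 0xac on this controller */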
@@ -349,12 +349,13 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
u64 pci_addr, resource_size_t size)
{
struct iproc_pcie_ob *ob = &pcie->ob;
+ struct device *dev = pcie->dev;
unsigned i;
u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
u64 remainder;
if (size > max_size) {
- dev_err(pcie->dev,
+ dev_err(dev,
"res size %pap exceeds max supported size 0x%llx\n",
&size, max_size);
return -EINVAL;
@@ -362,15 +363,14 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
div64_u64_rem(size, ob->window_size, &remainder);
if (remainder) {
- dev_err(pcie->dev,
+ dev_err(dev,
"res size %pap needs to be multiple of window size %pap\n",
&size, &ob->window_size);
return -EINVAL;
}
if (axi_addr < ob->axi_offset) {
- dev_err(pcie->dev,
- "axi address %pap less than offset %pap\n",
+ dev_err(dev, "axi address %pap less than offset %pap\n",
&axi_addr, &ob->axi_offset);
return -EINVAL;
}
@@ -406,6 +406,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
struct list_head *resources)
{
+ struct device *dev = pcie->dev;
struct resource_entry *window;
int ret;
@@ -425,7 +426,7 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
return ret;
break;
default:
- dev_err(pcie->dev, "invalid resource %pR\n", res);
+ dev_err(dev, "invalid resource %pR\n", res);
return -EINVAL;
}
}
@@ -455,26 +456,25 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
+ struct device *dev;
int ret;
void *sysdata;
struct pci_bus *bus;
- if (!pcie || !pcie->dev || !pcie->base)
- return -EINVAL;
-
- ret = devm_request_pci_bus_resources(pcie->dev, res);
+ dev = pcie->dev;
+ ret = devm_request_pci_bus_resources(dev, res);
if (ret)
return ret;
ret = phy_init(pcie->phy);
if (ret) {
- dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
+ dev_err(dev, "unable to initialize PCIe PHY\n");
return ret;
}
ret = phy_power_on(pcie->phy);
if (ret) {
- dev_err(pcie->dev, "unable to power on PCIe PHY\n");
+ dev_err(dev, "unable to power on PCIe PHY\n");
goto err_exit_phy;
}
@@ -486,7 +486,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
pcie->reg_offsets = iproc_pcie_reg_paxc;
break;
default:
- dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
+ dev_err(dev, "incompatible iProc PCIe interface\n");
ret = -EINVAL;
goto err_power_off_phy;
}
@@ -496,7 +496,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (pcie->need_ob_cfg) {
ret = iproc_pcie_map_ranges(pcie, res);
if (ret) {
- dev_err(pcie->dev, "map failed\n");
+ dev_err(dev, "map failed\n");
goto err_power_off_phy;
}
}
@@ -508,9 +508,9 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
sysdata = pcie;
#endif
- bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res);
+ bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
if (!bus) {
- dev_err(pcie->dev, "unable to create PCI root bus\n");
+ dev_err(dev, "unable to create PCI root bus\n");
ret = -ENOMEM;
goto err_power_off_phy;
}
@@ -518,7 +518,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
ret = iproc_pcie_check_link(pcie, bus);
if (ret) {
- dev_err(pcie->dev, "no PCIe EP device detected\n");
+ dev_err(dev, "no PCIe EP device detected\n");
goto err_rm_root_bus;
}
@@ -526,7 +526,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (IS_ENABLED(CONFIG_PCI_MSI))
if (iproc_pcie_msi_enable(pcie))
- dev_info(pcie->dev, "not using iProc MSI\n");
+ dev_info(dev, "not using iProc MSI\n");
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index f2f90c50f75d..ef0a84c7a588 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -1,7 +1,11 @@
/*
+ * Qualcomm PCIe root complex driver
+ *
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
* Copyright 2015 Linaro Limited.
*
+ * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -19,7 +23,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
@@ -82,12 +86,10 @@ struct qcom_pcie_ops {
};
struct qcom_pcie {
- struct pcie_port pp;
- struct device *dev;
+ struct pcie_port pp; /* pp.dbi_base is DT dbi */
+ void __iomem *parf; /* DT parf */
+ void __iomem *elbi; /* DT elbi */
union qcom_pcie_resources res;
- void __iomem *parf;
- void __iomem *dbi;
- void __iomem *elbi;
struct phy *phy;
struct gpio_desc *reset;
struct qcom_pcie_ops *ops;
@@ -132,7 +134,7 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->pp.dev;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
@@ -184,7 +186,7 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->pp.dev;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
@@ -233,7 +235,7 @@ static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->pp.dev;
u32 val;
int ret;
@@ -355,7 +357,7 @@ static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->pp.dev;
int ret;
ret = reset_control_deassert(res->core);
@@ -422,7 +424,7 @@ err_res:
static int qcom_pcie_link_up(struct pcie_port *pp)
{
struct qcom_pcie *pcie = to_qcom_pcie(pp);
- u16 val = readw(pcie->dbi + PCIE20_CAP + PCI_EXP_LNKSTA);
+ u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
@@ -505,8 +507,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
+ pp = &pcie->pp;
pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
- pcie->dev = dev;
pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
if (IS_ERR(pcie->reset))
@@ -518,9 +520,9 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->parf);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pcie->dbi = devm_ioremap_resource(dev, res);
- if (IS_ERR(pcie->dbi))
- return PTR_ERR(pcie->dbi);
+ pp->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
pcie->elbi = devm_ioremap_resource(dev, res);
@@ -535,9 +537,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- pp = &pcie->pp;
pp->dev = dev;
- pp->dbi_base = pcie->dbi;
pp->root_bus_nr = -1;
pp->ops = &qcom_pcie_dw_ops;
@@ -565,20 +565,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, pcie);
-
- return 0;
-}
-
-static int qcom_pcie_remove(struct platform_device *pdev)
-{
- struct qcom_pcie *pcie = platform_get_drvdata(pdev);
-
- qcom_ep_reset_assert(pcie);
- phy_power_off(pcie->phy);
- phy_exit(pcie->phy);
- pcie->ops->deinit(pcie);
-
return 0;
}
@@ -588,19 +574,13 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
{ }
};
-MODULE_DEVICE_TABLE(of, qcom_pcie_match);
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
- .remove = qcom_pcie_remove,
.driver = {
.name = "qcom-pcie",
+ .suppress_bind_attrs = true,
.of_match_table = qcom_pcie_match,
},
};
-
-module_platform_driver(qcom_pcie_driver);
-
-MODULE_AUTHOR("Stanimir Varbanov <svarbanov@mm-sol.com>");
-MODULE_DESCRIPTION("Qualcomm PCIe root complex driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(qcom_pcie_driver);
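The same builtin-only conversion pattern appears across this series (see the MODULE_*() removals in pcie-designware.c above and the rcar driver below): a bool-Kconfig driver keeps no .remove hook, drops module.h and the MODULE_*() macros, and sets .suppress_bind_attrs so the device cannot be unbound through sysfs, which would otherwise leave a live host bridge half torn down. In outline (names illustrative):

	static struct platform_driver foo_pcie_driver = {
		.probe = foo_pcie_probe,
		/* no .remove: the driver can be neither unloaded nor unbound */
		.driver = {
			.name = "foo-pcie",
			.suppress_bind_attrs = true,
			.of_match_table = foo_pcie_match,
		},
	};
	builtin_platform_driver(foo_pcie_driver);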
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 65db7a221509..62700d1896f4 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -31,8 +31,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#define DRV_NAME "rcar-pcie"
-
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
#define CONFIG_SEND_ENABLE (1 << 31)
@@ -84,8 +82,18 @@
#define IDSETR1 0x011004
#define TLCTLR 0x011048
#define MACSR 0x011054
+#define SPCHGFIN (1 << 4)
+#define SPCHGFAIL (1 << 6)
+#define SPCHGSUC (1 << 7)
+#define LINK_SPEED (0xf << 16)
+#define LINK_SPEED_2_5GTS (1 << 16)
+#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
+#define SPEED_CHANGE (1 << 24)
#define SCRAMBLE_DISABLE (1 << 27)
+#define MACS2R 0x011078
+#define MACCGSPSETR 0x011084
+#define SPCNGRSN (1 << 31)
/* R-Car H1 PHY */
#define H1_PCIEPHYADRR 0x04000c
@@ -385,24 +393,82 @@ static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
return 1;
}
+static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int timeout = 1000;
+ u32 macsr;
+
+ if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
+ return;
+
+ if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
+ dev_err(dev, "Speed change already in progress\n");
+ return;
+ }
+
+ macsr = rcar_pci_read_reg(pcie, MACSR);
+ if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
+ goto done;
+
+ /* Set target link speed to 5.0 GT/s */
+ rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
+ PCI_EXP_LNKSTA_CLS_5_0GB);
+
+ /* Set speed change reason as intentional factor */
+ rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
+
+ /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
+ if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
+ rcar_pci_write_reg(pcie, macsr, MACSR);
+
+ /* Start link speed change */
+ rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
+
+ while (timeout--) {
+ macsr = rcar_pci_read_reg(pcie, MACSR);
+ if (macsr & SPCHGFIN) {
+ /* Clear the interrupt bits */
+ rcar_pci_write_reg(pcie, macsr, MACSR);
+
+ if (macsr & SPCHGFAIL)
+ dev_err(dev, "Speed change failed\n");
+
+ goto done;
+ }
+
+ msleep(1);
+	}
+
+ dev_err(dev, "Speed change timed out\n");
+
+done:
+ dev_info(dev, "Current link speed is %s GT/s\n",
+ (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
+}
+
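Reading rcar_pcie_force_speedup() is easier with the register semantics spelled out; the following is inferred from the #defines above and the flow of the function, not stated in the patch:

	/* LINK_SPEED occupies bits 19:16 of both registers:
	 *   LINK_SPEED_2_5GTS = (1 << 16), LINK_SPEED_5_0GTS = (2 << 16).
	 * MACS2R appears to report the maximum speed the link supports,
	 * MACSR the currently negotiated one -- hence the early returns
	 * when 5 GT/s is unsupported or already active.
	 */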
static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
+ struct device *dev = pcie->dev;
struct pci_bus *bus, *child;
LIST_HEAD(res);
+ /* Try setting 5 GT/s link speed */
+ rcar_pcie_force_speedup(pcie);
+
rcar_pcie_setup(&res, pcie);
pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
if (IS_ENABLED(CONFIG_PCI_MSI))
- bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
+ bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
else
- bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
+ bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
&rcar_pcie_ops, pcie, &res);
if (!bus) {
- dev_err(pcie->dev, "Scanning rootbus failed");
+		dev_err(dev, "Scanning rootbus failed\n");
return -ENODEV;
}
@@ -421,6 +487,7 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
+ struct device *dev = pcie->dev;
unsigned int timeout = 100;
while (timeout--) {
@@ -430,7 +497,7 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie)
udelay(100);
}
- dev_err(pcie->dev, "Access to PCIe phy timed out\n");
+ dev_err(dev, "Access to PCIe phy timed out\n");
return -ETIMEDOUT;
}
@@ -608,6 +675,18 @@ static int rcar_msi_alloc(struct rcar_msi *chip)
return msi;
}
+static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+ msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
+ order_base_2(no_irqs));
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
mutex_lock(&chip->lock);
@@ -619,6 +698,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
struct rcar_pcie *pcie = data;
struct rcar_msi *msi = &pcie->msi;
+ struct device *dev = pcie->dev;
unsigned long reg;
reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
@@ -639,10 +719,10 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
if (test_bit(index, msi->used))
generic_handle_irq(irq);
else
- dev_info(pcie->dev, "unhandled MSI\n");
+ dev_info(dev, "unhandled MSI\n");
} else {
/* Unknown MSI, just clear it */
- dev_dbg(pcie->dev, "unexpected MSI\n");
+ dev_dbg(dev, "unexpected MSI\n");
}
/* see if there's any more pending in this vector */
@@ -665,7 +745,7 @@ static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
if (hwirq < 0)
return hwirq;
- irq = irq_create_mapping(msi->domain, hwirq);
+ irq = irq_find_mapping(msi->domain, hwirq);
if (!irq) {
rcar_msi_free(msi, hwirq);
return -EINVAL;
@@ -682,6 +762,58 @@ static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
return 0;
}
+static int rcar_msi_setup_irqs(struct msi_controller *chip,
+ struct pci_dev *pdev, int nvec, int type)
+{
+ struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct msi_desc *desc;
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+ int i;
+
+ /* MSI-X interrupts are not supported */
+ if (type == PCI_CAP_ID_MSIX)
+ return -EINVAL;
+
+ WARN_ON(!list_is_singular(&pdev->dev.msi_list));
+ desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+
+ hwirq = rcar_msi_alloc_region(msi, nvec);
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ irq = irq_find_mapping(msi->domain, hwirq);
+ if (!irq)
+ return -ENOSPC;
+
+ for (i = 0; i < nvec; i++) {
+ /*
+	 * irq_create_mapping(), called from rcar_pcie_enable_msi() at
+	 * probe time, pre-allocates descs, so there is no need to
+	 * allocate descs here.
+ * We can therefore assume that if irq_find_mapping() above
+ * returns non-zero, then the descs are also successfully
+ * allocated.
+ */
+ if (irq_set_msi_desc_off(irq, i, desc)) {
+ /* TODO: clear */
+ return -EINVAL;
+ }
+ }
+
+ desc->nvec_used = nvec;
+ desc->msi_attrib.multiple = order_base_2(nvec);
+
+ msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+ msg.data = hwirq;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
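How the new multi-vector path divides work between host and endpoint, traced for a hypothetical nvec = 4 request (the specific hwirq numbers are illustrative):

	/* rcar_msi_alloc_region() reserves an order_base_2(4)-aligned
	 * block, say hwirqs {8, 9, 10, 11}.  msg.data is written once as
	 * 8; the endpoint ORs the vector number into the low 2 bits when
	 * signalling, so incoming MSI data values 8..11 select the
	 * individual hwirqs, which the demux loop in rcar_pcie_msi_irq()
	 * resolves via irq_find_mapping().
	 */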
static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
struct rcar_msi *msi = to_rcar_msi(chip);
@@ -713,38 +845,42 @@ static const struct irq_domain_ops msi_domain_ops = {
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device *dev = pcie->dev;
struct rcar_msi *msi = &pcie->msi;
unsigned long base;
- int err;
+ int err, i;
mutex_init(&msi->lock);
- msi->chip.dev = pcie->dev;
+ msi->chip.dev = dev;
msi->chip.setup_irq = rcar_msi_setup_irq;
+ msi->chip.setup_irqs = rcar_msi_setup_irqs;
msi->chip.teardown_irq = rcar_msi_teardown_irq;
- msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+ msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
&msi_domain_ops, &msi->chip);
if (!msi->domain) {
- dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
+ for (i = 0; i < INT_PCI_MSI_NR; i++)
+ irq_create_mapping(msi->domain, i);
+
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
- err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
+ err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_irq_chip.name, pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
- err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
+ err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_irq_chip.name, pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ dev_err(dev, "failed to request IRQ: %d\n", err);
goto err;
}
@@ -765,57 +901,55 @@ err:
return err;
}
-static int rcar_pcie_get_resources(struct platform_device *pdev,
- struct rcar_pcie *pcie)
+static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
{
+ struct device *dev = pcie->dev;
struct resource res;
int err, i;
- err = of_address_to_resource(pdev->dev.of_node, 0, &res);
+ err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
return err;
- pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ pcie->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(pcie->clk)) {
- dev_err(pcie->dev, "cannot get platform clock\n");
+ dev_err(dev, "cannot get platform clock\n");
return PTR_ERR(pcie->clk);
}
err = clk_prepare_enable(pcie->clk);
if (err)
- goto fail_clk;
+ return err;
- pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(pcie->bus_clk)) {
- dev_err(pcie->dev, "cannot get pcie bus clock\n");
+ dev_err(dev, "cannot get pcie bus clock\n");
err = PTR_ERR(pcie->bus_clk);
goto fail_clk;
}
err = clk_prepare_enable(pcie->bus_clk);
if (err)
- goto err_map_reg;
+ goto fail_clk;
- i = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ i = irq_of_parse_and_map(dev->of_node, 0);
if (!i) {
- dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+ dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
goto err_map_reg;
}
pcie->msi.irq1 = i;
- i = irq_of_parse_and_map(pdev->dev.of_node, 1);
+ i = irq_of_parse_and_map(dev->of_node, 1);
if (!i) {
- dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+ dev_err(dev, "cannot get platform resources for msi interrupt\n");
err = -ENOENT;
goto err_map_reg;
}
pcie->msi.irq2 = i;
- pcie->base = devm_ioremap_resource(&pdev->dev, &res);
- if (IS_ERR(pcie->base)) {
- err = PTR_ERR(pcie->base);
- goto err_map_reg;
- }
-
return 0;
err_map_reg:
@@ -865,12 +999,16 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
* Set up 64-bit inbound regions as the range parser doesn't
* distinguish between 32 and 64-bit types.
*/
- rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
+ rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
+ PCIEPRAR(idx));
rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
- rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
+ rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
+ PCIELAMR(idx));
- rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
- rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
+ rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
+ PCIEPRAR(idx + 1));
+ rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
+ PCIELAR(idx + 1));
rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
pci_addr += size;
@@ -919,6 +1057,7 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
/* Get the dma-ranges from DT */
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
+
dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
@@ -932,9 +1071,12 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
static const struct of_device_id rcar_pcie_of_match[] = {
{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
- { .compatible = "renesas,pcie-rcar-gen2", .data = rcar_pcie_hw_init_gen2 },
- { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init_gen2 },
- { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init_gen2 },
+ { .compatible = "renesas,pcie-rcar-gen2",
+ .data = rcar_pcie_hw_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7790",
+ .data = rcar_pcie_hw_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7791",
+ .data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
{},
};
@@ -945,9 +1087,10 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
resource_size_t iobase;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, &iobase);
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
+ &iobase);
if (err)
return err;
@@ -955,14 +1098,17 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
if (err)
goto out_release_res;
- resource_list_for_each_entry(win, &pci->resources) {
+ resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
struct resource *res = win->res;
if (resource_type(res) == IORESOURCE_IO) {
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+
+ resource_list_destroy_entry(win);
+ }
}
}
@@ -975,60 +1121,60 @@ out_release_res:
static int rcar_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rcar_pcie *pcie;
unsigned int data;
const struct of_device_id *of_id;
int err;
int (*hw_init_fn)(struct rcar_pcie *);
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->dev = &pdev->dev;
- platform_set_drvdata(pdev, pcie);
+ pcie->dev = dev;
INIT_LIST_HEAD(&pcie->resources);
rcar_pcie_parse_request_of_pci_ranges(pcie);
- err = rcar_pcie_get_resources(pdev, pcie);
+ err = rcar_pcie_get_resources(pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+ dev_err(dev, "failed to request resources: %d\n", err);
return err;
}
- err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
- if (err)
+ err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
+ if (err)
return err;
- of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
+ of_id = of_match_device(rcar_pcie_of_match, dev);
if (!of_id || !of_id->data)
return -EINVAL;
hw_init_fn = of_id->data;
- pm_runtime_enable(pcie->dev);
- err = pm_runtime_get_sync(pcie->dev);
+ pm_runtime_enable(dev);
+ err = pm_runtime_get_sync(dev);
if (err < 0) {
- dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+ dev_err(dev, "pm_runtime_get_sync failed\n");
goto err_pm_disable;
}
/* Failure to get a link might just be that no cards are inserted */
err = hw_init_fn(pcie);
if (err) {
- dev_info(&pdev->dev, "PCIe link down\n");
+ dev_info(dev, "PCIe link down\n");
err = 0;
goto err_pm_put;
}
data = rcar_pci_read_reg(pcie, MACSR);
- dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+ dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = rcar_pcie_enable_msi(pcie);
if (err < 0) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"failed to enable MSI support: %d\n",
err);
goto err_pm_put;
@@ -1042,16 +1188,16 @@ static int rcar_pcie_probe(struct platform_device *pdev)
return 0;
err_pm_put:
- pm_runtime_put(pcie->dev);
+ pm_runtime_put(dev);
err_pm_disable:
- pm_runtime_disable(pcie->dev);
+ pm_runtime_disable(dev);
return err;
}
static struct platform_driver rcar_pcie_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = "rcar-pcie",
.of_match_table = rcar_pcie_of_match,
.suppress_bind_attrs = true,
},
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
new file mode 100644
index 000000000000..e0b22dab9b7a
--- /dev/null
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -0,0 +1,1227 @@
+/*
+ * Rockchip AXI PCIe host controller driver
+ *
+ * Copyright (c) 2016 Rockchip, Inc.
+ *
+ * Author: Shawn Lin <shawn.lin@rock-chips.com>
+ * Wenrui Li <wenrui.li@rock-chips.com>
+ *
+ * Bits taken from Synopsys DesignWare Host controller driver and
+ * ARM PCI Host generic driver.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+/*
+ * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
+ * bits. This allows atomic updates of the register without locking.
+ */
+#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
+#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+
+#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
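+/*
+ * Worked example (derived from the macros above, values illustrative):
+ * ENCODE_LANES(4) == ((4 >> 1) & 3) << 4 == 0x20, so
+ * PCIE_CLIENT_CONF_LANE_NUM(4) == HIWORD_UPDATE(0x0030, 0x20)
+ * == 0x00300020. The high halfword (here bits 21:20) tells the IP
+ * which low-halfword bits to latch, so only the lane-number field
+ * (bits 5:4) changes and no read-modify-write or lock is needed.
+ */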
+
+#define PCIE_CLIENT_BASE 0x0
+#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
+#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
+#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
+#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
+#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
+#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
+#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
+#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
+#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
+#define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c)
+#define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50)
+#define PCIE_CLIENT_INTR_MASK GENMASK(8, 5)
+#define PCIE_CLIENT_INTR_SHIFT 5
+#define PCIE_CLIENT_INT_LEGACY_DONE BIT(15)
+#define PCIE_CLIENT_INT_MSG BIT(14)
+#define PCIE_CLIENT_INT_HOT_RST BIT(13)
+#define PCIE_CLIENT_INT_DPA BIT(12)
+#define PCIE_CLIENT_INT_FATAL_ERR BIT(11)
+#define PCIE_CLIENT_INT_NFATAL_ERR BIT(10)
+#define PCIE_CLIENT_INT_CORR_ERR BIT(9)
+#define PCIE_CLIENT_INT_INTD BIT(8)
+#define PCIE_CLIENT_INT_INTC BIT(7)
+#define PCIE_CLIENT_INT_INTB BIT(6)
+#define PCIE_CLIENT_INT_INTA BIT(5)
+#define PCIE_CLIENT_INT_LOCAL BIT(4)
+#define PCIE_CLIENT_INT_UDMA BIT(3)
+#define PCIE_CLIENT_INT_PHY BIT(2)
+#define PCIE_CLIENT_INT_HOT_PLUG BIT(1)
+#define PCIE_CLIENT_INT_PWR_STCG BIT(0)
+
+#define PCIE_CLIENT_INT_LEGACY \
+ (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \
+ PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD)
+
+#define PCIE_CLIENT_INT_CLI \
+ (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \
+ PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \
+ PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \
+ PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \
+ PCIE_CLIENT_INT_PHY)
+
+#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
+#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
+#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
+#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
+#define PCIE_CORE_PL_CONF_LANE_SHIFT 1
+#define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004)
+#define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8)
+#define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8
+#define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff
+#define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020)
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16
+#define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \
+ (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT)
+#define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c)
+#define PCIE_CORE_INT_PRFPE BIT(0)
+#define PCIE_CORE_INT_CRFPE BIT(1)
+#define PCIE_CORE_INT_RRPE BIT(2)
+#define PCIE_CORE_INT_PRFO BIT(3)
+#define PCIE_CORE_INT_CRFO BIT(4)
+#define PCIE_CORE_INT_RT BIT(5)
+#define PCIE_CORE_INT_RTR BIT(6)
+#define PCIE_CORE_INT_PE BIT(7)
+#define PCIE_CORE_INT_MTR BIT(8)
+#define PCIE_CORE_INT_UCR BIT(9)
+#define PCIE_CORE_INT_FCE BIT(10)
+#define PCIE_CORE_INT_CT BIT(11)
+#define PCIE_CORE_INT_UTC BIT(18)
+#define PCIE_CORE_INT_MMVC BIT(19)
+#define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210)
+#define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300)
+
+#define PCIE_CORE_INT \
+ (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \
+ PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \
+ PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \
+ PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \
+ PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \
+ PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
+ PCIE_CORE_INT_MMVC)
+
+#define PCIE_RC_CONFIG_BASE 0xa00000
+#define PCIE_RC_CONFIG_VENDOR (PCIE_RC_CONFIG_BASE + 0x00)
+#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
+#define PCIE_RC_CONFIG_SCC_SHIFT 16
+#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_LCS_RETRAIN_LINK BIT(5)
+#define PCIE_RC_CONFIG_LCS_LBMIE BIT(10)
+#define PCIE_RC_CONFIG_LCS_LABIE BIT(11)
+#define PCIE_RC_CONFIG_LCS_LBMS BIT(30)
+#define PCIE_RC_CONFIG_LCS_LAMS BIT(31)
+#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+
+#define PCIE_CORE_AXI_CONF_BASE 0xc00000
+#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
+#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4)
+#define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8)
+#define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc)
+
+#define PCIE_CORE_AXI_INBOUND_BASE 0xc00800
+#define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0)
+#define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f
+#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00
+#define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4)
+
+/* Size of one AXI Region (not Region 0) */
+#define AXI_REGION_SIZE BIT(20)
+/* Size of Region 0, equal to sum of sizes of other regions */
+#define AXI_REGION_0_SIZE (32 * (0x1 << 20))
+#define OB_REG_SIZE_SHIFT 5
+#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
+#define AXI_WRAPPER_IO_WRITE 0x6
+#define AXI_WRAPPER_MEM_WRITE 0x2
+
+#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
+#define MIN_AXI_ADDR_BITS_PASSED 8
+#define ROCKCHIP_VENDOR_ID 0x1d87
+#define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20)
+#define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15)
+#define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12)
+#define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0)
+#define PCIE_ECAM_ADDR(bus, dev, func, reg) \
+ (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \
+ PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg))
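+/*
+ * Example (illustrative): the ECAM offset for bus 1, device 0,
+ * function 0, register 0x10 (BAR0) is
+ * PCIE_ECAM_ADDR(1, 0, 0, 0x10) == (1 << 20) | 0x10 == 0x00100010,
+ * so the config access lands at reg_base + 0x100010.
+ */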
+
+#define RC_REGION_0_ADDR_TRANS_H 0x00000000
+#define RC_REGION_0_ADDR_TRANS_L 0x00000000
+#define RC_REGION_0_PASS_BITS (25 - 1)
+#define MAX_AXI_WRAPPER_REGION_NUM 33
+
+struct rockchip_pcie {
+ void __iomem *reg_base; /* DT axi-base */
+ void __iomem *apb_base; /* DT apb-base */
+ struct phy *phy;
+ struct reset_control *core_rst;
+ struct reset_control *mgmt_rst;
+ struct reset_control *mgmt_sticky_rst;
+ struct reset_control *pipe_rst;
+ struct clk *aclk_pcie;
+ struct clk *aclk_perf_pcie;
+ struct clk *hclk_pcie;
+ struct clk *clk_pcie_pm;
+ struct regulator *vpcie3v3; /* 3.3V power supply */
+ struct regulator *vpcie1v8; /* 1.8V power supply */
+ struct regulator *vpcie0v9; /* 0.9V power supply */
+ struct gpio_desc *ep_gpio;
+ u32 lanes;
+ u8 root_bus_nr;
+ struct device *dev;
+ struct irq_domain *irq_domain;
+};
+
+static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
+{
+ return readl(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
+{
+ writel(val, rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCIE_RC_CONFIG_LCS_LBMIE | PCIE_RC_CONFIG_LCS_LABIE);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= (PCIE_RC_CONFIG_LCS_LBMS | PCIE_RC_CONFIG_LCS_LAMS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+}
+
+static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
+{
+ u32 val;
+
+ /* Update Tx credit maximum update interval */
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
+ val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
+ val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */
+ rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
+}
+
+static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, int dev)
+{
+ /* access only one slot on each root port */
+ if (bus->number == rockchip->root_bus_nr && dev > 0)
+ return 0;
+
+ /*
+ * do not read more than one device on the bus directly attached
+ * to RC's downstream side.
+ */
+ if (bus->primary == rockchip->root_bus_nr && dev > 0)
+ return 0;
+
+ return 1;
+}
+
+static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 4) {
+ *val = readl(addr);
+ } else if (size == 2) {
+ *val = readw(addr);
+ } else if (size == 1) {
+ *val = readb(addr);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
+ int where, int size, u32 val)
+{
+ u32 mask, tmp, offset;
+
+ offset = where & ~0x3;
+
+ if (size == 4) {
+ writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+
+ /*
+ * N.B. This read/modify/write isn't safe in general because it can
+ * corrupt RW1C bits in adjacent registers. But the hardware
+ * doesn't support smaller writes.
+ */
+ tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+
+ return PCIBIOS_SUCCESSFUL;
+}
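+/*
+ * Illustration of the RW1C hazard noted above (example, not driver
+ * code): PCI_COMMAND (0x04) and PCI_STATUS (0x06) share one dword, so
+ * rockchip_pcie_wr_own_conf(rockchip, PCI_COMMAND, 2, cmd) reads the
+ * whole dword -- including any PCI_STATUS error bits that are set --
+ * and writes those bits back as 1s, clearing them. Prefer 32-bit
+ * accesses when adjacent write-1-to-clear fields matter.
+ */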
+
+static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ u32 busdev;
+
+ busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+
+ if (!IS_ALIGNED(busdev, size)) {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 4) {
+ *val = readl(rockchip->reg_base + busdev);
+ } else if (size == 2) {
+ *val = readw(rockchip->reg_base + busdev);
+ } else if (size == 1) {
+ *val = readb(rockchip->reg_base + busdev);
+ } else {
+ *val = 0;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
+ struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ u32 busdev;
+
+ busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where);
+ if (!IS_ALIGNED(busdev, size))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (size == 4)
+ writel(val, rockchip->reg_base + busdev);
+ else if (size == 2)
+ writew(val, rockchip->reg_base + busdev);
+ else if (size == 1)
+ writeb(val, rockchip->reg_base + busdev);
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (bus->number == rockchip->root_bus_nr)
+ return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val);
+}
+
+static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct rockchip_pcie *rockchip = bus->sysdata;
+
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == rockchip->root_bus_nr)
+ return rockchip_pcie_wr_own_conf(rockchip, where, size, val);
+
+ return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val);
+}
+
+static struct pci_ops rockchip_pcie_ops = {
+ .read = rockchip_pcie_rd_conf,
+ .write = rockchip_pcie_wr_conf,
+};
+
+/**
+ * rockchip_pcie_init_port - Initialize hardware
+ * @rockchip: PCIe port information
+ */
+static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+ u32 status;
+ unsigned long timeout;
+
+ gpiod_set_value(rockchip->ep_gpio, 0);
+
+ err = phy_init(rockchip->phy);
+ if (err < 0) {
+ dev_err(dev, "fail to init phy, err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->core_rst);
+ if (err) {
+ dev_err(dev, "assert core_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->mgmt_rst);
+ if (err) {
+ dev_err(dev, "assert mgmt_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->mgmt_sticky_rst);
+ if (err) {
+ dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_assert(rockchip->pipe_rst);
+ if (err) {
+ dev_err(dev, "assert pipe_rst err %d\n", err);
+ return err;
+ }
+
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_CONF_ENABLE |
+ PCIE_CLIENT_LINK_TRAIN_ENABLE |
+ PCIE_CLIENT_ARI_ENABLE |
+ PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
+ PCIE_CLIENT_MODE_RC |
+ PCIE_CLIENT_GEN_SEL_2,
+ PCIE_CLIENT_CONFIG);
+
+ err = phy_power_on(rockchip->phy);
+ if (err) {
+ dev_err(dev, "fail to power on phy, err %d\n", err);
+ return err;
+ }
+
+ /*
+ * Please don't reorder the deassert sequence of the following
+ * four reset pins.
+ */
+ err = reset_control_deassert(rockchip->mgmt_sticky_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->core_rst);
+ if (err) {
+ dev_err(dev, "deassert core_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->mgmt_rst);
+ if (err) {
+ dev_err(dev, "deassert mgmt_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->pipe_rst);
+ if (err) {
+ dev_err(dev, "deassert pipe_rst err %d\n", err);
+ return err;
+ }
+
+ /*
+ * We need to read/write PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 before
+ * enabling ASPM. Otherwise L1PwrOnSc and L1PwrOnVal aren't
+ * reliable and enabling ASPM doesn't work. This is a controller
+ * bug we need to work around.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
+
+ /* Fix the transmitted FTS count desired to exit from L0s. */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
+ status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+ (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+
+ /* Enable Gen1 training */
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ gpiod_set_value(rockchip->ep_gpio, 1);
+
+ /* 500ms timeout value should be enough for Gen1/2 training */
+ timeout = jiffies + msecs_to_jiffies(500);
+
+ for (;;) {
+ status = rockchip_pcie_read(rockchip,
+ PCIE_CLIENT_BASIC_STATUS1);
+ if ((status & PCIE_CLIENT_LINK_STATUS_MASK) ==
+ PCIE_CLIENT_LINK_STATUS_UP) {
+ dev_dbg(dev, "PCIe link training gen1 pass!\n");
+ break;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "PCIe link training gen1 timeout!\n");
+ return -ETIMEDOUT;
+ }
+
+ msleep(20);
+ }
+
+ /*
+ * Enable retrain for gen2. This should be configured only after
+ * gen1 finished.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCIE_RC_CONFIG_LCS_RETRAIN_LINK;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ timeout = jiffies + msecs_to_jiffies(500);
+ for (;;) {
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
+ PCIE_CORE_PL_CONF_SPEED_5G) {
+ dev_dbg(dev, "PCIe link training gen2 pass!\n");
+ break;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
+ break;
+ }
+
+ msleep(20);
+ }
+
+ /* Check the final link width from negotiated lane counter from MGMT */
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+ PCIE_CORE_PL_CONF_LANE_SHIFT);
+ dev_dbg(dev, "current link width is x%d\n", status);
+
+ rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ PCIE_RC_CONFIG_VENDOR);
+ rockchip_pcie_write(rockchip,
+ PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
+ PCIE_RC_CONFIG_RID_CCR);
+ rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
+
+ rockchip_pcie_write(rockchip,
+ (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
+ PCIE_CORE_OB_REGION_ADDR0);
+ rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
+ PCIE_CORE_OB_REGION_ADDR1);
+ rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0);
+ rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
+
+ return 0;
+}
+
+static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 sub_reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LOCAL) {
+ dev_dbg(dev, "local interrupt received\n");
+ sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
+ if (sub_reg & PCIE_CORE_INT_PRFPE)
+ dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFPE)
+ dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_RRPE)
+ dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");
+
+ if (sub_reg & PCIE_CORE_INT_PRFO)
+ dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_CRFO)
+ dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");
+
+ if (sub_reg & PCIE_CORE_INT_RT)
+ dev_dbg(dev, "replay timer timed out\n");
+
+ if (sub_reg & PCIE_CORE_INT_RTR)
+ dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");
+
+ if (sub_reg & PCIE_CORE_INT_PE)
+ dev_dbg(dev, "phy error detected on receive side\n");
+
+ if (sub_reg & PCIE_CORE_INT_MTR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_UCR)
+ dev_dbg(dev, "malformed TLP received from the link\n");
+
+ if (sub_reg & PCIE_CORE_INT_FCE)
+ dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
+
+ if (sub_reg & PCIE_CORE_INT_CT)
+ dev_dbg(dev, "a request timed out waiting for completion\n");
+
+ if (sub_reg & PCIE_CORE_INT_UTC)
+ dev_dbg(dev, "unmapped TC error\n");
+
+ if (sub_reg & PCIE_CORE_INT_MMVC)
+ dev_dbg(dev, "MSI mask register changes\n");
+
+ rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
+ } else if (reg & PCIE_CLIENT_INT_PHY) {
+ dev_dbg(dev, "phy link changes\n");
+ rockchip_pcie_update_txcredit_mui(rockchip);
+ rockchip_pcie_clr_bw_int(rockchip);
+ }
+
+ rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct device *dev = rockchip->dev;
+ u32 reg;
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
+ dev_dbg(dev, "legacy done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_MSG)
+ dev_dbg(dev, "message done interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_HOT_RST)
+ dev_dbg(dev, "hot reset interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_DPA)
+ dev_dbg(dev, "dpa interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_FATAL_ERR)
+ dev_dbg(dev, "fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
+ dev_dbg(dev, "no fatal error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_CORR_ERR)
+ dev_dbg(dev, "correctable error interrupt received\n");
+
+ if (reg & PCIE_CLIENT_INT_PHY)
+ dev_dbg(dev, "phy interrupt received\n");
+
+ rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
+ PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
+ PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
+ PCIE_CLIENT_INT_NFATAL_ERR |
+ PCIE_CLIENT_INT_CORR_ERR |
+ PCIE_CLIENT_INT_PHY),
+ PCIE_CLIENT_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ struct device *dev = rockchip->dev;
+ u32 reg;
+ u32 hwirq;
+ u32 virq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
+ reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;
+
+ while (reg) {
+ hwirq = ffs(reg) - 1;
+ reg &= ~BIT(hwirq);
+
+ virq = irq_find_mapping(rockchip->irq_domain, hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ else
+ dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
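+/*
+ * Worked decode (assuming only INTA is pending): INTA is BIT(5), so
+ * (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT == 0x1 and
+ * hwirq = ffs(0x1) - 1 == 0. irq_find_mapping() then returns the
+ * Linux virq that rockchip_pcie_init_irq_domain() mapped for hwirq 0
+ * in its 4-entry linear domain.
+ */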
+
+/**
+ * rockchip_pcie_parse_dt - Parse Device Tree
+ * @rockchip: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *node = dev->of_node;
+ struct resource *regs;
+ int irq;
+ int err;
+
+ regs = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "axi-base");
+ rockchip->reg_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(rockchip->reg_base))
+ return PTR_ERR(rockchip->reg_base);
+
+ regs = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "apb-base");
+ rockchip->apb_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(rockchip->apb_base))
+ return PTR_ERR(rockchip->apb_base);
+
+ rockchip->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(rockchip->phy)) {
+ if (PTR_ERR(rockchip->phy) != -EPROBE_DEFER)
+ dev_err(dev, "missing phy\n");
+ return PTR_ERR(rockchip->phy);
+ }
+
+ rockchip->lanes = 1;
+ err = of_property_read_u32(node, "num-lanes", &rockchip->lanes);
+ if (!err && (rockchip->lanes == 0 ||
+ rockchip->lanes == 3 ||
+ rockchip->lanes > 4)) {
+ dev_warn(dev, "invalid num-lanes, default to use one lane\n");
+ rockchip->lanes = 1;
+ }
+
+ rockchip->core_rst = devm_reset_control_get(dev, "core");
+ if (IS_ERR(rockchip->core_rst)) {
+ if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing core reset property in node\n");
+ return PTR_ERR(rockchip->core_rst);
+ }
+
+ rockchip->mgmt_rst = devm_reset_control_get(dev, "mgmt");
+ if (IS_ERR(rockchip->mgmt_rst)) {
+ if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_rst);
+ }
+
+ rockchip->mgmt_sticky_rst = devm_reset_control_get(dev, "mgmt-sticky");
+ if (IS_ERR(rockchip->mgmt_sticky_rst)) {
+ if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing mgmt-sticky reset property in node\n");
+ return PTR_ERR(rockchip->mgmt_sticky_rst);
+ }
+
+ rockchip->pipe_rst = devm_reset_control_get(dev, "pipe");
+ if (IS_ERR(rockchip->pipe_rst)) {
+ if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
+ dev_err(dev, "missing pipe reset property in node\n");
+ return PTR_ERR(rockchip->pipe_rst);
+ }
+
+ rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH);
+ if (IS_ERR(rockchip->ep_gpio)) {
+ dev_err(dev, "missing ep-gpios property in node\n");
+ return PTR_ERR(rockchip->ep_gpio);
+ }
+
+ rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
+ if (IS_ERR(rockchip->aclk_pcie)) {
+ dev_err(dev, "aclk clock not found\n");
+ return PTR_ERR(rockchip->aclk_pcie);
+ }
+
+ rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
+ if (IS_ERR(rockchip->aclk_perf_pcie)) {
+ dev_err(dev, "aclk_perf clock not found\n");
+ return PTR_ERR(rockchip->aclk_perf_pcie);
+ }
+
+ rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
+ if (IS_ERR(rockchip->hclk_pcie)) {
+ dev_err(dev, "hclk clock not found\n");
+ return PTR_ERR(rockchip->hclk_pcie);
+ }
+
+ rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
+ if (IS_ERR(rockchip->clk_pcie_pm)) {
+ dev_err(dev, "pm clock not found\n");
+ return PTR_ERR(rockchip->clk_pcie_pm);
+ }
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0) {
+ dev_err(dev, "missing sys IRQ resource\n");
+ return -EINVAL;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
+ IRQF_SHARED, "pcie-sys", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe subsystem IRQ\n");
+ return err;
+ }
+
+ irq = platform_get_irq_byname(pdev, "legacy");
+ if (irq < 0) {
+ dev_err(dev, "missing legacy IRQ resource\n");
+ return -EINVAL;
+ }
+
+ irq_set_chained_handler_and_data(irq,
+ rockchip_pcie_legacy_int_handler,
+ rockchip);
+
+ irq = platform_get_irq_byname(pdev, "client");
+ if (irq < 0) {
+ dev_err(dev, "missing client IRQ resource\n");
+ return -EINVAL;
+ }
+
+ err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
+ IRQF_SHARED, "pcie-client", rockchip);
+ if (err) {
+ dev_err(dev, "failed to request PCIe client IRQ\n");
+ return err;
+ }
+
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+ if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie3v3 regulator found\n");
+ }
+
+ rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8)) {
+ if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie1v8 regulator found\n");
+ }
+
+ rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9)) {
+ if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "no vpcie0v9 regulator found\n");
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int err;
+
+ if (!IS_ERR(rockchip->vpcie3v3)) {
+ err = regulator_enable(rockchip->vpcie3v3);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie3v3 regulator\n");
+ goto err_out;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie1v8)) {
+ err = regulator_enable(rockchip->vpcie1v8);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie1v8 regulator\n");
+ goto err_disable_3v3;
+ }
+ }
+
+ if (!IS_ERR(rockchip->vpcie0v9)) {
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ goto err_disable_1v8;
+ }
+ }
+
+ return 0;
+
+err_disable_1v8:
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+err_disable_3v3:
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+err_out:
+ return err;
+}
+
+static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
+ (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
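+ /*
+ * PCIE_CLIENT_INT_MASK appears to use the same hiword write-mask
+ * convention as PCIE_CLIENT_CONFIG: the high halfword selects the
+ * PCIE_CLIENT_INT_CLI mask bits for update and the low halfword of
+ * 0 unmasks them, all in a single write.
+ */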
+ rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
+ PCIE_CORE_INT_MASK);
+
+ rockchip_pcie_enable_bw_int(rockchip);
+}
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ struct device_node *intc = of_get_next_child(dev->of_node, NULL);
+
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_add_linear(intc, 4,
+ &intx_domain_ops, rockchip);
+ if (!rockchip->irq_domain) {
+ dev_err(dev, "failed to get a INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
+ int region_no, int type, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ob_addr_0;
+ u32 ob_addr_1;
+ u32 ob_desc_0;
+ u32 aw_offset;
+
+ if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+ if (region_no == 0) {
+ if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+ if (region_no != 0) {
+ if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
+ return -EINVAL;
+ }
+
+ aw_offset = (region_no << OB_REG_SIZE_SHIFT);
+
+ ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
+ ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
+ ob_addr_1 = upper_addr;
+ ob_desc_0 = (1 << 23 | type);
+
+ rockchip_pcie_write(rockchip, ob_addr_0,
+ PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_addr_1,
+ PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
+ rockchip_pcie_write(rockchip, ob_desc_0,
+ PCIE_CORE_OB_REGION_DESC0 + aw_offset);
+ rockchip_pcie_write(rockchip, 0,
+ PCIE_CORE_OB_REGION_DESC1 + aw_offset);
+
+ return 0;
+}
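+/*
+ * Example matching the probe path below: each 1 MiB outbound window
+ * is programmed with num_pass_bits = 20 - 1. The size checks above
+ * imply the field stores (pass-through bits - 1), so 19 means the low
+ * 20 AXI address bits pass through untranslated, the window spans
+ * 2^20 bytes == AXI_REGION_SIZE, and ob_addr_0 = 19 | (bus_addr &
+ * PCIE_CORE_OB_REGION_ADDR0_LO_ADDR) supplies the window base.
+ */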
+
+static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
+ int region_no, u8 num_pass_bits,
+ u32 lower_addr, u32 upper_addr)
+{
+ u32 ib_addr_0;
+ u32 ib_addr_1;
+ u32 aw_offset;
+
+ if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
+ return -EINVAL;
+ if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
+ return -EINVAL;
+ if (num_pass_bits > 63)
+ return -EINVAL;
+
+ aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);
+
+ ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
+ ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
+ ib_addr_1 = upper_addr;
+
+ rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
+ rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);
+
+ return 0;
+}
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct rockchip_pcie *rockchip;
+ struct device *dev = &pdev->dev;
+ struct pci_bus *bus, *child;
+ struct resource_entry *win;
+ resource_size_t io_base;
+ struct resource *mem;
+ struct resource *io;
+ phys_addr_t io_bus_addr = 0;
+ u32 io_size;
+ phys_addr_t mem_bus_addr = 0;
+ u32 mem_size = 0;
+ int reg_no;
+ int err;
+ int offset;
+
+ LIST_HEAD(res);
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
+ if (!rockchip)
+ return -ENOMEM;
+
+ rockchip->dev = dev;
+
+ err = rockchip_pcie_parse_dt(rockchip);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(rockchip->aclk_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable aclk_pcie clock\n");
+ goto err_aclk_pcie;
+ }
+
+ err = clk_prepare_enable(rockchip->aclk_perf_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
+ goto err_aclk_perf_pcie;
+ }
+
+ err = clk_prepare_enable(rockchip->hclk_pcie);
+ if (err) {
+ dev_err(dev, "unable to enable hclk_pcie clock\n");
+ goto err_hclk_pcie;
+ }
+
+ err = clk_prepare_enable(rockchip->clk_pcie_pm);
+ if (err) {
+ dev_err(dev, "unable to enable hclk_pcie clock\n");
+ goto err_pcie_pm;
+ }
+
+ err = rockchip_pcie_set_vpcie(rockchip);
+ if (err) {
+ dev_err(dev, "failed to set vpcie regulator\n");
+ goto err_set_vpcie;
+ }
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ goto err_vpcie;
+
+ rockchip_pcie_enable_interrupts(rockchip);
+
+ err = rockchip_pcie_init_irq_domain(rockchip);
+ if (err < 0)
+ goto err_vpcie;
+
+ err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
+ &res, &io_base);
+ if (err)
+ goto err_vpcie;
+
+ err = devm_request_pci_bus_resources(dev, &res);
+ if (err)
+ goto err_vpcie;
+
+ /* Get the I/O and memory ranges from DT */
+ io_size = 0;
+ resource_list_for_each_entry(win, &res) {
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+ io->name = "I/O";
+ io_size = resource_size(io);
+ io_bus_addr = io->start - win->offset;
+ err = pci_remap_iospace(io, io_base);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, io);
+ continue;
+ }
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ mem->name = "MEM";
+ mem_size = resource_size(mem);
+ mem_bus_addr = mem->start - win->offset;
+ break;
+ case IORESOURCE_BUS:
+ rockchip->root_bus_nr = win->res->start;
+ break;
+ default:
+ continue;
+ }
+ }
+
+ if (mem_size) {
+ for (reg_no = 0; reg_no < (mem_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+ AXI_WRAPPER_MEM_WRITE,
+ 20 - 1,
+ mem_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC mem outbound ATU failed\n");
+ goto err_vpcie;
+ }
+ }
+ }
+
+ err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+ if (err) {
+ dev_err(dev, "program RC mem inbound ATU failed\n");
+ goto err_vpcie;
+ }
+
+ offset = mem_size >> 20;
+
+ if (io_size) {
+ for (reg_no = 0; reg_no < (io_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip,
+ reg_no + 1 + offset,
+ AXI_WRAPPER_IO_WRITE,
+ 20 - 1,
+ io_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC io outbound ATU failed\n");
+ goto err_vpcie;
+ }
+ }
+ }
+
+ bus = pci_scan_root_bus(dev, 0, &rockchip_pcie_ops, rockchip, &res);
+ if (!bus) {
+ err = -ENOMEM;
+ goto err_vpcie;
+ }
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+
+ dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
+
+ return 0;
+
+err_vpcie:
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+err_set_vpcie:
+ clk_disable_unprepare(rockchip->clk_pcie_pm);
+err_pcie_pm:
+ clk_disable_unprepare(rockchip->hclk_pcie);
+err_hclk_pcie:
+ clk_disable_unprepare(rockchip->aclk_perf_pcie);
+err_aclk_perf_pcie:
+ clk_disable_unprepare(rockchip->aclk_pcie);
+err_aclk_pcie:
+ return err;
+}
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ { .compatible = "rockchip,rk3399-pcie", },
+ {}
+};
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ },
+ .probe = rockchip_pcie_probe,
+};
+builtin_platform_driver(rockchip_pcie_driver);
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index a4060b85ab23..3cf197ba7f37 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -15,7 +15,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -25,10 +25,10 @@
#include "pcie-designware.h"
struct spear13xx_pcie {
+ struct pcie_port pp; /* DT dbi is pp.dbi_base */
void __iomem *app_base;
struct phy *phy;
struct clk *clk;
- struct pcie_port pp;
bool is_gen1;
};
@@ -57,96 +57,26 @@ struct pcie_app_reg {
};
/* CR0 ID */
-#define RX_LANE_FLIP_EN_ID 0
-#define TX_LANE_FLIP_EN_ID 1
-#define SYS_AUX_PWR_DET_ID 2
#define APP_LTSSM_ENABLE_ID 3
-#define SYS_ATTEN_BUTTON_PRESSED_ID 4
-#define SYS_MRL_SENSOR_STATE_ID 5
-#define SYS_PWR_FAULT_DET_ID 6
-#define SYS_MRL_SENSOR_CHGED_ID 7
-#define SYS_PRE_DET_CHGED_ID 8
-#define SYS_CMD_CPLED_INT_ID 9
-#define APP_INIT_RST_0_ID 11
-#define APP_REQ_ENTR_L1_ID 12
-#define APP_READY_ENTR_L23_ID 13
-#define APP_REQ_EXIT_L1_ID 14
-#define DEVICE_TYPE_EP (0 << 25)
-#define DEVICE_TYPE_LEP (1 << 25)
#define DEVICE_TYPE_RC (4 << 25)
-#define SYS_INT_ID 29
#define MISCTRL_EN_ID 30
#define REG_TRANSLATION_ENABLE 31
-/* CR1 ID */
-#define APPS_PM_XMT_TURNOFF_ID 2
-#define APPS_PM_XMT_PME_ID 5
-
/* CR3 ID */
-#define XMLH_LTSSM_STATE_DETECT_QUIET 0x00
-#define XMLH_LTSSM_STATE_DETECT_ACT 0x01
-#define XMLH_LTSSM_STATE_POLL_ACTIVE 0x02
-#define XMLH_LTSSM_STATE_POLL_COMPLIANCE 0x03
-#define XMLH_LTSSM_STATE_POLL_CONFIG 0x04
-#define XMLH_LTSSM_STATE_PRE_DETECT_QUIET 0x05
-#define XMLH_LTSSM_STATE_DETECT_WAIT 0x06
-#define XMLH_LTSSM_STATE_CFG_LINKWD_START 0x07
-#define XMLH_LTSSM_STATE_CFG_LINKWD_ACEPT 0x08
-#define XMLH_LTSSM_STATE_CFG_LANENUM_WAIT 0x09
-#define XMLH_LTSSM_STATE_CFG_LANENUM_ACEPT 0x0A
-#define XMLH_LTSSM_STATE_CFG_COMPLETE 0x0B
-#define XMLH_LTSSM_STATE_CFG_IDLE 0x0C
-#define XMLH_LTSSM_STATE_RCVRY_LOCK 0x0D
-#define XMLH_LTSSM_STATE_RCVRY_SPEED 0x0E
-#define XMLH_LTSSM_STATE_RCVRY_RCVRCFG 0x0F
-#define XMLH_LTSSM_STATE_RCVRY_IDLE 0x10
-#define XMLH_LTSSM_STATE_L0 0x11
-#define XMLH_LTSSM_STATE_L0S 0x12
-#define XMLH_LTSSM_STATE_L123_SEND_EIDLE 0x13
-#define XMLH_LTSSM_STATE_L1_IDLE 0x14
-#define XMLH_LTSSM_STATE_L2_IDLE 0x15
-#define XMLH_LTSSM_STATE_L2_WAKE 0x16
-#define XMLH_LTSSM_STATE_DISABLED_ENTRY 0x17
-#define XMLH_LTSSM_STATE_DISABLED_IDLE 0x18
-#define XMLH_LTSSM_STATE_DISABLED 0x19
-#define XMLH_LTSSM_STATE_LPBK_ENTRY 0x1A
-#define XMLH_LTSSM_STATE_LPBK_ACTIVE 0x1B
-#define XMLH_LTSSM_STATE_LPBK_EXIT 0x1C
-#define XMLH_LTSSM_STATE_LPBK_EXIT_TIMEOUT 0x1D
-#define XMLH_LTSSM_STATE_HOT_RESET_ENTRY 0x1E
-#define XMLH_LTSSM_STATE_HOT_RESET 0x1F
-#define XMLH_LTSSM_STATE_MASK 0x3F
#define XMLH_LINK_UP (1 << 6)
-/* CR4 ID */
-#define CFG_MSI_EN_ID 18
-
/* CR6 */
-#define INTA_CTRL_INT (1 << 7)
-#define INTB_CTRL_INT (1 << 8)
-#define INTC_CTRL_INT (1 << 9)
-#define INTD_CTRL_INT (1 << 10)
#define MSI_CTRL_INT (1 << 26)
-/* CR19 ID */
-#define VEN_MSI_REQ_ID 11
-#define VEN_MSI_FUN_NUM_ID 8
-#define VEN_MSI_TC_ID 5
-#define VEN_MSI_VECTOR_ID 0
-#define VEN_MSI_REQ_EN ((u32)0x1 << VEN_MSI_REQ_ID)
-#define VEN_MSI_FUN_NUM_MASK ((u32)0x7 << VEN_MSI_FUN_NUM_ID)
-#define VEN_MSI_TC_MASK ((u32)0x7 << VEN_MSI_TC_ID)
-#define VEN_MSI_VECTOR_MASK ((u32)0x1F << VEN_MSI_VECTOR_ID)
-
#define EXP_CAP_ID_OFFSET 0x70
#define to_spear13xx_pcie(x) container_of(x, struct spear13xx_pcie, pp)
-static int spear13xx_pcie_establish_link(struct pcie_port *pp)
+static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
{
- u32 val;
- struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_port *pp = &spear13xx_pcie->pp;
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ u32 val;
u32 exp_cap_off = EXP_CAP_ID_OFFSET;
if (dw_pcie_link_up(pp)) {
@@ -203,9 +133,9 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
{
- struct pcie_port *pp = arg;
- struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct spear13xx_pcie *spear13xx_pcie = arg;
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_port *pp = &spear13xx_pcie->pp;
unsigned int status;
status = readl(&app_reg->int_sts);
@@ -220,9 +150,9 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void spear13xx_pcie_enable_interrupts(struct pcie_port *pp)
+static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
{
- struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_port *pp = &spear13xx_pcie->pp;
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
/* Enable MSI interrupt */
@@ -246,8 +176,10 @@ static int spear13xx_pcie_link_up(struct pcie_port *pp)
static void spear13xx_pcie_host_init(struct pcie_port *pp)
{
- spear13xx_pcie_establish_link(pp);
- spear13xx_pcie_enable_interrupts(pp);
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+
+ spear13xx_pcie_establish_link(spear13xx_pcie);
+ spear13xx_pcie_enable_interrupts(spear13xx_pcie);
}
static struct pcie_host_ops spear13xx_pcie_host_ops = {
@@ -255,10 +187,11 @@ static struct pcie_host_ops spear13xx_pcie_host_ops = {
.host_init = spear13xx_pcie_host_init,
};
-static int spear13xx_add_pcie_port(struct pcie_port *pp,
- struct platform_device *pdev)
+static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
+ struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
+ struct pcie_port *pp = &spear13xx_pcie->pp;
+ struct device *dev = pp->dev;
int ret;
pp->irq = platform_get_irq(pdev, 0);
@@ -268,7 +201,7 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp,
}
ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "spear1340-pcie", pp);
+ "spear1340-pcie", spear13xx_pcie);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
@@ -288,10 +221,10 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp,
static int spear13xx_pcie_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct spear13xx_pcie *spear13xx_pcie;
struct pcie_port *pp;
- struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct resource *dbi_base;
int ret;
@@ -323,7 +256,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
}
pp = &spear13xx_pcie->pp;
-
pp->dev = dev;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
@@ -338,7 +270,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
if (of_property_read_bool(np, "st,pcie-is-gen1"))
spear13xx_pcie->is_gen1 = true;
- ret = spear13xx_add_pcie_port(pp, pdev);
+ ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev);
if (ret < 0)
goto fail_clk;
@@ -355,7 +287,6 @@ static const struct of_device_id spear13xx_pcie_of_match[] = {
{ .compatible = "st,spear1340-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match);
static struct platform_driver spear13xx_pcie_driver = {
.probe = spear13xx_pcie_probe,
@@ -365,14 +296,8 @@ static struct platform_driver spear13xx_pcie_driver = {
},
};
-/* SPEAr13xx PCIe driver does not allow module unload */
-
static int __init spear13xx_pcie_init(void)
{
return platform_driver_register(&spear13xx_pcie_driver);
}
-module_init(spear13xx_pcie_init);
-
-MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver");
-MODULE_AUTHOR("Pratyush Anand <pratyush.anand@gmail.com>");
-MODULE_LICENSE("GPL v2");
+device_initcall(spear13xx_pcie_init);
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 0b597d9190b4..43eaa4afab94 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -15,7 +15,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -85,10 +85,15 @@
#define MSGF_MISC_SR_MASTER_ERR BIT(5)
#define MSGF_MISC_SR_I_ADDR_ERR BIT(6)
#define MSGF_MISC_SR_E_ADDR_ERR BIT(7)
-#define MSGF_MISC_SR_UR_DETECT BIT(20)
-
-#define MSGF_MISC_SR_PCIE_CORE GENMASK(18, 16)
-#define MSGF_MISC_SR_PCIE_CORE_ERR GENMASK(31, 22)
+#define MSGF_MISC_SR_FATAL_AER BIT(16)
+#define MSGF_MISC_SR_NON_FATAL_AER BIT(17)
+#define MSGF_MISC_SR_CORR_AER BIT(18)
+#define MSGF_MISC_SR_UR_DETECT BIT(20)
+#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
+#define MSGF_MISC_SR_FATAL_DEV BIT(23)
+#define MSGF_MISC_SR_LINK_DOWN BIT(24)
+#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
+#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
MSGF_MISC_SR_RXMSG_OVER | \
@@ -96,9 +101,15 @@
MSGF_MISC_SR_MASTER_ERR | \
MSGF_MISC_SR_I_ADDR_ERR | \
MSGF_MISC_SR_E_ADDR_ERR | \
+ MSGF_MISC_SR_FATAL_AER | \
+ MSGF_MISC_SR_NON_FATAL_AER | \
+ MSGF_MISC_SR_CORR_AER | \
MSGF_MISC_SR_UR_DETECT | \
- MSGF_MISC_SR_PCIE_CORE | \
- MSGF_MISC_SR_PCIE_CORE_ERR)
+ MSGF_MISC_SR_NON_FATAL_DEV | \
+ MSGF_MISC_SR_FATAL_DEV | \
+ MSGF_MISC_SR_LINK_DOWN | \
+ MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
+ MSGF_MSIC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA BIT(0)
@@ -109,8 +120,8 @@
MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)
/* MSI interrupt status mask bits */
-#define MSGF_MSI_SR_LO_MASK BIT(0)
-#define MSGF_MSI_SR_HI_MASK BIT(0)
+#define MSGF_MSI_SR_LO_MASK GENMASK(31, 0)
+#define MSGF_MSI_SR_HI_MASK GENMASK(31, 0)
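+/*
+ * Each MSI status word carries one bit per vector, so the masks must
+ * cover all 32 bits; the old BIT(0) masks would have dropped 31 of
+ * the 32 vectors in each word.
+ */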
#define MSII_PRESENT BIT(0)
#define MSII_ENABLE BIT(0)
@@ -201,6 +212,7 @@ static bool nwl_phy_link_up(struct nwl_pcie *pcie)
static int nwl_wait_for_link(struct nwl_pcie *pcie)
{
+ struct device *dev = pcie->dev;
int retries;
/* check if the link is up or not */
@@ -210,7 +222,7 @@ static int nwl_wait_for_link(struct nwl_pcie *pcie)
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_err(pcie->dev, "PHY link never came up\n");
+ dev_err(dev, "PHY link never came up\n");
return -ETIMEDOUT;
}
@@ -266,6 +278,7 @@ static struct pci_ops nwl_pcie_ops = {
static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
{
struct nwl_pcie *pcie = data;
+ struct device *dev = pcie->dev;
u32 misc_stat;
/* Checking for misc interrupts */
@@ -275,24 +288,43 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
return IRQ_NONE;
if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
- dev_err(pcie->dev, "Received Message FIFO Overflow\n");
+ dev_err(dev, "Received Message FIFO Overflow\n");
if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
- dev_err(pcie->dev, "Slave error\n");
+ dev_err(dev, "Slave error\n");
if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
- dev_err(pcie->dev, "Master error\n");
+ dev_err(dev, "Master error\n");
if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
- dev_err(pcie->dev,
- "In Misc Ingress address translation error\n");
+ dev_err(dev, "In Misc Ingress address translation error\n");
if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
- dev_err(pcie->dev,
- "In Misc Egress address translation error\n");
+ dev_err(dev, "In Misc Egress address translation error\n");
+
+ if (misc_stat & MSGF_MISC_SR_FATAL_AER)
+ dev_err(dev, "Fatal Error in AER Capability\n");
+
+ if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
+ dev_err(dev, "Non-Fatal Error in AER Capability\n");
+
+ if (misc_stat & MSGF_MISC_SR_CORR_AER)
+ dev_err(dev, "Correctable Error in AER Capability\n");
+
+ if (misc_stat & MSGF_MISC_SR_UR_DETECT)
+ dev_err(dev, "Unsupported request Detected\n");
+
+ if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
+ dev_err(dev, "Non-Fatal Error Detected\n");
+
+ if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
+ dev_err(dev, "Fatal Error Detected\n");
+
+ if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
+ dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
- if (misc_stat & MSGF_MISC_SR_PCIE_CORE_ERR)
- dev_err(pcie->dev, "PCIe Core error\n");
+ if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
+ dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);
@@ -459,57 +491,24 @@ static const struct irq_domain_ops dev_msi_domain_ops = {
.free = nwl_irq_domain_free,
};
-static void nwl_msi_free_irq_domain(struct nwl_pcie *pcie)
-{
- struct nwl_msi *msi = &pcie->msi;
-
- if (msi->irq_msi0)
- irq_set_chained_handler_and_data(msi->irq_msi0, NULL, NULL);
- if (msi->irq_msi1)
- irq_set_chained_handler_and_data(msi->irq_msi1, NULL, NULL);
-
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
- if (msi->dev_domain)
- irq_domain_remove(msi->dev_domain);
-
- kfree(msi->bitmap);
- msi->bitmap = NULL;
-}
-
-static void nwl_pcie_free_irq_domain(struct nwl_pcie *pcie)
-{
- int i;
- u32 irq;
-
- for (i = 0; i < INTX_NUM; i++) {
- irq = irq_find_mapping(pcie->legacy_irq_domain, i + 1);
- if (irq > 0)
- irq_dispose_mapping(irq);
- }
- if (pcie->legacy_irq_domain)
- irq_domain_remove(pcie->legacy_irq_domain);
-
- nwl_msi_free_irq_domain(pcie);
-}
-
static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
- struct fwnode_handle *fwnode = of_node_to_fwnode(pcie->dev->of_node);
+ struct device *dev = pcie->dev;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
&dev_msi_domain_ops, pcie);
if (!msi->dev_domain) {
- dev_err(pcie->dev, "failed to create dev IRQ domain\n");
+ dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
}
msi->msi_domain = pci_msi_create_irq_domain(fwnode,
&nwl_msi_domain_info,
msi->dev_domain);
if (!msi->msi_domain) {
- dev_err(pcie->dev, "failed to create msi IRQ domain\n");
+ dev_err(dev, "failed to create msi IRQ domain\n");
irq_domain_remove(msi->dev_domain);
return -ENOMEM;
}
@@ -519,12 +518,13 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
- struct device_node *node = pcie->dev->of_node;
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node;
struct device_node *legacy_intc_node;
legacy_intc_node = of_get_next_child(node, NULL);
if (!legacy_intc_node) {
- dev_err(pcie->dev, "No legacy intc node found\n");
+ dev_err(dev, "No legacy intc node found\n");
return -EINVAL;
}
@@ -534,7 +534,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
pcie);
if (!pcie->legacy_irq_domain) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
+ dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -544,7 +544,8 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct nwl_msi *msi = &pcie->msi;
unsigned long base;
int ret;
@@ -559,7 +560,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
/* Get msi_1 IRQ number */
msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
if (msi->irq_msi1 < 0) {
- dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi1);
+ dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1);
ret = -EINVAL;
goto err;
}
@@ -570,7 +571,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
/* Get msi_0 IRQ number */
msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
if (msi->irq_msi0 < 0) {
- dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi0);
+ dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0);
ret = -EINVAL;
goto err;
}
@@ -581,7 +582,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
/* Check for msii_present bit */
ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
if (!ret) {
- dev_err(pcie->dev, "MSI not present\n");
+ dev_err(dev, "MSI not present\n");
ret = -EIO;
goto err;
}
@@ -630,13 +631,14 @@ err:
static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
{
- struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
u32 breg_val, ecam_val, first_busno = 0;
int err;
breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
if (!breg_val) {
- dev_err(pcie->dev, "BREG is not present\n");
+ dev_err(dev, "BREG is not present\n");
return breg_val;
}
@@ -667,7 +669,7 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
if (!ecam_val) {
- dev_err(pcie->dev, "ECAM is not present\n");
+ dev_err(dev, "ECAM is not present\n");
return ecam_val;
}
@@ -694,23 +696,23 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
if (nwl_pcie_link_up(pcie))
- dev_info(pcie->dev, "Link is UP\n");
+ dev_info(dev, "Link is UP\n");
else
- dev_info(pcie->dev, "Link is DOWN\n");
+ dev_info(dev, "Link is DOWN\n");
/* Get misc IRQ number */
pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
if (pcie->irq_misc < 0) {
- dev_err(&pdev->dev, "failed to get misc IRQ %d\n",
+ dev_err(dev, "failed to get misc IRQ %d\n",
pcie->irq_misc);
return -EINVAL;
}
- err = devm_request_irq(pcie->dev, pcie->irq_misc,
+ err = devm_request_irq(dev, pcie->irq_misc,
nwl_pcie_misc_handler, IRQF_SHARED,
"nwl_pcie:misc", pcie);
if (err) {
- dev_err(pcie->dev, "fail to register misc IRQ#%d\n",
+ dev_err(dev, "fail to register misc IRQ#%d\n",
pcie->irq_misc);
return err;
}
@@ -746,31 +748,32 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
struct platform_device *pdev)
{
- struct device_node *node = pcie->dev->of_node;
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node;
struct resource *res;
const char *type;
/* Check for device type */
type = of_get_property(node, "device_type", NULL);
if (!type || strcmp(type, "pci")) {
- dev_err(pcie->dev, "invalid \"device_type\" %s\n", type);
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
return -EINVAL;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
- pcie->breg_base = devm_ioremap_resource(pcie->dev, res);
+ pcie->breg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->breg_base))
return PTR_ERR(pcie->breg_base);
pcie->phys_breg_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
- pcie->pcireg_base = devm_ioremap_resource(pcie->dev, res);
+ pcie->pcireg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->pcireg_base))
return PTR_ERR(pcie->pcireg_base);
pcie->phys_pcie_reg_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
- pcie->ecam_base = devm_ioremap_resource(pcie->dev, res);
+ pcie->ecam_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->ecam_base))
return PTR_ERR(pcie->ecam_base);
pcie->phys_ecam_base = res->start;
@@ -778,8 +781,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
/* Get intx IRQ number */
pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
if (pcie->irq_intx < 0) {
- dev_err(&pdev->dev, "failed to get intx IRQ %d\n",
- pcie->irq_intx);
+ dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx);
return -EINVAL;
}
@@ -796,7 +798,8 @@ static const struct of_device_id nwl_pcie_of_match[] = {
static int nwl_pcie_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
struct nwl_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
@@ -804,42 +807,42 @@ static int nwl_pcie_probe(struct platform_device *pdev)
resource_size_t iobase = 0;
LIST_HEAD(res);
- pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
- pcie->dev = &pdev->dev;
+ pcie->dev = dev;
pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
err = nwl_pcie_parse_dt(pcie, pdev);
if (err) {
- dev_err(pcie->dev, "Parsing DT failed\n");
+ dev_err(dev, "Parsing DT failed\n");
return err;
}
err = nwl_pcie_bridge_init(pcie);
if (err) {
- dev_err(pcie->dev, "HW Initialization failed\n");
+ dev_err(dev, "HW Initialization failed\n");
return err;
}
err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
if (err) {
- dev_err(pcie->dev, "Getting bridge resources failed\n");
+ dev_err(dev, "Getting bridge resources failed\n");
return err;
}
- err = devm_request_pci_bus_resources(pcie->dev, &res);
+ err = devm_request_pci_bus_resources(dev, &res);
if (err)
goto error;
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
- dev_err(pcie->dev, "Failed creating IRQ Domain\n");
+ dev_err(dev, "Failed creating IRQ Domain\n");
goto error;
}
- bus = pci_create_root_bus(&pdev->dev, pcie->root_busno,
+ bus = pci_create_root_bus(dev, pcie->root_busno,
&nwl_pcie_ops, pcie, &res);
if (!bus) {
err = -ENOMEM;
@@ -849,8 +852,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = nwl_pcie_enable_msi(pcie, bus);
if (err < 0) {
- dev_err(&pdev->dev,
- "failed to enable MSI support: %d\n", err);
+ dev_err(dev, "failed to enable MSI support: %d\n", err);
goto error;
}
}
@@ -859,7 +861,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
- platform_set_drvdata(pdev, pcie);
return 0;
error:
@@ -867,25 +868,12 @@ error:
return err;
}
-static int nwl_pcie_remove(struct platform_device *pdev)
-{
- struct nwl_pcie *pcie = platform_get_drvdata(pdev);
-
- nwl_pcie_free_irq_domain(pcie);
- platform_set_drvdata(pdev, NULL);
- return 0;
-}
-
static struct platform_driver nwl_pcie_driver = {
.driver = {
.name = "nwl-pcie",
+ .suppress_bind_attrs = true,
.of_match_table = nwl_pcie_of_match,
},
.probe = nwl_pcie_probe,
- .remove = nwl_pcie_remove,
};
-module_platform_driver(nwl_pcie_driver);
-
-MODULE_AUTHOR("Xilinx, Inc");
-MODULE_DESCRIPTION("NWL PCIe driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(nwl_pcie_driver);
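Since the remove() path is gone, the hunk above also sets suppress_bind_attrs so userspace cannot unbind the (now unremovable) driver through sysfs. For readers unfamiliar with the macro, builtin_platform_driver() boils down to the following open-coded registration (a simplified sketch; the real macro in <linux/platform_device.h> derives the init function's name from the driver variable):

static int __init nwl_pcie_driver_init(void)
{
	return platform_driver_register(&nwl_pcie_driver);
}
device_initcall(nwl_pcie_driver_init);

Unlike module_platform_driver(), no module exit/unregister path is generated, which is exactly right for code that can only be built in.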
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index a30e01639557..c8616fadccf1 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -18,7 +18,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -101,7 +101,8 @@
* @msi_pages: MSI pages
* @root_busno: Root Bus number
* @dev: Device pointer
- * @irq_domain: IRQ domain pointer
+ * @msi_domain: MSI IRQ domain pointer
+ * @leg_domain: Legacy IRQ domain pointer
* @resources: Bus Resources
*/
struct xilinx_pcie_port {
@@ -110,7 +111,8 @@ struct xilinx_pcie_port {
unsigned long msi_pages;
u8 root_busno;
struct device *dev;
- struct irq_domain *irq_domain;
+ struct irq_domain *msi_domain;
+ struct irq_domain *leg_domain;
struct list_head resources;
};
@@ -138,10 +140,11 @@ static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
*/
static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
{
+ struct device *dev = port->dev;
unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
- dev_dbg(port->dev, "Requester ID %lu\n",
+ dev_dbg(dev, "Requester ID %lu\n",
val & XILINX_PCIE_RPEFR_REQ_ID);
pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
XILINX_PCIE_REG_RPEFR);
@@ -168,13 +171,6 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
if (bus->number == port->root_busno && devfn > 0)
return false;
- /*
- * Do not read more than one device on the bus directly attached
- * to RC.
- */
- if (bus->primary == port->root_busno && devfn > 0)
- return false;
-
return true;
}
@@ -219,23 +215,24 @@ static void xilinx_pcie_destroy_msi(unsigned int irq)
{
struct msi_desc *msi;
struct xilinx_pcie_port *port;
+ struct irq_data *d = irq_get_irq_data(irq);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (!test_bit(irq, msi_irq_in_use)) {
+ if (!test_bit(hwirq, msi_irq_in_use)) {
msi = irq_get_msi_desc(irq);
port = msi_desc_to_pci_sysdata(msi);
dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
} else {
- clear_bit(irq, msi_irq_in_use);
+ clear_bit(hwirq, msi_irq_in_use);
}
}
/**
* xilinx_pcie_assign_msi - Allocate MSI number
- * @port: PCIe port structure
*
* Return: A valid IRQ on success and error value on failure.
*/
-static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
+static int xilinx_pcie_assign_msi(void)
{
int pos;
@@ -257,6 +254,7 @@ static void xilinx_msi_teardown_irq(struct msi_controller *chip,
unsigned int irq)
{
xilinx_pcie_destroy_msi(irq);
+ irq_dispose_mapping(irq);
}
/**
@@ -277,11 +275,11 @@ static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
struct msi_msg msg;
phys_addr_t msg_addr;
- hwirq = xilinx_pcie_assign_msi(port);
+ hwirq = xilinx_pcie_assign_msi();
if (hwirq < 0)
return hwirq;
- irq = irq_create_mapping(port->irq_domain, hwirq);
+ irq = irq_create_mapping(port->msi_domain, hwirq);
if (!irq)
return -EINVAL;
@@ -385,6 +383,7 @@ static const struct irq_domain_ops intx_domain_ops = {
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+ struct device *dev = port->dev;
u32 val, mask, status, msi_data;
/* Read interrupt decode and mask registers */
@@ -396,32 +395,32 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
return IRQ_NONE;
if (status & XILINX_PCIE_INTR_LINK_DOWN)
- dev_warn(port->dev, "Link Down\n");
+ dev_warn(dev, "Link Down\n");
if (status & XILINX_PCIE_INTR_ECRC_ERR)
- dev_warn(port->dev, "ECRC failed\n");
+ dev_warn(dev, "ECRC failed\n");
if (status & XILINX_PCIE_INTR_STR_ERR)
- dev_warn(port->dev, "Streaming error\n");
+ dev_warn(dev, "Streaming error\n");
if (status & XILINX_PCIE_INTR_HOT_RESET)
- dev_info(port->dev, "Hot reset\n");
+ dev_info(dev, "Hot reset\n");
if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
- dev_warn(port->dev, "ECAM access timeout\n");
+ dev_warn(dev, "ECAM access timeout\n");
if (status & XILINX_PCIE_INTR_CORRECTABLE) {
- dev_warn(port->dev, "Correctable error message\n");
+ dev_warn(dev, "Correctable error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
if (status & XILINX_PCIE_INTR_NONFATAL) {
- dev_warn(port->dev, "Non fatal error message\n");
+ dev_warn(dev, "Non fatal error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
if (status & XILINX_PCIE_INTR_FATAL) {
- dev_warn(port->dev, "Fatal error message\n");
+ dev_warn(dev, "Fatal error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
@@ -431,8 +430,8 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
/* Check whether interrupt valid */
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
- dev_warn(port->dev, "RP Intr FIFO1 read error\n");
- return IRQ_HANDLED;
+ dev_warn(dev, "RP Intr FIFO1 read error\n");
+ goto error;
}
if (!(val & XILINX_PCIE_RPIFR1_MSI_INTR)) {
@@ -443,7 +442,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
/* Handle INTx Interrupt */
val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
- generic_handle_irq(irq_find_mapping(port->irq_domain,
+ generic_handle_irq(irq_find_mapping(port->leg_domain,
val));
}
}
@@ -453,8 +452,8 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
- dev_warn(port->dev, "RP Intr FIFO1 read error\n");
- return IRQ_HANDLED;
+ dev_warn(dev, "RP Intr FIFO1 read error\n");
+ goto error;
}
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
@@ -473,32 +472,33 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
}
if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
- dev_warn(port->dev, "Slave unsupported request\n");
+ dev_warn(dev, "Slave unsupported request\n");
if (status & XILINX_PCIE_INTR_SLV_UNEXP)
- dev_warn(port->dev, "Slave unexpected completion\n");
+ dev_warn(dev, "Slave unexpected completion\n");
if (status & XILINX_PCIE_INTR_SLV_COMPL)
- dev_warn(port->dev, "Slave completion timeout\n");
+ dev_warn(dev, "Slave completion timeout\n");
if (status & XILINX_PCIE_INTR_SLV_ERRP)
- dev_warn(port->dev, "Slave Error Poison\n");
+ dev_warn(dev, "Slave Error Poison\n");
if (status & XILINX_PCIE_INTR_SLV_CMPABT)
- dev_warn(port->dev, "Slave Completer Abort\n");
+ dev_warn(dev, "Slave Completer Abort\n");
if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
- dev_warn(port->dev, "Slave Illegal Burst\n");
+ dev_warn(dev, "Slave Illegal Burst\n");
if (status & XILINX_PCIE_INTR_MST_DECERR)
- dev_warn(port->dev, "Master decode error\n");
+ dev_warn(dev, "Master decode error\n");
if (status & XILINX_PCIE_INTR_MST_SLVERR)
- dev_warn(port->dev, "Master slave error\n");
+ dev_warn(dev, "Master slave error\n");
if (status & XILINX_PCIE_INTR_MST_ERRP)
- dev_warn(port->dev, "Master error poison\n");
+ dev_warn(dev, "Master error poison\n");
+error:
/* Clear the Interrupt Decode register */
pcie_write(port, status, XILINX_PCIE_REG_IDR);
@@ -506,35 +506,6 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
}
/**
- * xilinx_pcie_free_irq_domain - Free IRQ domain
- * @port: PCIe port information
- */
-static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port)
-{
- int i;
- u32 irq, num_irqs;
-
- /* Free IRQ Domain */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
-
- free_pages(port->msi_pages, 0);
-
- num_irqs = XILINX_NUM_MSI_IRQS;
- } else {
- /* INTx */
- num_irqs = 4;
- }
-
- for (i = 0; i < num_irqs; i++) {
- irq = irq_find_mapping(port->irq_domain, i);
- if (irq > 0)
- irq_dispose_mapping(irq);
- }
-
- irq_domain_remove(port->irq_domain);
-}
-
-/**
* xilinx_pcie_init_irq_domain - Initialize IRQ domain
* @port: PCIe port information
*
@@ -553,21 +524,21 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
return -ENODEV;
}
- port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+ port->leg_domain = irq_domain_add_linear(pcie_intc_node, 4,
&intx_domain_ops,
port);
- if (!port->irq_domain) {
+ if (!port->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENODEV;
}
/* Setup MSI */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- port->irq_domain = irq_domain_add_linear(node,
+ port->msi_domain = irq_domain_add_linear(node,
XILINX_NUM_MSI_IRQS,
&msi_domain_ops,
&xilinx_pcie_msi_chip);
- if (!port->irq_domain) {
+ if (!port->msi_domain) {
dev_err(dev, "Failed to get a MSI IRQ domain\n");
return -ENODEV;
}
@@ -584,10 +555,12 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
*/
static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
{
+ struct device *dev = port->dev;
+
if (xilinx_pcie_link_is_up(port))
- dev_info(port->dev, "PCIe Link is UP\n");
+ dev_info(dev, "PCIe Link is UP\n");
else
- dev_info(port->dev, "PCIe Link is DOWN\n");
+ dev_info(dev, "PCIe Link is DOWN\n");
/* Disable all interrupts */
pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
@@ -657,8 +630,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
*/
static int xilinx_pcie_probe(struct platform_device *pdev)
{
- struct xilinx_pcie_port *port;
struct device *dev = &pdev->dev;
+ struct xilinx_pcie_port *port;
struct pci_bus *bus;
int err;
resource_size_t iobase = 0;
@@ -698,15 +671,14 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
if (err)
goto error;
- bus = pci_create_root_bus(&pdev->dev, 0,
- &xilinx_pcie_ops, port, &res);
+ bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res);
if (!bus) {
err = -ENOMEM;
goto error;
}
#ifdef CONFIG_PCI_MSI
- xilinx_pcie_msi_chip.dev = port->dev;
+ xilinx_pcie_msi_chip.dev = dev;
bus->msi = &xilinx_pcie_msi_chip;
#endif
pci_scan_child_bus(bus);
@@ -715,8 +687,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif
pci_bus_add_devices(bus);
- platform_set_drvdata(pdev, port);
-
return 0;
error:
@@ -724,21 +694,6 @@ error:
return err;
}
-/**
- * xilinx_pcie_remove - Remove function
- * @pdev: Platform device pointer
- *
- * Return: '0' always
- */
-static int xilinx_pcie_remove(struct platform_device *pdev)
-{
- struct xilinx_pcie_port *port = platform_get_drvdata(pdev);
-
- xilinx_pcie_free_irq_domain(port);
-
- return 0;
-}
-
static struct of_device_id xilinx_pcie_of_match[] = {
{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
{}
@@ -751,10 +706,5 @@ static struct platform_driver xilinx_pcie_driver = {
.suppress_bind_attrs = true,
},
.probe = xilinx_pcie_probe,
- .remove = xilinx_pcie_remove,
};
-module_platform_driver(xilinx_pcie_driver);
-
-MODULE_AUTHOR("Xilinx Inc");
-MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(xilinx_pcie_driver);
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
new file mode 100644
index 000000000000..37e29b580be3
--- /dev/null
+++ b/drivers/pci/host/vmd.c
@@ -0,0 +1,768 @@
+/*
+ * Volume Management Device driver
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+
+#include <asm/irqdomain.h>
+#include <asm/device.h>
+#include <asm/msi.h>
+#include <asm/msidef.h>
+
+#define VMD_CFGBAR 0
+#define VMD_MEMBAR1 2
+#define VMD_MEMBAR2 4
+
+/*
+ * Lock for manipulating VMD IRQ lists.
+ */
+static DEFINE_RAW_SPINLOCK(list_lock);
+
+/**
+ * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
+ * @node: list item for parent traversal.
+ * @rcu: RCU callback item for freeing.
+ * @irq: back pointer to parent.
+ * @enabled: true if driver enabled IRQ
+ * @virq: the virtual IRQ value provided to the requesting driver.
+ *
+ * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
+ * a VMD IRQ using this structure.
+ */
+struct vmd_irq {
+ struct list_head node;
+ struct rcu_head rcu;
+ struct vmd_irq_list *irq;
+ bool enabled;
+ unsigned int virq;
+};
+
+/**
+ * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
+ * @irq_list:	the list of child IRQs the VMD vector demuxes to.
+ * @count: number of child IRQs assigned to this vector; used to track
+ * sharing.
+ */
+struct vmd_irq_list {
+ struct list_head irq_list;
+ unsigned int count;
+};
+
+struct vmd_dev {
+ struct pci_dev *dev;
+
+ spinlock_t cfg_lock;
+ char __iomem *cfgbar;
+
+ int msix_count;
+ struct vmd_irq_list *irqs;
+
+ struct pci_sysdata sysdata;
+ struct resource resources[3];
+ struct irq_domain *irq_domain;
+ struct pci_bus *bus;
+
+#ifdef CONFIG_X86_DEV_DMA_OPS
+ struct dma_map_ops dma_ops;
+ struct dma_domain dma_domain;
+#endif
+};
+
+static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
+{
+ return container_of(bus->sysdata, struct vmd_dev, sysdata);
+}
+
+static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
+ struct vmd_irq_list *irqs)
+{
+ return irqs - vmd->irqs;
+}
+
+/*
+ * Drivers managing a device in a VMD domain allocate their own IRQs as before,
+ * but the MSI entry for the hardware it's driving will be programmed with a
+ * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its
+ * domain into one of its own, and the VMD driver de-muxes these for the
+ * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations
+ * and irq_chip to set this up.
+ */
+static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct vmd_irq *vmdirq = data->chip_data;
+ struct vmd_irq_list *irq = vmdirq->irq;
+ struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);
+
+ msg->address_hi = MSI_ADDR_BASE_HI;
+ msg->address_lo = MSI_ADDR_BASE_LO |
+ MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
+ msg->data = 0;
+}
+
+/*
+ * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
+ */
+static void vmd_irq_enable(struct irq_data *data)
+{
+ struct vmd_irq *vmdirq = data->chip_data;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&list_lock, flags);
+ WARN_ON(vmdirq->enabled);
+ list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+ vmdirq->enabled = true;
+ raw_spin_unlock_irqrestore(&list_lock, flags);
+
+ data->chip->irq_unmask(data);
+}
+
+static void vmd_irq_disable(struct irq_data *data)
+{
+ struct vmd_irq *vmdirq = data->chip_data;
+ unsigned long flags;
+
+ data->chip->irq_mask(data);
+
+ raw_spin_lock_irqsave(&list_lock, flags);
+ if (vmdirq->enabled) {
+ list_del_rcu(&vmdirq->node);
+ vmdirq->enabled = false;
+ }
+ raw_spin_unlock_irqrestore(&list_lock, flags);
+}
+
+/*
+ * XXX: Stubbed until we develop acceptable way to not create conflicts with
+ * other devices sharing the same vector.
+ */
+static int vmd_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip vmd_msi_controller = {
+ .name = "VMD-MSI",
+ .irq_enable = vmd_irq_enable,
+ .irq_disable = vmd_irq_disable,
+ .irq_compose_msi_msg = vmd_compose_msi_msg,
+ .irq_set_affinity = vmd_irq_set_affinity,
+};
+
+static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
+ msi_alloc_info_t *arg)
+{
+ return 0;
+}
+
+/*
+ * XXX: We can be even smarter selecting the best IRQ once we solve the
+ * affinity problem.
+ */
+static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
+{
+ int i, best = 1;
+ unsigned long flags;
+
+ if (!desc->msi_attrib.is_msix || vmd->msix_count == 1)
+ return &vmd->irqs[0];
+
+ raw_spin_lock_irqsave(&list_lock, flags);
+ for (i = 1; i < vmd->msix_count; i++)
+ if (vmd->irqs[i].count < vmd->irqs[best].count)
+ best = i;
+ vmd->irqs[best].count++;
+ raw_spin_unlock_irqrestore(&list_lock, flags);
+
+ return &vmd->irqs[best];
+}
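Note the selection policy above: vector 0 is reserved for plain MSI and for single-vector configurations, while MSI-X requests are spread across vectors 1..msix_count-1 by taking the least-loaded one (smallest count) under list_lock.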
+
+static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq,
+ msi_alloc_info_t *arg)
+{
+ struct msi_desc *desc = arg->desc;
+ struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
+ struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ unsigned int index, vector;
+
+ if (!vmdirq)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vmdirq->node);
+ vmdirq->irq = vmd_next_irq(vmd, desc);
+ vmdirq->virq = virq;
+ index = index_from_irqs(vmd, vmdirq->irq);
+ vector = pci_irq_vector(vmd->dev, index);
+
+ irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
+ handle_untracked_irq, vmd, NULL);
+ return 0;
+}
+
+static void vmd_msi_free(struct irq_domain *domain,
+ struct msi_domain_info *info, unsigned int virq)
+{
+ struct vmd_irq *vmdirq = irq_get_chip_data(virq);
+ unsigned long flags;
+
+ synchronize_rcu();
+
+ /* XXX: Potential optimization to rebalance */
+ raw_spin_lock_irqsave(&list_lock, flags);
+ vmdirq->irq->count--;
+ raw_spin_unlock_irqrestore(&list_lock, flags);
+
+ kfree_rcu(vmdirq, rcu);
+}
+
+static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *arg)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+
+ if (nvec > vmd->msix_count)
+ return vmd->msix_count;
+
+ memset(arg, 0, sizeof(*arg));
+ return 0;
+}
+
+static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+}
+
+static struct msi_domain_ops vmd_msi_domain_ops = {
+ .get_hwirq = vmd_get_hwirq,
+ .msi_init = vmd_msi_init,
+ .msi_free = vmd_msi_free,
+ .msi_prepare = vmd_msi_prepare,
+ .set_desc = vmd_set_desc,
+};
+
+static struct msi_domain_info vmd_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX,
+ .ops = &vmd_msi_domain_ops,
+ .chip = &vmd_msi_controller,
+};
+
+#ifdef CONFIG_X86_DEV_DMA_OPS
+/*
+ * VMD replaces the requester ID with its own. DMA transactions for devices
+ * in a VMD domain therefore need to be mapped for the VMD endpoint, not for
+ * the device that requested the mapping.
+ */
+static struct device *to_vmd_dev(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+
+ return &vmd->dev->dev;
+}
+
+static struct dma_map_ops *vmd_dma_ops(struct device *dev)
+{
+ return get_dma_ops(to_vmd_dev(dev));
+}
+
+static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
+ gfp_t flag, unsigned long attrs)
+{
+ return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
+ attrs);
+}
+
+static void vmd_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t addr, unsigned long attrs)
+{
+ return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
+ attrs);
+}
+
+static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t addr, size_t size,
+ unsigned long attrs)
+{
+ return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
+ size, attrs);
+}
+
+static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t addr, size_t size,
+ unsigned long attrs)
+{
+ return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
+ addr, size, attrs);
+}
+
+static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
+ dir, attrs);
+}
+
+static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
+}
+
+static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+}
+
+static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
+}
+
+static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
+}
+
+static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
+ dir);
+}
+
+static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
+}
+
+static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
+}
+
+static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
+{
+ return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
+}
+
+static int vmd_dma_supported(struct device *dev, u64 mask)
+{
+ return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
+}
+
+#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
+static u64 vmd_get_required_mask(struct device *dev)
+{
+ return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
+}
+#endif
+
+static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
+{
+ struct dma_domain *domain = &vmd->dma_domain;
+
+ if (get_dma_ops(&vmd->dev->dev))
+ del_dma_domain(domain);
+}
+
+#define ASSIGN_VMD_DMA_OPS(source, dest, fn) \
+ do { \
+ if (source->fn) \
+ dest->fn = vmd_##fn; \
+ } while (0)
+
+static void vmd_setup_dma_ops(struct vmd_dev *vmd)
+{
+ const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
+ struct dma_map_ops *dest = &vmd->dma_ops;
+ struct dma_domain *domain = &vmd->dma_domain;
+
+ domain->domain_nr = vmd->sysdata.domain;
+ domain->dma_ops = dest;
+
+ if (!source)
+ return;
+ ASSIGN_VMD_DMA_OPS(source, dest, alloc);
+ ASSIGN_VMD_DMA_OPS(source, dest, free);
+ ASSIGN_VMD_DMA_OPS(source, dest, mmap);
+ ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
+ ASSIGN_VMD_DMA_OPS(source, dest, map_page);
+ ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
+ ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
+ ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
+ ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
+ ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
+ ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
+ ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
+ ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
+ ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
+#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
+ ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
+#endif
+ add_dma_domain(domain);
+}
+#undef ASSIGN_VMD_DMA_OPS
+#else
+static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
+static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
+#endif
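The net effect, as I read this section: add_dma_domain() registers the shim ops keyed by the VMD's PCI domain number, so an ordinary DMA call from a child device driver (dma_map_page() and friends) dispatches through these wrappers, and to_vmd_dev() substitutes the VMD endpoint's struct device, the device whose requester ID the IOMMU actually observes.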
+
+static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
+ unsigned int devfn, int reg, int len)
+{
+ char __iomem *addr = vmd->cfgbar +
+ (bus->number << 20) + (devfn << 12) + reg;
+
+ if ((addr - vmd->cfgbar) + len >=
+ resource_size(&vmd->dev->resource[VMD_CFGBAR]))
+ return NULL;
+
+ return addr;
+}
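The offset arithmetic above is the standard ECAM layout of 4 KB of config space per function. A worked example with illustrative values: bus 1, devfn 0x10 (device 2, function 0), reg 0x04 maps to cfgbar offset (1 << 20) + (0x10 << 12) + 0x4 = 0x110004.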
+
+/*
+ * CPU may deadlock if config space is not serialized on some versions of this
+ * hardware, so all config space access is done under a spinlock.
+ */
+static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
+ int len, u32 *value)
+{
+ struct vmd_dev *vmd = vmd_from_bus(bus);
+ char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!addr)
+ return -EFAULT;
+
+ spin_lock_irqsave(&vmd->cfg_lock, flags);
+ switch (len) {
+ case 1:
+ *value = readb(addr);
+ break;
+ case 2:
+ *value = readw(addr);
+ break;
+ case 4:
+ *value = readl(addr);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ return ret;
+}
+
+/*
+ * VMD h/w converts non-posted config writes to posted memory writes. The
+ * read-back in this function forces the completion so it returns only after
+ * the config space was written, as expected.
+ */
+static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
+ int len, u32 value)
+{
+ struct vmd_dev *vmd = vmd_from_bus(bus);
+ char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!addr)
+ return -EFAULT;
+
+ spin_lock_irqsave(&vmd->cfg_lock, flags);
+ switch (len) {
+ case 1:
+ writeb(value, addr);
+ readb(addr);
+ break;
+ case 2:
+ writew(value, addr);
+ readw(addr);
+ break;
+ case 4:
+ writel(value, addr);
+ readl(addr);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ return ret;
+}
+
+static struct pci_ops vmd_ops = {
+ .read = vmd_pci_read,
+ .write = vmd_pci_write,
+};
+
+static void vmd_attach_resources(struct vmd_dev *vmd)
+{
+ vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
+ vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
+}
+
+static void vmd_detach_resources(struct vmd_dev *vmd)
+{
+ vmd->dev->resource[VMD_MEMBAR1].child = NULL;
+ vmd->dev->resource[VMD_MEMBAR2].child = NULL;
+}
+
+/*
+ * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
+ */
+static int vmd_find_free_domain(void)
+{
+ int domain = 0xffff;
+ struct pci_bus *bus = NULL;
+
+ while ((bus = pci_find_next_bus(bus)) != NULL)
+ domain = max_t(int, domain, pci_domain_nr(bus));
+ return domain + 1;
+}
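Worked example: on a machine whose only pre-existing host bridge sits in domain 0, the loop leaves domain at its 0xffff floor and the function returns 0x10000 for the first VMD segment; a second VMD device, enumerated after the first one's root bus exists, would get 0x10001.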
+
+static int vmd_enable_domain(struct vmd_dev *vmd)
+{
+ struct pci_sysdata *sd = &vmd->sysdata;
+ struct resource *res;
+ u32 upper_bits;
+ unsigned long flags;
+ LIST_HEAD(resources);
+
+ res = &vmd->dev->resource[VMD_CFGBAR];
+ vmd->resources[0] = (struct resource) {
+ .name = "VMD CFGBAR",
+ .start = 0,
+ .end = (resource_size(res) >> 20) - 1,
+ .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
+ };
+
+ /*
+ * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
+ * put 32-bit resources in the window.
+ *
+ * There's no hardware reason why a 64-bit window *couldn't*
+ * contain a 32-bit resource, but pbus_size_mem() computes the
+ * bridge window size assuming a 64-bit window will contain no
+ * 32-bit resources. __pci_assign_resource() enforces that
+ * artificial restriction to make sure everything will fit.
+ *
+	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
+ * if its address is <4GB so that we can convert it to a 32-bit
+ * resource. To be visible to the host OS, all VMD endpoints must
+ * be initially configured by platform BIOS, which includes setting
+ * up these resources. We can assume the device is configured
+ * according to the platform needs.
+ */
+ res = &vmd->dev->resource[VMD_MEMBAR1];
+ upper_bits = upper_32_bits(res->end);
+ flags = res->flags & ~IORESOURCE_SIZEALIGN;
+ if (!upper_bits)
+ flags &= ~IORESOURCE_MEM_64;
+ vmd->resources[1] = (struct resource) {
+ .name = "VMD MEMBAR1",
+ .start = res->start,
+ .end = res->end,
+ .flags = flags,
+ .parent = res,
+ };
+
+ res = &vmd->dev->resource[VMD_MEMBAR2];
+ upper_bits = upper_32_bits(res->end);
+ flags = res->flags & ~IORESOURCE_SIZEALIGN;
+ if (!upper_bits)
+ flags &= ~IORESOURCE_MEM_64;
+ vmd->resources[2] = (struct resource) {
+ .name = "VMD MEMBAR2",
+ .start = res->start + 0x2000,
+ .end = res->end,
+ .flags = flags,
+ .parent = res,
+ };
+
+ sd->vmd_domain = true;
+ sd->domain = vmd_find_free_domain();
+ if (sd->domain < 0)
+ return sd->domain;
+
+ sd->node = pcibus_to_node(vmd->dev->bus);
+
+ vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info,
+ x86_vector_domain);
+ if (!vmd->irq_domain)
+ return -ENODEV;
+
+ pci_add_resource(&resources, &vmd->resources[0]);
+ pci_add_resource(&resources, &vmd->resources[1]);
+ pci_add_resource(&resources, &vmd->resources[2]);
+ vmd->bus = pci_create_root_bus(&vmd->dev->dev, 0, &vmd_ops, sd,
+ &resources);
+ if (!vmd->bus) {
+ pci_free_resource_list(&resources);
+ irq_domain_remove(vmd->irq_domain);
+ return -ENODEV;
+ }
+
+ vmd_attach_resources(vmd);
+ vmd_setup_dma_ops(vmd);
+ dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+ pci_rescan_bus(vmd->bus);
+
+ WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
+ "domain"), "Can't create symlink to domain\n");
+ return 0;
+}
+
+static irqreturn_t vmd_irq(int irq, void *data)
+{
+ struct vmd_irq_list *irqs = data;
+ struct vmd_irq *vmdirq;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
+ generic_handle_irq(vmdirq->virq);
+ rcu_read_unlock();
+
+ return IRQ_HANDLED;
+}
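Putting vmd_compose_msi_msg() and vmd_irq() together, a simplified, hypothetical trace for a child device whose MSI-X descriptor lands on vector index 3:

/*
 * 1. vmd_msi_init() picks vmd->irqs[3] via vmd_next_irq(); the message
 *    composed for the child encodes destination ID 3.
 * 2. The device fires: the VMD endpoint's MSI-X entry 3 raises the
 *    Linux IRQ returned by pci_irq_vector(vmd->dev, 3).
 * 3. vmd_irq() runs with &vmd->irqs[3] as its data, walks irq_list
 *    under RCU, and calls generic_handle_irq(vmdirq->virq) for every
 *    child IRQ registered on that vector.
 */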
+
+static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct vmd_dev *vmd;
+ int i, err;
+
+ if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
+ return -ENOMEM;
+
+ vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
+ if (!vmd)
+ return -ENOMEM;
+
+ vmd->dev = dev;
+ err = pcim_enable_device(dev);
+ if (err < 0)
+ return err;
+
+ vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
+ if (!vmd->cfgbar)
+ return -ENOMEM;
+
+ pci_set_master(dev);
+ if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
+ return -ENODEV;
+
+ vmd->msix_count = pci_msix_vec_count(dev);
+ if (vmd->msix_count < 0)
+ return -ENODEV;
+
+ vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (vmd->msix_count < 0)
+ return vmd->msix_count;
+
+ vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
+ GFP_KERNEL);
+ if (!vmd->irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < vmd->msix_count; i++) {
+ INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
+ err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+ vmd_irq, 0, "vmd", &vmd->irqs[i]);
+ if (err)
+ return err;
+ }
+
+ spin_lock_init(&vmd->cfg_lock);
+ pci_set_drvdata(dev, vmd);
+ err = vmd_enable_domain(vmd);
+ if (err)
+ return err;
+
+ dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
+ vmd->sysdata.domain);
+ return 0;
+}
+
+static void vmd_remove(struct pci_dev *dev)
+{
+ struct vmd_dev *vmd = pci_get_drvdata(dev);
+
+ vmd_detach_resources(vmd);
+ pci_set_drvdata(dev, NULL);
+ sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
+ pci_stop_root_bus(vmd->bus);
+ pci_remove_root_bus(vmd->bus);
+ vmd_teardown_dma_ops(vmd);
+ irq_domain_remove(vmd->irq_domain);
+}
+
+#ifdef CONFIG_PM
+static int vmd_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ pci_save_state(pdev);
+ return 0;
+}
+
+static int vmd_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ pci_restore_state(pdev);
+ return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
+
+static const struct pci_device_id vmd_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x201d),},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, vmd_ids);
+
+static struct pci_driver vmd_drv = {
+ .name = "vmd",
+ .id_table = vmd_ids,
+ .probe = vmd_probe,
+ .remove = vmd_remove,
+ .driver = {
+ .pm = &vmd_dev_pm_ops,
+ },
+};
+module_pci_driver(vmd_drv);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.6");
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 555bcde3b196..60e66e027ebc 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -101,10 +101,8 @@ int cpci_unconfigure_slot(struct slot *slot);
#ifdef CONFIG_HOTPLUG_PCI_CPCI
int cpci_hotplug_init(int debug);
-void cpci_hotplug_exit(void);
#else
static inline int cpci_hotplug_init(int debug) { return 0; }
-static inline void cpci_hotplug_exit(void) { }
#endif
#endif /* _CPCI_HOTPLUG_H */
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 7d3866c47312..7ec8a8f72c69 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -719,13 +719,3 @@ cpci_hotplug_init(int debug)
cpci_debug = debug;
return 0;
}
-
-void __exit
-cpci_hotplug_exit(void)
-{
- /*
- * Clean everything up.
- */
- cpci_hp_stop();
- cpci_hp_unregister_controller(controller);
-}
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 9acd1997c6fe..fea0b8b33589 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -25,7 +25,7 @@
*
*/
-#include <linux/module.h>
+#include <linux/module.h> /* try_module_get & module_put */
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -537,17 +537,11 @@ static int __init pci_hotplug_init(void)
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return result;
}
+device_initcall(pci_hotplug_init);
-static void __exit pci_hotplug_exit(void)
-{
- cpci_hotplug_exit();
-}
-
-module_init(pci_hotplug_init);
-module_exit(pci_hotplug_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+/*
+ * not really modular, but the easiest way to keep compat with existing
+ * bootargs behaviour is to continue using module_param here.
+ */
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e764918641ae..37d70b5ad22f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -152,6 +152,9 @@ bool pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_reset_slot(struct slot *slot, int probe);
+int pciehp_set_raw_indicator_status(struct hotplug_slot *h_slot, u8 status);
+int pciehp_get_raw_indicator_status(struct hotplug_slot *h_slot, u8 *status);
+
static inline const char *slot_name(struct slot *slot)
{
return hotplug_slot_name(slot->hotplug_slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index ac531e674a05..7d32fa33dcef 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -27,7 +27,6 @@
*
*/
-#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -47,10 +46,10 @@ static bool pciehp_force;
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
#define DRIVER_DESC "PCI Express Hot Plug Controller Driver"
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-
+/*
+ * not really modular, but the easiest way to keep compat with existing
+ * bootargs behaviour is to continue using module_param here.
+ */
module_param(pciehp_debug, bool, 0644);
module_param(pciehp_poll_mode, bool, 0644);
module_param(pciehp_poll_time, int, 0644);
@@ -114,6 +113,9 @@ static int init_slot(struct controller *ctrl)
if (ATTN_LED(ctrl)) {
ops->get_attention_status = get_attention_status;
ops->set_attention_status = set_attention_status;
+ } else if (ctrl->pcie->port->hotplug_user_indicators) {
+ ops->get_attention_status = pciehp_get_raw_indicator_status;
+ ops->set_attention_status = pciehp_set_raw_indicator_status;
}
/* register this slot with the hotplug pci core */
@@ -337,13 +339,4 @@ static int __init pcied_init(void)
return retval;
}
-
-static void __exit pcied_cleanup(void)
-{
- dbg("unload_pciehpd()\n");
- pcie_port_service_unregister(&hpdriver_portdrv);
- info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
-}
-
-module_init(pcied_init);
-module_exit(pcied_cleanup);
+device_initcall(pcied_init);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 880978b6d534..efe69e879455 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -106,7 +106,7 @@ static int board_added(struct slot *p_slot)
/* Check for a power fault */
if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
- ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(p_slot));
retval = -EIO;
goto err_exit;
}
@@ -120,6 +120,7 @@ static int board_added(struct slot *p_slot)
}
pciehp_green_led_on(p_slot);
+ pciehp_set_attention_status(p_slot, 0);
return 0;
err_exit:
@@ -253,11 +254,11 @@ static void handle_button_press_event(struct slot *p_slot)
pciehp_get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
- ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
+ ctrl_info(ctrl, "Slot(%s): Powering off due to button press\n",
slot_name(p_slot));
} else {
p_slot->state = BLINKINGON_STATE;
- ctrl_info(ctrl, "PCI slot #%s - powering on due to button press\n",
+		ctrl_info(ctrl, "Slot(%s): Powering on due to button press\n",
slot_name(p_slot));
}
/* blink green LED and turn off amber */
@@ -272,14 +273,14 @@ static void handle_button_press_event(struct slot *p_slot)
* press the attention again before the 5 sec. limit
* expires to cancel hot-add or hot-remove
*/
- ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
+ ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE)
pciehp_green_led_on(p_slot);
else
pciehp_green_led_off(p_slot);
pciehp_set_attention_status(p_slot, 0);
- ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
+ ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n",
slot_name(p_slot));
p_slot->state = STATIC_STATE;
break;
@@ -290,10 +291,12 @@ static void handle_button_press_event(struct slot *p_slot)
* this means that the previous attention button action
	 * to hot-add or hot-remove is still in progress
*/
- ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
+ ctrl_info(ctrl, "Slot(%s): Button ignored\n",
+ slot_name(p_slot));
break;
default:
- ctrl_warn(ctrl, "ignoring invalid state %#x\n", p_slot->state);
+ ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
+ slot_name(p_slot), p_slot->state);
break;
}
}
@@ -301,20 +304,6 @@ static void handle_button_press_event(struct slot *p_slot)
/*
* Note: This function must be called with slot->lock held
*/
-static void handle_surprise_event(struct slot *p_slot)
-{
- u8 getstatus;
-
- pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus)
- pciehp_queue_power_work(p_slot, DISABLE_REQ);
- else
- pciehp_queue_power_work(p_slot, ENABLE_REQ);
-}
-
-/*
- * Note: This function must be called with slot->lock held
- */
static void handle_link_event(struct slot *p_slot, u32 event)
{
struct controller *ctrl = p_slot->ctrl;
@@ -330,31 +319,27 @@ static void handle_link_event(struct slot *p_slot, u32 event)
break;
case POWERON_STATE:
if (event == INT_LINK_UP) {
- ctrl_info(ctrl,
- "Link Up event ignored on slot(%s): already powering on\n",
+ ctrl_info(ctrl, "Slot(%s): Link Up event ignored; already powering on\n",
slot_name(p_slot));
} else {
- ctrl_info(ctrl,
- "Link Down event queued on slot(%s): currently getting powered on\n",
+ ctrl_info(ctrl, "Slot(%s): Link Down event queued; currently getting powered on\n",
slot_name(p_slot));
pciehp_queue_power_work(p_slot, DISABLE_REQ);
}
break;
case POWEROFF_STATE:
if (event == INT_LINK_UP) {
- ctrl_info(ctrl,
- "Link Up event queued on slot(%s): currently getting powered off\n",
+ ctrl_info(ctrl, "Slot(%s): Link Up event queued; currently getting powered off\n",
slot_name(p_slot));
pciehp_queue_power_work(p_slot, ENABLE_REQ);
} else {
- ctrl_info(ctrl,
- "Link Down event ignored on slot(%s): already powering off\n",
+ ctrl_info(ctrl, "Slot(%s): Link Down event ignored; already powering off\n",
slot_name(p_slot));
}
break;
default:
- ctrl_err(ctrl, "ignoring invalid state %#x on slot(%s)\n",
- p_slot->state, slot_name(p_slot));
+ ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
+ slot_name(p_slot), p_slot->state);
break;
}
}
@@ -377,14 +362,14 @@ static void interrupt_event_handler(struct work_struct *work)
pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
- handle_surprise_event(p_slot);
+ pciehp_queue_power_work(p_slot, ENABLE_REQ);
break;
case INT_PRESENCE_OFF:
/*
* Regardless of surprise capability, we need to
* definitely remove a card that has been pulled out!
*/
- handle_surprise_event(p_slot);
+ pciehp_queue_power_work(p_slot, DISABLE_REQ);
break;
case INT_LINK_UP:
case INT_LINK_DOWN:
@@ -404,18 +389,17 @@ static void interrupt_event_handler(struct work_struct *work)
int pciehp_enable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
- int rc;
struct controller *ctrl = p_slot->ctrl;
pciehp_get_adapter_status(p_slot, &getstatus);
if (!getstatus) {
- ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
+ ctrl_info(ctrl, "Slot(%s): No adapter\n", slot_name(p_slot));
return -ENODEV;
}
if (MRL_SENS(p_slot->ctrl)) {
pciehp_get_latch_status(p_slot, &getstatus);
if (getstatus) {
- ctrl_info(ctrl, "Latch open on slot(%s)\n",
+ ctrl_info(ctrl, "Slot(%s): Latch open\n",
slot_name(p_slot));
return -ENODEV;
}
@@ -424,19 +408,13 @@ int pciehp_enable_slot(struct slot *p_slot)
if (POWER_CTRL(p_slot->ctrl)) {
pciehp_get_power_status(p_slot, &getstatus);
if (getstatus) {
- ctrl_info(ctrl, "Already enabled on slot(%s)\n",
+ ctrl_info(ctrl, "Slot(%s): Already enabled\n",
slot_name(p_slot));
return -EINVAL;
}
}
- pciehp_get_latch_status(p_slot, &getstatus);
-
- rc = board_added(p_slot);
- if (rc)
- pciehp_get_latch_status(p_slot, &getstatus);
-
- return rc;
+ return board_added(p_slot);
}
/*
@@ -453,7 +431,7 @@ int pciehp_disable_slot(struct slot *p_slot)
if (POWER_CTRL(p_slot->ctrl)) {
pciehp_get_power_status(p_slot, &getstatus);
if (!getstatus) {
- ctrl_info(ctrl, "Already disabled on slot(%s)\n",
+ ctrl_info(ctrl, "Slot(%s): Already disabled\n",
slot_name(p_slot));
return -EINVAL;
}
@@ -481,17 +459,17 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
p_slot->state = STATIC_STATE;
break;
case POWERON_STATE:
- ctrl_info(ctrl, "Slot %s is already in powering on state\n",
+ ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
slot_name(p_slot));
break;
case BLINKINGOFF_STATE:
case POWEROFF_STATE:
- ctrl_info(ctrl, "Already enabled on slot %s\n",
+ ctrl_info(ctrl, "Slot(%s): Already enabled\n",
slot_name(p_slot));
break;
default:
- ctrl_err(ctrl, "invalid state %#x on slot %s\n",
- p_slot->state, slot_name(p_slot));
+ ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n",
+ slot_name(p_slot), p_slot->state);
break;
}
mutex_unlock(&p_slot->lock);
@@ -518,17 +496,17 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
p_slot->state = STATIC_STATE;
break;
case POWEROFF_STATE:
- ctrl_info(ctrl, "Slot %s is already in powering off state\n",
+ ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
slot_name(p_slot));
break;
case BLINKINGON_STATE:
case POWERON_STATE:
- ctrl_info(ctrl, "Already disabled on slot %s\n",
+ ctrl_info(ctrl, "Slot(%s): Already disabled\n",
slot_name(p_slot));
break;
default:
- ctrl_err(ctrl, "invalid state %#x on slot %s\n",
- p_slot->state, slot_name(p_slot));
+ ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n",
+ slot_name(p_slot), p_slot->state);
break;
}
mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 08e84d61874e..b57fc6d6e28a 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -355,6 +355,18 @@ static int pciehp_link_enable(struct controller *ctrl)
return __pciehp_link_set(ctrl, true);
}
+int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
+ u8 *status)
+{
+ struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ u16 slot_ctrl;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ *status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
+ return 0;
+}
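For reference: PCI_EXP_SLTCTL_AIC occupies Slot Control bits 7:6 and PCI_EXP_SLTCTL_PIC bits 9:8, so the >> 6 shift packs both indicators into the low nibble of *status, attention indicator control in bits 1:0 and power indicator control in bits 3:2.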
+
void pciehp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
@@ -431,6 +443,17 @@ int pciehp_query_power_fault(struct slot *slot)
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
+int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
+ u8 status)
+{
+ struct slot *slot = hotplug_slot->private;
+ struct controller *ctrl = slot->ctrl;
+
+ pcie_write_cmd_nowait(ctrl, status << 6,
+ PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
+ return 0;
+}
+
void pciehp_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
@@ -535,14 +558,14 @@ void pciehp_power_off_slot(struct slot *slot)
PCI_EXP_SLTCTL_PWR_OFF);
}
-static irqreturn_t pcie_isr(int irq, void *dev_id)
+static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
struct pci_bus *subordinate = pdev->subordinate;
struct pci_dev *dev;
struct slot *slot = ctrl->slot;
- u16 detected, intr_loc;
+ u16 status, events;
u8 present;
bool link;
@@ -550,36 +573,31 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
if (pdev->current_state == PCI_D3cold)
return IRQ_NONE;
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
+ if (status == (u16) ~0) {
+ ctrl_info(ctrl, "%s: no response from device\n", __func__);
+ return IRQ_NONE;
+ }
+
/*
- * In order to guarantee that all interrupt events are
- * serviced, we need to re-inspect Slot Status register after
- * clearing what is presumed to be the last pending interrupt.
+ * Slot Status contains plain status bits as well as event
+ * notification bits; right now we only want the event bits.
*/
- intr_loc = 0;
- do {
- pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
- if (detected == (u16) ~0) {
- ctrl_info(ctrl, "%s: no response from device\n",
- __func__);
- return IRQ_HANDLED;
- }
+ events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC);
+ if (!events)
+ return IRQ_NONE;
- detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_PDC |
- PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
- detected &= ~intr_loc;
- intr_loc |= detected;
- if (!intr_loc)
- return IRQ_NONE;
- if (detected)
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
- intr_loc);
- } while (detected);
+ /* Capture link status before clearing interrupts */
+ if (events & PCI_EXP_SLTSTA_DLLSC)
+ link = pciehp_check_link_active(ctrl);
- ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", intr_loc);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+ ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
/* Check Command Complete Interrupt Pending */
- if (intr_loc & PCI_EXP_SLTSTA_CC) {
+ if (events & PCI_EXP_SLTSTA_CC) {
ctrl->cmd_busy = 0;
smp_mb();
wake_up(&ctrl->queue);
@@ -589,42 +607,38 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
list_for_each_entry(dev, &subordinate->devices, bus_list) {
if (dev->ignore_hotplug) {
ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n",
- intr_loc, pci_name(dev));
+ events, pci_name(dev));
return IRQ_HANDLED;
}
}
}
- if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
- return IRQ_HANDLED;
-
/* Check Attention Button Pressed */
- if (intr_loc & PCI_EXP_SLTSTA_ABP) {
- ctrl_info(ctrl, "Button pressed on Slot(%s)\n",
+ if (events & PCI_EXP_SLTSTA_ABP) {
+ ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
slot_name(slot));
pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS);
}
/* Check Presence Detect Changed */
- if (intr_loc & PCI_EXP_SLTSTA_PDC) {
- pciehp_get_adapter_status(slot, &present);
- ctrl_info(ctrl, "Card %spresent on Slot(%s)\n",
- present ? "" : "not ", slot_name(slot));
+ if (events & PCI_EXP_SLTSTA_PDC) {
+ present = !!(status & PCI_EXP_SLTSTA_PDS);
+ ctrl_info(ctrl, "Slot(%s): Card %spresent\n", slot_name(slot),
+ present ? "" : "not ");
pciehp_queue_interrupt_event(slot, present ? INT_PRESENCE_ON :
INT_PRESENCE_OFF);
}
/* Check Power Fault Detected */
- if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+ if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
- ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(slot));
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
pciehp_queue_interrupt_event(slot, INT_POWER_FAULT);
}
- if (intr_loc & PCI_EXP_SLTSTA_DLLSC) {
- link = pciehp_check_link_active(ctrl);
- ctrl_info(ctrl, "slot(%s): Link %s event\n",
- slot_name(slot), link ? "Up" : "Down");
+ if (events & PCI_EXP_SLTSTA_DLLSC) {
+ ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
+ link ? "Up" : "Down");
pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
INT_LINK_DOWN);
}
@@ -632,6 +646,25 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t pcie_isr(int irq, void *dev_id)
+{
+ irqreturn_t rc, handled = IRQ_NONE;
+
+ /*
+ * To guarantee that all interrupt events are serviced, we need to
+ * re-inspect Slot Status register after clearing what is presumed
+ * to be the last pending interrupt.
+ */
+ do {
+ rc = pciehp_isr(irq, dev_id);
+ if (rc == IRQ_HANDLED)
+ handled = IRQ_HANDLED;
+ } while (rc == IRQ_HANDLED);
+
+ /* Return IRQ_HANDLED if we handled one or more events */
+ return handled;
+}
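A behavioural detail worth noting: pciehp_isr() now returns IRQ_NONE when the device does not respond (the old loop returned IRQ_HANDLED there), so on shared interrupt lines the core's spurious-IRQ accounting gets an honest answer.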
+
void pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -804,6 +837,10 @@ struct controller *pcie_init(struct pcie_device *dev)
}
ctrl->pcie = dev;
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
+
+ if (pdev->hotplug_user_indicators)
+ slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);
+
ctrl->slot_cap = slot_cap;
mutex_init(&ctrl->ctrl_lock);
init_waitqueue_head(&ctrl->queue);
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index e6245b03f0a1..56efaf72d08e 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -22,6 +22,12 @@
#define DRIVER_AUTHOR "Gavin Shan, IBM Corporation"
#define DRIVER_DESC "PowerPC PowerNV PCI Hotplug Driver"
+struct pnv_php_event {
+ bool added;
+ struct pnv_php_slot *php_slot;
+ struct work_struct work;
+};
+
static LIST_HEAD(pnv_php_slot_list);
static DEFINE_SPINLOCK(pnv_php_lock);
@@ -29,12 +35,40 @@ static void pnv_php_register(struct device_node *dn);
static void pnv_php_unregister_one(struct device_node *dn);
static void pnv_php_unregister(struct device_node *dn);
+static void pnv_php_disable_irq(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ u16 ctrl;
+
+ if (php_slot->irq > 0) {
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl);
+ ctrl &= ~(PCI_EXP_SLTCTL_HPIE |
+ PCI_EXP_SLTCTL_PDCE |
+ PCI_EXP_SLTCTL_DLLSCE);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl);
+
+ free_irq(php_slot->irq, php_slot);
+ php_slot->irq = 0;
+ }
+
+ if (php_slot->wq) {
+ destroy_workqueue(php_slot->wq);
+ php_slot->wq = NULL;
+ }
+
+ if (pdev->msix_enabled)
+ pci_disable_msix(pdev);
+ else if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+}
+
static void pnv_php_free_slot(struct kref *kref)
{
struct pnv_php_slot *php_slot = container_of(kref,
struct pnv_php_slot, kref);
WARN_ON(!list_empty(&php_slot->children));
+ pnv_php_disable_irq(php_slot);
kfree(php_slot->name);
kfree(php_slot);
}
@@ -122,7 +156,7 @@ static void pnv_php_detach_device_nodes(struct device_node *parent)
of_node_put(dn);
refcount = atomic_read(&dn->kobj.kref.refcount);
- if (unlikely(refcount != 1))
+ if (refcount != 1)
pr_warn("Invalid refcount %d on <%s>\n",
refcount, of_node_full_name(dn));
@@ -184,11 +218,11 @@ static int pnv_php_populate_changeset(struct of_changeset *ocs,
for_each_child_of_node(dn, child) {
ret = of_changeset_attach_node(ocs, child);
- if (unlikely(ret))
+ if (ret)
break;
ret = pnv_php_populate_changeset(ocs, child);
- if (unlikely(ret))
+ if (ret)
break;
}
@@ -201,7 +235,7 @@ static void *pnv_php_add_one_pdn(struct device_node *dn, void *data)
struct pci_dn *pdn;
pdn = pci_add_device_node_info(hose, dn);
- if (unlikely(!pdn))
+ if (!pdn)
return ERR_PTR(-ENOMEM);
return NULL;
@@ -224,21 +258,21 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
* fits the real size.
*/
fdt1 = kzalloc(0x10000, GFP_KERNEL);
- if (unlikely(!fdt1)) {
+ if (!fdt1) {
ret = -ENOMEM;
dev_warn(&php_slot->pdev->dev, "Cannot alloc FDT blob\n");
goto out;
}
ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d getting FDT blob\n",
ret);
goto free_fdt1;
}
fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL);
- if (unlikely(!fdt)) {
+ if (!fdt) {
ret = -ENOMEM;
dev_warn(&php_slot->pdev->dev, "Cannot %d bytes memory\n",
fdt_totalsize(fdt1));
@@ -248,7 +282,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
/* Unflatten device tree blob */
memcpy(fdt, fdt1, fdt_totalsize(fdt1));
dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
- if (unlikely(!dt)) {
+ if (!dt) {
ret = -EINVAL;
dev_warn(&php_slot->pdev->dev, "Cannot unflatten FDT\n");
goto free_fdt;
@@ -258,7 +292,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
of_changeset_init(&php_slot->ocs);
pnv_php_reverse_nodes(php_slot->dn);
ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn);
- if (unlikely(ret)) {
+ if (ret) {
pnv_php_reverse_nodes(php_slot->dn);
dev_warn(&php_slot->pdev->dev, "Error %d populating changeset\n",
ret);
@@ -267,7 +301,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
php_slot->dn->child = NULL;
ret = of_changeset_apply(&php_slot->ocs);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d applying changeset\n",
ret);
goto destroy_changeset;
@@ -301,7 +335,7 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
int ret;
ret = pnv_pci_set_power_state(php_slot->id, state, &msg);
- if (likely(ret > 0)) {
+ if (ret > 0) {
if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle ||
be64_to_cpu(msg.params[2]) != state ||
be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
@@ -311,7 +345,7 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
be64_to_cpu(msg.params[3]));
return -ENOMSG;
}
- } else if (unlikely(ret < 0)) {
+ } else if (ret < 0) {
dev_warn(&php_slot->pdev->dev, "Error %d powering %s\n",
ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
return ret;
@@ -338,7 +372,7 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
* be on.
*/
ret = pnv_pci_get_power_state(php_slot->id, &power_state);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d getting power status\n",
ret);
} else {
@@ -360,7 +394,7 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
	 * get that, it falls back to empty.
*/
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
- if (likely(ret >= 0)) {
+ if (ret >= 0) {
*state = presence;
slot->info->adapter_status = presence;
ret = 0;
@@ -393,7 +427,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
/* Retrieve slot presence status */
ret = pnv_php_get_adapter_state(slot, &presence);
- if (unlikely(ret))
+ if (ret)
return ret;
	/* Proceed if there is nothing behind the slot */
@@ -414,7 +448,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
php_slot->power_state_check = true;
ret = pnv_php_get_power_state(slot, &power_status);
- if (unlikely(ret))
+ if (ret)
return ret;
if (power_status != OPAL_PCI_SLOT_POWER_ON)
@@ -423,7 +457,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
/* Check the power status. Scan the slot if it is already on */
ret = pnv_php_get_power_state(slot, &power_status);
- if (unlikely(ret))
+ if (ret)
return ret;
if (power_status == OPAL_PCI_SLOT_POWER_ON)
@@ -431,7 +465,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
/* Power is off, turn it on and then scan the slot */
ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
- if (unlikely(ret))
+ if (ret)
return ret;
scan:
@@ -513,29 +547,30 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
struct pci_bus *bus;
const char *label;
uint64_t id;
+ int ret;
- label = of_get_property(dn, "ibm,slot-label", NULL);
- if (unlikely(!label))
+ ret = of_property_read_string(dn, "ibm,slot-label", &label);
+ if (ret)
return NULL;
if (pnv_pci_get_slot_id(dn, &id))
return NULL;
bus = pci_find_bus_by_node(dn);
- if (unlikely(!bus))
+ if (!bus)
return NULL;
php_slot = kzalloc(sizeof(*php_slot), GFP_KERNEL);
- if (unlikely(!php_slot))
+ if (!php_slot)
return NULL;
php_slot->name = kstrdup(label, GFP_KERNEL);
- if (unlikely(!php_slot->name)) {
+ if (!php_slot->name) {
kfree(php_slot);
return NULL;
}
- if (likely(dn->child && PCI_DN(dn->child)))
+ if (dn->child && PCI_DN(dn->child))
php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn);
else
php_slot->slot_no = -1; /* Placeholder slot */
@@ -567,7 +602,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
/* Check if the slot is registered or not */
parent = pnv_php_find_slot(php_slot->dn);
- if (unlikely(parent)) {
+ if (parent) {
pnv_php_put_slot(parent);
return -EEXIST;
}
@@ -575,7 +610,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
/* Register PCI slot */
ret = pci_hp_register(&php_slot->slot, php_slot->bus,
php_slot->slot_no, php_slot->name);
- if (unlikely(ret)) {
+ if (ret) {
dev_warn(&php_slot->pdev->dev, "Error %d registering slot\n",
ret);
return ret;
@@ -609,33 +644,213 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
return 0;
}
+static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ struct msix_entry entry;
+ int nr_entries, ret;
+ u16 pcie_flag;
+
+ /* Get total number of MSIx entries */
+ nr_entries = pci_msix_vec_count(pdev);
+ if (nr_entries < 0)
+ return nr_entries;
+
+ /* Check hotplug MSIx entry is in range */
+ pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &pcie_flag);
+ entry.entry = (pcie_flag & PCI_EXP_FLAGS_IRQ) >> 9;
+ if (entry.entry >= nr_entries)
+ return -ERANGE;
+
+ /* Enable MSIx */
+ ret = pci_enable_msix_exact(pdev, &entry, 1);
+ if (ret) {
+ dev_warn(&pdev->dev, "Error %d enabling MSIx\n", ret);
+ return ret;
+ }
+
+ return entry.vector;
+}
+
+static void pnv_php_event_handler(struct work_struct *work)
+{
+ struct pnv_php_event *event =
+ container_of(work, struct pnv_php_event, work);
+ struct pnv_php_slot *php_slot = event->php_slot;
+
+ if (event->added)
+ pnv_php_enable_slot(&php_slot->slot);
+ else
+ pnv_php_disable_slot(&php_slot->slot);
+
+ kfree(event);
+}
+
+static irqreturn_t pnv_php_interrupt(int irq, void *data)
+{
+ struct pnv_php_slot *php_slot = data;
+ struct pci_dev *pchild, *pdev = php_slot->pdev;
+ struct eeh_dev *edev;
+ struct eeh_pe *pe;
+ struct pnv_php_event *event;
+ u16 sts, lsts;
+ u8 presence;
+ bool added;
+ unsigned long flags;
+ int ret;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts);
+ sts &= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts);
+ if (sts & PCI_EXP_SLTSTA_DLLSC) {
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts);
+ added = !!(lsts & PCI_EXP_LNKSTA_DLLLA);
+ } else if (sts & PCI_EXP_SLTSTA_PDC) {
+ ret = pnv_pci_get_presence_state(php_slot->id, &presence);
+ if (!ret)
+ return IRQ_HANDLED;
+ added = !!(presence == OPAL_PCI_SLOT_PRESENT);
+ } else {
+ return IRQ_NONE;
+ }
+
+ /* Freeze the removed PE to avoid unexpected error reporting */
+ if (!added) {
+ pchild = list_first_entry_or_null(&php_slot->bus->devices,
+ struct pci_dev, bus_list);
+ edev = pchild ? pci_dev_to_eeh_dev(pchild) : NULL;
+ pe = edev ? edev->pe : NULL;
+ if (pe) {
+ eeh_serialize_lock(&flags);
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_serialize_unlock(flags);
+ eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
+ }
+ }
+
+ /*
+ * The PE is left in frozen state if the event is missed. It's
+ * fine as the PCI devices (PE) aren't functional any more.
+ */
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event) {
+ dev_warn(&pdev->dev, "PCI slot [%s] missed hotplug event 0x%04x\n",
+ php_slot->name, sts);
+ return IRQ_HANDLED;
+ }
+
+ dev_info(&pdev->dev, "PCI slot [%s] %s (IRQ: %d)\n",
+ php_slot->name, added ? "added" : "removed", irq);
+ INIT_WORK(&event->work, pnv_php_event_handler);
+ event->added = added;
+ event->php_slot = php_slot;
+ queue_work(php_slot->wq, &event->work);
+
+ return IRQ_HANDLED;
+}
+
+static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ u16 sts, ctrl;
+ int ret;
+
+ /* Allocate workqueue */
+ php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
+ if (!php_slot->wq) {
+ dev_warn(&pdev->dev, "Cannot alloc workqueue\n");
+ pnv_php_disable_irq(php_slot);
+ return;
+ }
+
+ /* Clear pending interrupts */
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts);
+ sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts);
+
+ /* Request the interrupt */
+ ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED,
+ php_slot->name, php_slot);
+ if (ret) {
+ pnv_php_disable_irq(php_slot);
+ dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq);
+ return;
+ }
+
+ /* Enable the interrupts */
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl);
+ ctrl |= (PCI_EXP_SLTCTL_HPIE |
+ PCI_EXP_SLTCTL_PDCE |
+ PCI_EXP_SLTCTL_DLLSCE);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl);
+
+ /* The interrupt is initialized successfully when @irq is valid */
+ php_slot->irq = irq;
+}
+
+static void pnv_php_enable_irq(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ int irq, ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_warn(&pdev->dev, "Error %d enabling device\n", ret);
+ return;
+ }
+
+ pci_set_master(pdev);
+
+ /* Enable MSIx interrupt */
+ irq = pnv_php_enable_msix(php_slot);
+ if (irq > 0) {
+ pnv_php_init_irq(php_slot, irq);
+ return;
+ }
+
+ /*
+	 * Use MSI if MSI-X doesn't work. Fall back to legacy INTx
+ * if MSI doesn't work either
+ */
+ ret = pci_enable_msi(pdev);
+ if (!ret || pdev->irq) {
+ irq = pdev->irq;
+ pnv_php_init_irq(php_slot, irq);
+ }
+}
+
static int pnv_php_register_one(struct device_node *dn)
{
struct pnv_php_slot *php_slot;
- const __be32 *prop32;
+ u32 prop32;
int ret;
/* Check if it's a hotpluggable slot */
- prop32 = of_get_property(dn, "ibm,slot-pluggable", NULL);
- if (!prop32 || !of_read_number(prop32, 1))
+ ret = of_property_read_u32(dn, "ibm,slot-pluggable", &prop32);
+ if (ret || !prop32)
return -ENXIO;
- prop32 = of_get_property(dn, "ibm,reset-by-firmware", NULL);
- if (!prop32 || !of_read_number(prop32, 1))
+ ret = of_property_read_u32(dn, "ibm,reset-by-firmware", &prop32);
+ if (ret || !prop32)
return -ENXIO;
php_slot = pnv_php_alloc_slot(dn);
- if (unlikely(!php_slot))
+ if (!php_slot)
return -ENODEV;
ret = pnv_php_register_slot(php_slot);
- if (unlikely(ret))
+ if (ret)
goto free_slot;
ret = pnv_php_enable(php_slot, false);
- if (unlikely(ret))
+ if (ret)
goto unregister_slot;
+ /* Enable interrupt if the slot supports surprise hotplug */
+ ret = of_property_read_u32(dn, "ibm,slot-surprise-pluggable", &prop32);
+ if (!ret && prop32)
+ pnv_php_enable_irq(php_slot);
+
return 0;
unregister_slot:
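
The hotplug MSI-X vector chosen in pnv_php_enable_msix() above comes from the
Interrupt Message Number field of the PCIe Capabilities register
(PCI_EXP_FLAGS, bits 13:9), hence the shift by 9. A minimal stand-alone sketch
of that extraction; the helper name is hypothetical:

#include <linux/pci.h>

/*
 * Hypothetical helper: return the MSI/MSI-X table index that a device
 * raises its PCIe-native events (hotplug, PME, AER) on, or a negative
 * errno. Mirrors the bitfield extraction in pnv_php_enable_msix().
 */
static int pcie_irq_message_number(struct pci_dev *pdev)
{
	u16 flags;
	int ret;

	ret = pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &flags);
	if (ret)
		return ret;

	/* Interrupt Message Number lives in bits 13:9 of PCI_EXP_FLAGS */
	return (flags & PCI_EXP_FLAGS_IRQ) >> 9;
}
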
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 2194b447201d..e30f05c8517f 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -136,7 +136,10 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
- pci_setup_device(virtfn);
+ rc = pci_setup_device(virtfn);
+ if (rc)
+ goto failed0;
+
virtfn->dev.parent = dev->dev.parent;
virtfn->physfn = pci_dev_get(dev);
virtfn->is_virtfn = 1;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 98f12223c734..bfdd0744b686 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -19,6 +19,7 @@
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/acpi_iort.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
@@ -549,15 +550,23 @@ error_attrs:
return ret;
}
-static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
+static struct msi_desc *
+msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
{
- u16 control;
+ struct cpumask *masks = NULL;
struct msi_desc *entry;
+ u16 control;
+
+ if (affinity) {
+ masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+ if (!masks)
+ pr_err("Unable to allocate affinity masks, ignoring\n");
+ }
/* MSI Entry Initialization */
- entry = alloc_msi_entry(&dev->dev);
+ entry = alloc_msi_entry(&dev->dev, nvec, masks);
if (!entry)
- return NULL;
+ goto out;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
@@ -568,8 +577,6 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
- entry->nvec_used = nvec;
- entry->affinity = dev->irq_affinity;
if (control & PCI_MSI_FLAGS_64BIT)
entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -580,6 +587,8 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
+out:
+ kfree(masks);
return entry;
}
@@ -608,7 +617,7 @@ static int msi_verify_entries(struct pci_dev *dev)
* an error, and a positive return value indicates the number of interrupts
* which could have been allocated.
*/
-static int msi_capability_init(struct pci_dev *dev, int nvec)
+static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
{
struct msi_desc *entry;
int ret;
@@ -616,7 +625,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
- entry = msi_setup_entry(dev, nvec);
+ entry = msi_setup_entry(dev, nvec, affinity);
if (!entry)
return -ENOMEM;
@@ -679,28 +688,29 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
}
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
- struct msix_entry *entries, int nvec)
+ struct msix_entry *entries, int nvec,
+ bool affinity)
{
- const struct cpumask *mask = NULL;
+ struct cpumask *curmsk, *masks = NULL;
struct msi_desc *entry;
- int cpu = -1, i;
-
- for (i = 0; i < nvec; i++) {
- if (dev->irq_affinity) {
- cpu = cpumask_next(cpu, dev->irq_affinity);
- if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(dev->irq_affinity);
- mask = cpumask_of(cpu);
- }
+ int ret, i;
+
+ if (affinity) {
+ masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+ if (!masks)
+ pr_err("Unable to allocate affinity masks, ignoring\n");
+ }
- entry = alloc_msi_entry(&dev->dev);
+ for (i = 0, curmsk = masks; i < nvec; i++) {
+ entry = alloc_msi_entry(&dev->dev, 1, curmsk);
if (!entry) {
if (!i)
iounmap(base);
else
free_msi_irqs(dev);
/* Not enough memory. Don't try again */
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
entry->msi_attrib.is_msix = 1;
@@ -711,12 +721,14 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.entry_nr = i;
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;
- entry->nvec_used = 1;
- entry->affinity = mask;
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
+ if (masks)
+ curmsk++;
}
-
+ ret = 0;
+out:
+ kfree(masks);
return ret;
}
@@ -745,8 +757,8 @@ static void msix_program_entries(struct pci_dev *dev,
* single MSI-X irq. A return of zero indicates the successful setup of
* requested MSI-X entries with allocated irqs, or non-zero otherwise.
**/
-static int msix_capability_init(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
+static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, bool affinity)
{
int ret;
u16 control;
@@ -761,7 +773,7 @@ static int msix_capability_init(struct pci_dev *dev,
if (!base)
return -ENOMEM;
- ret = msix_setup_entries(dev, base, entries, nvec);
+ ret = msix_setup_entries(dev, base, entries, nvec, affinity);
if (ret)
return ret;
@@ -941,22 +953,8 @@ int pci_msix_vec_count(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_msix_vec_count);
-/**
- * pci_enable_msix - configure device's MSI-X capability structure
- * @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of MSI-X entries (optional)
- * @nvec: number of MSI-X irqs requested for allocation by device driver
- *
- * Setup the MSI-X capability structure of device function with the number
- * of requested irqs upon its software driver call to request for
- * MSI-X mode enabled on its hardware device function. A return of zero
- * indicates the successful configuration of MSI-X capability structure
- * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
- * Or a return of > 0 indicates that driver request is exceeding the number
- * of irqs or MSI-X vectors available. Driver should use the returned value to
- * re-send its request.
- **/
-int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
+static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, bool affinity)
{
int nr_entries;
int i, j;
@@ -988,7 +986,27 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
return -EINVAL;
}
- return msix_capability_init(dev, entries, nvec);
+ return msix_capability_init(dev, entries, nvec, affinity);
+}
+
+/**
+ * pci_enable_msix - configure device's MSI-X capability structure
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of MSI-X entries (optional)
+ * @nvec: number of MSI-X irqs requested for allocation by device driver
+ *
+ * Setup the MSI-X capability structure of device function with the number
+ * of requested irqs upon its software driver call to request for
+ * MSI-X mode enabled on its hardware device function. A return of zero
+ * indicates the successful configuration of MSI-X capability structure
+ * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
+ * A return of > 0 indicates that the driver request exceeds the number of
+ * irqs or MSI-X vectors available. The driver should use the returned value
+ * to re-send its request.
+ **/
+int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
+{
+ return __pci_enable_msix(dev, entries, nvec, false);
}
EXPORT_SYMBOL(pci_enable_msix);
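
The positive-return contract restated in the kerneldoc above is what callers
are expected to loop on. A hedged sketch of a conforming caller (function name
and vector counts are illustrative; in-tree code normally reaches this via
pci_enable_msix_range()):

#include <linux/pci.h>

/* Retry with the number of vectors the core reports as available. */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *entries, int nvec)
{
	int rc;

	while (nvec >= 1) {
		rc = pci_enable_msix(pdev, entries, nvec);
		if (rc == 0)
			return nvec;	/* success: nvec vectors enabled */
		if (rc < 0)
			return rc;	/* hard failure */
		nvec = rc;		/* only rc vectors available; retry */
	}
	return -ENOSPC;
}
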
@@ -1041,6 +1059,7 @@ EXPORT_SYMBOL(pci_msi_enabled);
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
unsigned int flags)
{
+ bool affinity = flags & PCI_IRQ_AFFINITY;
int nvec;
int rc;
@@ -1069,19 +1088,17 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
nvec = maxvec;
for (;;) {
- if (flags & PCI_IRQ_AFFINITY) {
- dev->irq_affinity = irq_create_affinity_mask(&nvec);
+ if (affinity) {
+ nvec = irq_calc_affinity_vectors(dev->irq_affinity,
+ nvec);
if (nvec < minvec)
return -ENOSPC;
}
- rc = msi_capability_init(dev, nvec);
+ rc = msi_capability_init(dev, nvec, affinity);
if (rc == 0)
return nvec;
- kfree(dev->irq_affinity);
- dev->irq_affinity = NULL;
-
if (rc < 0)
return rc;
if (rc < minvec)
@@ -1113,26 +1130,24 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
struct msix_entry *entries, int minvec, int maxvec,
unsigned int flags)
{
- int nvec = maxvec;
- int rc;
+ bool affinity = flags & PCI_IRQ_AFFINITY;
+ int rc, nvec = maxvec;
if (maxvec < minvec)
return -ERANGE;
for (;;) {
- if (flags & PCI_IRQ_AFFINITY) {
- dev->irq_affinity = irq_create_affinity_mask(&nvec);
+ if (affinity) {
+ nvec = irq_calc_affinity_vectors(dev->irq_affinity,
+ nvec);
if (nvec < minvec)
return -ENOSPC;
}
- rc = pci_enable_msix(dev, entries, nvec);
+ rc = __pci_enable_msix(dev, entries, nvec, affinity);
if (rc == 0)
return nvec;
- kfree(dev->irq_affinity);
- dev->irq_affinity = NULL;
-
if (rc < 0)
return rc;
if (rc < minvec)
@@ -1256,6 +1271,37 @@ int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
}
EXPORT_SYMBOL(pci_irq_vector);
+/**
+ * pci_irq_get_affinity - return the affinity of a particular msi vector
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ */
+const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
+{
+ if (dev->msix_enabled) {
+ struct msi_desc *entry;
+ int i = 0;
+
+ for_each_pci_msi_entry(entry, dev) {
+ if (i == nr)
+ return entry->affinity;
+ i++;
+ }
+ WARN_ON_ONCE(1);
+ return NULL;
+ } else if (dev->msi_enabled) {
+ struct msi_desc *entry = first_pci_msi_entry(dev);
+
+ if (WARN_ON_ONCE(!entry || nr >= entry->nvec_used))
+ return NULL;
+
+ return &entry->affinity[nr];
+ } else {
+ return cpu_possible_mask;
+ }
+}
+EXPORT_SYMBOL(pci_irq_get_affinity);
+
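
pci_irq_get_affinity() pairs naturally with pci_alloc_irq_vectors() and
pci_irq_vector(). A hedged usage sketch; the function name, queue count, and
log format are assumptions:

#include <linux/pci.h>

static int example_report_vectors(struct pci_dev *pdev, int nr_queues)
{
	const struct cpumask *mask;
	int i, nvec;

	/* Spread one vector per queue across CPUs, any IRQ type */
	nvec = pci_alloc_irq_vectors(pdev, 1, nr_queues,
				     PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		mask = pci_irq_get_affinity(pdev, i);
		if (mask)
			dev_info(&pdev->dev, "vector %d -> irq %d, cpus %*pbl\n",
				 i, pci_irq_vector(pdev, i),
				 cpumask_pr_args(mask));
	}
	return nvec;
}
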
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
return to_pci_dev(desc->dev);
@@ -1502,8 +1548,8 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
of_node = irq_domain_get_of_node(domain);
- if (of_node)
- rid = of_msi_map_rid(&pdev->dev, of_node, rid);
+ rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) :
+ iort_msi_map_rid(&pdev->dev, rid);
return rid;
}
@@ -1519,9 +1565,13 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
*/
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
+ struct irq_domain *dom;
u32 rid = 0;
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
- return of_msi_map_get_device_domain(&pdev->dev, rid);
+ dom = of_msi_map_get_device_domain(&pdev->dev, rid);
+ if (!dom)
+ dom = iort_get_device_domain(&pdev->dev, rid);
+ return dom;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 9a033e8ee9a4..d966d47c9e80 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -452,6 +452,27 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return error;
}
+static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ static const pci_power_t state_conv[] = {
+ [ACPI_STATE_D0] = PCI_D0,
+ [ACPI_STATE_D1] = PCI_D1,
+ [ACPI_STATE_D2] = PCI_D2,
+ [ACPI_STATE_D3_HOT] = PCI_D3hot,
+ [ACPI_STATE_D3_COLD] = PCI_D3cold,
+ };
+ int state;
+
+ if (!adev || !acpi_device_power_manageable(adev))
+ return PCI_UNKNOWN;
+
+ if (acpi_device_get_power(adev, &state) || state == ACPI_STATE_UNKNOWN)
+ return PCI_UNKNOWN;
+
+ return state_conv[state];
+}
+
static bool acpi_pci_can_wakeup(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
@@ -534,6 +555,7 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
+ .get_state = acpi_pci_get_power_state,
.choose_state = acpi_pci_choose_state,
.sleep_wake = acpi_pci_sleep_wake,
.run_wake = acpi_pci_run_wake,
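
With ->get_state now mandatory (see the pci.h and pci.c hunks below, where
pci_set_platform_pm() rejects an ops table without it), a platform PM backend
has to supply all seven callbacks. A minimal sketch with stub callbacks; every
example_* name is an assumption, and struct pci_platform_pm_ops is internal to
the PCI core, so this only builds there:

#include <linux/pci.h>
#include "pci.h"	/* struct pci_platform_pm_ops (PCI-core internal) */

static bool example_is_manageable(struct pci_dev *dev) { return true; }
static int example_set_state(struct pci_dev *dev, pci_power_t t) { return 0; }
static pci_power_t example_get_state(struct pci_dev *dev) { return PCI_D0; }
static pci_power_t example_choose_state(struct pci_dev *dev) { return PCI_D3hot; }
static int example_wake(struct pci_dev *dev, bool enable) { return 0; }
static bool example_need_resume(struct pci_dev *dev) { return false; }

static const struct pci_platform_pm_ops example_platform_pm = {
	.is_manageable	= example_is_manageable,
	.set_state	= example_set_state,
	.get_state	= example_get_state,	/* newly required hook */
	.choose_state	= example_choose_state,
	.sleep_wake	= example_wake,
	.run_wake	= example_wake,
	.need_resume	= example_need_resume,
};
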
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index e39a67c8ef39..1ccce1cd6aca 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -466,7 +466,6 @@ static void pci_device_shutdown(struct device *dev)
pci_msi_shutdown(pci_dev);
pci_msix_shutdown(pci_dev);
-#ifdef CONFIG_KEXEC_CORE
/*
* If this is a kexec reboot, turn off Bus Master bit on the
* device to tell it to not continue to do DMA. Don't touch
@@ -476,7 +475,6 @@ static void pci_device_shutdown(struct device *dev)
*/
if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
pci_clear_master(pci_dev);
-#endif
}
#ifdef CONFIG_PM
@@ -684,8 +682,19 @@ static int pci_pm_prepare(struct device *dev)
static void pci_pm_complete(struct device *dev)
{
- pci_dev_complete_resume(to_pci_dev(dev));
- pm_complete_with_resume_check(dev);
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ pci_dev_complete_resume(pci_dev);
+ pm_generic_complete(dev);
+
+ /* Resume device if platform firmware has put it in reset-power-on */
+ if (dev->power.direct_complete && pm_resume_via_firmware()) {
+ pci_power_t pre_sleep_state = pci_dev->current_state;
+
+ pci_update_current_state(pci_dev, pci_dev->current_state);
+ if (pci_dev->current_state < pre_sleep_state)
+ pm_request_resume(dev);
+ }
}
#else /* !CONFIG_PM_SLEEP */
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index c878aa71173b..55f453de562e 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -60,8 +60,13 @@ static struct pci_platform_pm_ops mid_pci_platform_pm = {
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+/*
+ * This table should be in sync with the one in
+ * arch/x86/platform/intel-mid/pwr.c.
+ */
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD1),
+ ICPU(INTEL_FAM6_ATOM_PENWELL),
+ ICPU(INTEL_FAM6_ATOM_MERRIFIELD),
{}
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index aab9d5115a5f..ba34907538f6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -480,6 +480,30 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
EXPORT_SYMBOL(pci_find_parent_resource);
/**
+ * pci_find_resource - Return matching PCI device resource
+ * @dev: PCI device to query
+ * @res: Resource to look for
+ *
+ * Goes over standard PCI resources (BARs) and checks if the given resource
+ * is partially or fully contained in any of them. In that case the
+ * matching resource is returned, %NULL otherwise.
+ */
+struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (r->start && resource_contains(r, res))
+ return r;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(pci_find_resource);
+
+/**
* pci_find_pcie_root_port - return PCIe Root Port
* @dev: PCI device to query
*
@@ -552,8 +576,9 @@ static const struct pci_platform_pm_ops *pci_platform_pm;
int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
- if (!ops->is_manageable || !ops->set_state || !ops->choose_state ||
- !ops->sleep_wake || !ops->run_wake || !ops->need_resume)
+ if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
+ !ops->choose_state || !ops->sleep_wake || !ops->run_wake ||
+ !ops->need_resume)
return -EINVAL;
pci_platform_pm = ops;
return 0;
@@ -570,6 +595,11 @@ static inline int platform_pci_set_power_state(struct pci_dev *dev,
return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}
+static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
+{
+ return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
+}
+
static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
return pci_platform_pm ?
@@ -701,26 +731,25 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
}
/**
- * pci_update_current_state - Read PCI power state of given device from its
- * PCI PM registers and cache it
+ * pci_update_current_state - Read power state of given device and cache it
* @dev: PCI device to handle.
* @state: State to cache in case the device doesn't have the PM capability
+ *
+ * The power state is read from the PMCSR register, which however is
+ * inaccessible in D3cold. The platform firmware is therefore queried first
+ * to detect accessibility of the register. In case the platform firmware
+ * reports an incorrect state or the device isn't power manageable by the
+ * platform at all, we try to detect D3cold by testing accessibility of the
+ * vendor ID in config space.
*/
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
- if (dev->pm_cap) {
+ if (platform_pci_get_power_state(dev) == PCI_D3cold ||
+ !pci_device_is_present(dev)) {
+ dev->current_state = PCI_D3cold;
+ } else if (dev->pm_cap) {
u16 pmcsr;
- /*
- * Configuration space is not accessible for device in
- * D3cold, so just keep or set D3cold for safety
- */
- if (dev->current_state == PCI_D3cold)
- return;
- if (state == PCI_D3cold) {
- dev->current_state = PCI_D3cold;
- return;
- }
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
} else {
@@ -1959,9 +1988,22 @@ static pci_power_t pci_target_state(struct pci_dev *dev)
default:
target_state = state;
}
- } else if (!dev->pm_cap) {
+
+ return target_state;
+ }
+
+ if (!dev->pm_cap)
target_state = PCI_D0;
- } else if (device_may_wakeup(&dev->dev)) {
+
+ /*
+ * If the device is in D3cold even though it's not power-manageable by
+ * the platform, it may have been powered down by non-standard means.
+ * Best to let it slumber.
+ */
+ if (dev->current_state == PCI_D3cold)
+ target_state = PCI_D3cold;
+
+ if (device_may_wakeup(&dev->dev)) {
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
@@ -4959,6 +5001,13 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
spin_lock(&resource_alignment_lock);
p = resource_alignment_param;
+ if (!*p)
+ goto out;
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
+ goto out;
+ }
+
while (*p) {
count = 0;
if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
@@ -5023,6 +5072,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
}
p++;
}
+out:
spin_unlock(&resource_alignment_lock);
return align;
}
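
For reference, the string parsed here is the resource_alignment= kernel
parameter, whose accepted form is [<order>@][<domain>:]<bus>:<slot>.<func>,
with multiple entries separated by ';'. An illustrative invocation (the device
address is made up):

	pci=resource_alignment=20@0000:03:00.0

requests 2^20-byte (1 MiB) alignment for the memory BARs of device
0000:03:00.0; with PCI_PROBE_ONLY set, the check added above now skips the
whole parameter with a one-time notice.
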
@@ -5041,6 +5091,15 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
resource_size_t align, size;
u16 command;
+ /*
+ * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
+ * 3.4.1.11. Their resources are allocated from the space
+ * described by the VF BARx register in the PF's SR-IOV capability.
+ * We can't influence their alignment here.
+ */
+ if (dev->is_virtfn)
+ return;
+
/* check if specified PCI is target device to reassign */
align = pci_specified_resource_alignment(dev);
if (!align)
@@ -5063,6 +5122,12 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
r = &dev->resource[i];
if (!(r->flags & IORESOURCE_MEM))
continue;
+ if (r->flags & IORESOURCE_PCI_FIXED) {
+ dev_info(&dev->dev, "Ignoring requested alignment for BAR%d: %pR\n",
+ i, r);
+ continue;
+ }
+
size = resource_size(r);
if (size < align) {
size = align;
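
pci_find_resource(), added above, gives callers a containment check before
touching a region. A hedged sketch; the helper name and the use of ioremap()
are illustrative:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>

/* Map start..start+size only if it lies inside one of pdev's BARs. */
static void __iomem *example_map_region(struct pci_dev *pdev,
					resource_size_t start,
					resource_size_t size)
{
	struct resource region = DEFINE_RES_MEM(start, size);

	if (!pci_find_resource(pdev, &region))
		return NULL;	/* region not backed by any BAR */

	return ioremap(start, size);
}
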
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9730c474b016..451856210e18 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -42,6 +42,8 @@ int pci_probe_reset_function(struct pci_dev *dev);
*
* @set_state: invokes the platform firmware to set the device's power state
*
+ * @get_state: queries the platform firmware for a device's current power state
+ *
* @choose_state: returns PCI power state of given device preferred by the
* platform; to be used during system-wide transitions from a
* sleeping state to the working state and vice versa
@@ -62,6 +64,7 @@ int pci_probe_reset_function(struct pci_dev *dev);
struct pci_platform_pm_ops {
bool (*is_manageable)(struct pci_dev *dev);
int (*set_state)(struct pci_dev *dev, pci_power_t state);
+ pci_power_t (*get_state)(struct pci_dev *dev);
pci_power_t (*choose_state)(struct pci_dev *dev);
int (*sleep_wake)(struct pci_dev *dev, bool enable);
int (*run_wake)(struct pci_dev *dev, bool enable);
@@ -332,6 +335,12 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
void pci_enable_acs(struct pci_dev *dev);
+#ifdef CONFIG_PCIE_PTM
+void pci_ptm_init(struct pci_dev *dev);
+#else
+static inline void pci_ptm_init(struct pci_dev *dev) { }
+#endif
+
struct pci_dev_reset_methods {
u16 vendor;
u16 device;
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 7fcea75afa4c..7ce77635e5ad 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -92,3 +92,14 @@ config PCIE_DPC
will be handled by the DPC driver. If your system doesn't
have this capability or you do not want to use this feature,
it is safe to answer N.
+
+config PCIE_PTM
+ bool "PCIe Precision Time Measurement support"
+ default n
+ depends on PCIEPORTBUS
+ help
+ This enables PCI Express Precision Time Measurement (PTM)
+ support.
+
+ This is only useful if you have devices that support PTM, but it
+ is safe to enable even if you don't.
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index b24525b3dec1..36e35ea8fde7 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_PCIEAER) += aer/
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += pcie-dpc.o
+obj-$(CONFIG_PCIE_PTM) += ptm.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 48d21e0edd56..139150b2bdfd 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -15,7 +15,6 @@
*
*/
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
@@ -37,9 +36,6 @@
#define DRIVER_VERSION "v1.0"
#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
#define DRIVER_DESC "Root Port Advanced Error Reporting Driver"
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
static int aer_probe(struct pcie_device *dev);
static void aer_remove(struct pcie_device *dev);
@@ -70,7 +66,7 @@ static int pcie_aer_disable;
void pci_no_aer(void)
{
- pcie_aer_disable = 1; /* has priority over 'forceload' */
+ pcie_aer_disable = 1;
}
bool pci_aer_available(void)
@@ -134,7 +130,7 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
SYSTEM_ERROR_INTR_ON_MESG_MASK);
- aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ aer_pos = pdev->aer_cap;
/* Clear error status */
pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
@@ -173,7 +169,7 @@ static void aer_disable_rootport(struct aer_rpc *rpc)
*/
set_downstream_devices_error_reporting(pdev, false);
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ pos = pdev->aer_cap;
/* Disable Root's interrupt in response to error messages */
pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
@@ -200,7 +196,7 @@ irqreturn_t aer_irq(int irq, void *context)
unsigned long flags;
int pos;
- pos = pci_find_ext_capability(pdev->port, PCI_EXT_CAP_ID_ERR);
+ pos = pdev->port->aer_cap;
/*
* Must lock access to Root Error Status Reg, Root Error ID Reg,
* and Root error producer/consumer index
@@ -294,7 +290,6 @@ static void aer_remove(struct pcie_device *dev)
/**
* aer_probe - initialize resources
* @dev: pointer to the pcie_dev data structure
- * @id: pointer to the service id data structure
*
* Invoked when PCI Express bus loads AER service driver.
*/
@@ -304,11 +299,6 @@ static int aer_probe(struct pcie_device *dev)
struct aer_rpc *rpc;
struct device *device = &dev->device;
- /* Init */
- status = aer_init(dev);
- if (status)
- return status;
-
/* Alloc rpc data structure */
rpc = aer_alloc_rpc(dev);
if (!rpc) {
@@ -343,7 +333,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
u32 reg32;
int pos;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
/* Disable Root's interrupt in response to error messages */
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
@@ -396,7 +386,7 @@ static void aer_error_resume(struct pci_dev *dev)
pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16);
/* Clean AER Root Error Status */
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
if (dev->error_state == pci_channel_io_normal)
@@ -417,16 +407,4 @@ static int __init aer_service_init(void)
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
-
-/**
- * aer_service_exit - unregister AER root service driver
- *
- * Invoked when AER root service driver is unloaded.
- */
-static void __exit aer_service_exit(void)
-{
- pcie_port_service_unregister(&aerdriver);
-}
-
-module_init(aer_service_init);
-module_exit(aer_service_exit);
+device_initcall(aer_service_init);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 945c939a86c5..d51e4a57b190 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -60,6 +60,7 @@ struct aer_rpc {
struct pcie_device *rpd; /* Root Port device */
struct work_struct dpc_handler;
struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
+ struct aer_err_info e_info;
unsigned short prod_idx; /* Error Producer Index */
unsigned short cons_idx; /* Error Consumer Index */
int isr;
@@ -105,7 +106,6 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
}
extern struct bus_type pcie_port_bus_type;
-int aer_init(struct pcie_device *dev);
void aer_isr(struct work_struct *work);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
@@ -121,11 +121,4 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
return 0;
}
#endif
-
-static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
- int enable)
-{
- pci_dev->__aer_firmware_first = !!enable;
- pci_dev->__aer_firmware_first_valid = 1;
-}
#endif /* _AERDRV_H_ */
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 521e39c1b66d..b1303b32053f 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -27,11 +27,6 @@
#include <linux/kfifo.h>
#include "aerdrv.h"
-static bool forceload;
-static bool nosourceid;
-module_param(forceload, bool, 0);
-module_param(nosourceid, bool, 0);
-
#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
@@ -40,7 +35,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
if (pcie_aer_get_firmware_first(dev))
return -EIO;
- if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
+ if (!dev->aer_cap)
return -EIO;
return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
@@ -62,7 +57,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
int pos;
u32 status;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
if (!pos)
return -EIO;
@@ -83,7 +78,7 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return -ENODEV;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
if (!pos)
return -EIO;
@@ -102,6 +97,12 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
return 0;
}
+int pci_aer_init(struct pci_dev *dev)
+{
+ dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ return pci_cleanup_aer_error_status_regs(dev);
+}
+
/**
* add_error_device - list device to be handled
* @e_info: pointer to error info
@@ -132,7 +133,8 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
* When bus id is equal to 0, it might be a bad id
* reported by root port.
*/
- if (!nosourceid && (PCI_BUS_NUM(e_info->id) != 0)) {
+ if ((PCI_BUS_NUM(e_info->id) != 0) &&
+ !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
/* Device ID match? */
if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
return true;
@@ -144,10 +146,10 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
/*
* When either
- * 1) nosourceid==y;
- * 2) bus id is equal to 0. Some ports might lose the bus
+ * 1) bus id is equal to 0. Some ports might lose the bus
* id of error source id;
- * 3) There are multiple errors and prior id comparing fails;
+ * 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set;
+ * 3) There are multiple errors and prior ID comparing fails;
* We check AER status registers to find possible reporter.
*/
if (atomic_read(&dev->enable_cnt) == 0)
@@ -158,7 +160,7 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
if (!(reg16 & PCI_EXP_AER_FLAGS))
return false;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
if (!pos)
return false;
@@ -555,7 +557,7 @@ static void handle_error_source(struct pcie_device *aerdev,
* Correctable error does not need software intervention.
* No need to go through error recovery process.
*/
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
@@ -647,7 +649,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
info->status = 0;
info->tlp_header_valid = 0;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pos = dev->aer_cap;
/* The device might not support AER */
if (!pos)
@@ -715,15 +717,8 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
static void aer_isr_one_error(struct pcie_device *p_device,
struct aer_err_source *e_src)
{
- struct aer_err_info *e_info;
-
- /* struct aer_err_info might be big, so we allocate it with slab */
- e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
- if (!e_info) {
- dev_printk(KERN_DEBUG, &p_device->port->dev,
- "Can't allocate mem when processing AER errors\n");
- return;
- }
+ struct aer_rpc *rpc = get_service_data(p_device);
+ struct aer_err_info *e_info = &rpc->e_info;
/*
* There is a possibility that both correctable error and
@@ -762,8 +757,6 @@ static void aer_isr_one_error(struct pcie_device *p_device,
if (find_source_device(p_device->port, e_info))
aer_process_err_devices(p_device, e_info);
}
-
- kfree(e_info);
}
/**
@@ -812,19 +805,3 @@ void aer_isr(struct work_struct *work)
aer_isr_one_error(p_device, &e_src);
mutex_unlock(&rpc->rpc_mutex);
}
-
-/**
- * aer_init - provide AER initialization
- * @dev: pointer to AER pcie device
- *
- * Invoked when AER service driver is loaded.
- */
-int aer_init(struct pcie_device *dev)
-{
- if (forceload) {
- dev_printk(KERN_DEBUG, &dev->device,
- "aerdrv forceload requested.\n");
- pcie_aer_force_firmware_first(dev->port, 0);
- }
- return 0;
-}
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 167fe411ce2e..54c4b691e51f 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -219,15 +219,13 @@ int cper_severity_to_aer(int cper_severity)
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);
-void cper_print_aer(struct pci_dev *dev, int cper_severity,
+void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer)
{
- int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
+ int layer, agent, status_strs_size, tlp_header_valid = 0;
u32 status, mask;
const char **status_strs;
- aer_severity = cper_severity_to_aer(cper_severity);
-
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index 250f87861786..9811b14d9ad8 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -1,5 +1,7 @@
/*
* PCI Express Downstream Port Containment services driver
+ * Author: Keith Busch <keith.busch@intel.com>
+ *
* Copyright (C) 2016 Intel Corp.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,7 +11,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pcieport_if.h>
@@ -143,16 +145,4 @@ static int __init dpc_service_init(void)
{
return pcie_port_service_register(&dpcdriver);
}
-
-static void __exit dpc_service_exit(void)
-{
- pcie_port_service_unregister(&dpcdriver);
-}
-
-MODULE_DESCRIPTION("PCI Express Downstream Port Containment driver");
-MODULE_AUTHOR("Keith Busch <keith.busch@intel.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1");
-
-module_init(dpc_service_init);
-module_exit(dpc_service_exit);
+device_initcall(dpc_service_init);
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 1ae4c73e7a3c..884bad5320f8 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -10,7 +10,6 @@
* for more details.
*/
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -449,17 +448,6 @@ static int pcie_pme_resume(struct pcie_device *srv)
return 0;
}
-/**
- * pcie_pme_remove - Prepare PCIe PME service device for removal.
- * @srv - PCIe service device to remove.
- */
-static void pcie_pme_remove(struct pcie_device *srv)
-{
- pcie_pme_suspend(srv);
- free_irq(srv->irq, srv);
- kfree(get_service_data(srv));
-}
-
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -468,7 +456,6 @@ static struct pcie_port_service_driver pcie_pme_driver = {
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
.resume = pcie_pme_resume,
- .remove = pcie_pme_remove,
};
/**
@@ -478,5 +465,4 @@ static int __init pcie_pme_service_init(void)
{
return pcie_port_service_register(&pcie_pme_driver);
}
-
-module_init(pcie_pme_service_init);
+device_initcall(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 70d7ad8c6d17..79327cc14e7d 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -1,12 +1,13 @@
/*
* File: portdrv_pci.c
* Purpose: PCI Express Port Bus Driver
+ * Author: Tom Nguyen <tom.l.nguyen@intel.com>
+ * Version: v1.0
*
* Copyright (C) 2004 Intel
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
*/
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -21,16 +22,6 @@
#include "portdrv.h"
#include "aer/aerdrv.h"
-/*
- * Version Information
- */
-#define DRIVER_VERSION "v1.0"
-#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
-#define DRIVER_DESC "PCIe Port Bus Driver"
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-
/* If this switch is set, PCIe port native services should not be enabled. */
bool pcie_ports_disabled;
@@ -341,7 +332,6 @@ static const struct pci_device_id port_pci_ids[] = { {
PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0),
}, { /* end: all zeroes */ }
};
-MODULE_DEVICE_TABLE(pci, port_pci_ids);
static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
@@ -406,5 +396,4 @@ static int __init pcie_portdrv_init(void)
out:
return retval;
}
-
-module_init(pcie_portdrv_init);
+device_initcall(pcie_portdrv_init);
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
new file mode 100644
index 000000000000..bab8ac63c4f3
--- /dev/null
+++ b/drivers/pci/pcie/ptm.c
@@ -0,0 +1,142 @@
+/*
+ * PCI Express Precision Time Measurement
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include "../pci.h"
+
+static void pci_ptm_info(struct pci_dev *dev)
+{
+ char clock_desc[8];
+
+ switch (dev->ptm_granularity) {
+ case 0:
+ snprintf(clock_desc, sizeof(clock_desc), "unknown");
+ break;
+ case 255:
+ snprintf(clock_desc, sizeof(clock_desc), ">254ns");
+ break;
+ default:
+ snprintf(clock_desc, sizeof(clock_desc), "%udns",
+ dev->ptm_granularity);
+ break;
+ }
+ dev_info(&dev->dev, "PTM enabled%s, %s granularity\n",
+ dev->ptm_root ? " (root)" : "", clock_desc);
+}
+
+void pci_ptm_init(struct pci_dev *dev)
+{
+ int pos;
+ u32 cap, ctrl;
+ u8 local_clock;
+ struct pci_dev *ups;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!pos)
+ return;
+
+ /*
+ * Enable PTM only on interior devices (root ports, switch ports,
+ * etc.) on the assumption that it causes no link traffic until an
+ * endpoint enables it.
+ */
+ if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
+ return;
+
+ pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
+ local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
+
+ /*
+ * There's no point in enabling PTM unless it's enabled in the
+ * upstream device or this device can be a PTM Root itself. Per
+ * the spec recommendation (PCIe r3.1, sec 7.32.3), select the
+ * furthest upstream Time Source as the PTM Root.
+ */
+ ups = pci_upstream_bridge(dev);
+ if (ups && ups->ptm_enabled) {
+ ctrl = PCI_PTM_CTRL_ENABLE;
+ if (ups->ptm_granularity == 0)
+ dev->ptm_granularity = 0;
+ else if (ups->ptm_granularity > local_clock)
+ dev->ptm_granularity = ups->ptm_granularity;
+ } else {
+ if (cap & PCI_PTM_CAP_ROOT) {
+ ctrl = PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT;
+ dev->ptm_root = 1;
+ dev->ptm_granularity = local_clock;
+ } else
+ return;
+ }
+
+ ctrl |= dev->ptm_granularity << 8;
+ pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
+ dev->ptm_enabled = 1;
+
+ pci_ptm_info(dev);
+}
+
+int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+{
+ int pos;
+ u32 cap, ctrl;
+ struct pci_dev *ups;
+
+ if (!pci_is_pcie(dev))
+ return -EINVAL;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
+ if (!(cap & PCI_PTM_CAP_REQ))
+ return -EINVAL;
+
+ /*
+ * For a PCIe Endpoint, PTM is only useful if the endpoint can
+ * issue PTM requests to upstream devices that have PTM enabled.
+ *
+ * For Root Complex Integrated Endpoints, there is no upstream
+ * device, so there must be some implementation-specific way to
+ * associate the endpoint with a time source.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT) {
+ ups = pci_upstream_bridge(dev);
+ if (!ups || !ups->ptm_enabled)
+ return -EINVAL;
+
+ dev->ptm_granularity = ups->ptm_granularity;
+ } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+ dev->ptm_granularity = 0;
+ } else
+ return -EINVAL;
+
+ ctrl = PCI_PTM_CTRL_ENABLE;
+ ctrl |= dev->ptm_granularity << 8;
+ pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
+ dev->ptm_enabled = 1;
+
+ pci_ptm_info(dev);
+
+ if (granularity)
+ *granularity = dev->ptm_granularity;
+ return 0;
+}
+EXPORT_SYMBOL(pci_enable_ptm);
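
A hedged sketch of an endpoint driver opting in to PTM during probe (driver
context assumed; failure is non-fatal, the device simply runs without PTM):

#include <linux/pci.h>

static void example_try_ptm(struct pci_dev *pdev)
{
	u8 granularity;	/* 0 = unknown, 255 = >254ns, else value in ns */

	if (pci_enable_ptm(pdev, &granularity))
		dev_info(&pdev->dev, "PTM not supported or no PTM root\n");
	else
		dev_info(&pdev->dev, "PTM on, granularity code %u\n",
			 granularity);
}
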
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 93f280df3428..ab002671fa60 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1666,7 +1666,11 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Enable ACS P2P upstream forwarding */
pci_enable_acs(dev);
- pci_cleanup_aer_error_status_regs(dev);
+ /* Precision Time Measurement */
+ pci_ptm_init(dev);
+
+ /* Advanced Error Reporting */
+ pci_aer_init(dev);
}
/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 44e0ff37480b..c232729f5b1b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -834,6 +834,17 @@ static void quirk_amd_ioapic(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
#endif /* CONFIG_X86_IO_APIC */
+#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
+
+static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
+{
+ /* Fix for improper SRIOV configuration on Cavium cn88xx RNM device */
+ if (dev->subsystem_device == 0xa118)
+ dev->sriov->link = dev->devfn;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
+#endif
+
/*
* Some settings of MMRBC can lead to data corruption so block changes.
* See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
@@ -3198,6 +3209,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
static void quirk_no_pm_reset(struct pci_dev *dev)
{
@@ -4431,3 +4443,20 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
+
+/*
+ * VMD-enabled root ports will change the source ID for all messages
+ * to the VMD device. Rather than doing device matching with the source
+ * ID, the AER driver should traverse the child device tree, reading
+ * AER registers to find the faulting device.
+ */
+static void quirk_no_aersid(struct pci_dev *pdev)
+{
+ /* VMD Domain */
+ if (pdev->bus->sysdata && pci_domain_nr(pdev->bus) >= 0x10000)
+ pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index d1ef7acf6930..f9357e09e9b3 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -40,6 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
+ pci_bridge_d3_device_removed(dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
@@ -96,8 +97,6 @@ static void pci_remove_bus_device(struct pci_dev *dev)
dev->subordinate = NULL;
}
- pci_bridge_d3_device_removed(dev);
-
pci_destroy_dev(dev);
}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index c74059e10a6d..f30ca75b5b6c 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -25,6 +25,7 @@
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include "pci.h"
unsigned int pci_flags;
@@ -1852,8 +1853,13 @@ void __init pci_assign_unassigned_resources(void)
{
struct pci_bus *root_bus;
- list_for_each_entry(root_bus, &pci_root_buses, node)
+ list_for_each_entry(root_bus, &pci_root_buses, node) {
pci_assign_unassigned_root_bus_resources(root_bus);
+
+ /* Make sure the root bridge has a companion ACPI device: */
+ if (ACPI_HANDLE(root_bus->bridge))
+ acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
+ }
}
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)