Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/access.c  16
-rw-r--r--  drivers/pci/bus.c  2
-rw-r--r--  drivers/pci/ecam.c  12
-rw-r--r--  drivers/pci/host/Kconfig  16
-rw-r--r--  drivers/pci/host/Makefile  19
-rw-r--r--  drivers/pci/host/pci-hyperv.c  100
-rw-r--r--  drivers/pci/host/pci-layerscape.c  20
-rw-r--r--  drivers/pci/host/pci-rcar-gen2.c  2
-rw-r--r--  drivers/pci/host/pci-tegra.c  160
-rw-r--r--  drivers/pci/host/pci-thunder-ecam.c  9
-rw-r--r--  drivers/pci/host/pci-thunder-pem.c  94
-rw-r--r--  drivers/pci/host/pci-xgene-msi.c  69
-rw-r--r--  drivers/pci/host/pci-xgene.c  126
-rw-r--r--  drivers/pci/host/pcie-altera.c  10
-rw-r--r--  drivers/pci/host/pcie-designware-plat.c  2
-rw-r--r--  drivers/pci/host/pcie-hisi.c  107
-rw-r--r--  drivers/pci/host/pcie-iproc-bcma.c  1
-rw-r--r--  drivers/pci/host/pcie-iproc-msi.c  1
-rw-r--r--  drivers/pci/host/pcie-iproc-platform.c  28
-rw-r--r--  drivers/pci/host/pcie-iproc.c  949
-rw-r--r--  drivers/pci/host/pcie-iproc.h  45
-rw-r--r--  drivers/pci/host/pcie-qcom.c  177
-rw-r--r--  drivers/pci/host/pcie-rcar.c  5
-rw-r--r--  drivers/pci/host/pcie-rockchip.c  284
-rw-r--r--  drivers/pci/host/pcie-spear13xx.c  6
-rw-r--r--  drivers/pci/host/vmd.c  30
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c  31
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c  3
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c  10
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c  9
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c  8
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c  21
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c  7
-rw-r--r--  drivers/pci/iov.c  70
-rw-r--r--  drivers/pci/msi.c  81
-rw-r--r--  drivers/pci/pci-acpi.c  100
-rw-r--r--  drivers/pci/pci-mid.c  8
-rw-r--r--  drivers/pci/pci-sysfs.c  2
-rw-r--r--  drivers/pci/pci.c  138
-rw-r--r--  drivers/pci/pci.h  19
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c  14
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c  18
-rw-r--r--  drivers/pci/pcie/aspm.c  22
-rw-r--r--  drivers/pci/pcie/pme.c  29
-rw-r--r--  drivers/pci/pcie/portdrv_core.c  3
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c  13
-rw-r--r--  drivers/pci/probe.c  276
-rw-r--r--  drivers/pci/quirks.c  182
-rw-r--r--  drivers/pci/remove.c  2
-rw-r--r--  drivers/pci/rom.c  5
-rw-r--r--  drivers/pci/setup-res.c  48
-rw-r--r--  drivers/pci/xen-pcifront.c  6
52 files changed, 2513 insertions, 902 deletions
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index d11cdbb8fba3..db239547fefd 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -142,10 +142,22 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
if (size == 4) {
writel(val, addr);
return PCIBIOS_SUCCESSFUL;
- } else {
- mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
}
+ /*
+ * In general, hardware that supports only 32-bit writes on PCI is
+ * not spec-compliant. For example, software may perform a 16-bit
+ * write. If the hardware only supports 32-bit accesses, we must
+ * do a 32-bit read, merge in the 16 bits we intend to write,
+ * followed by a 32-bit write. If the 16 bits we *don't* intend to
+ * write happen to have any RW1C (write-one-to-clear) bits set, we
+ * just inadvertently cleared something we shouldn't have.
+ */
+ dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
+ size, pci_domain_nr(bus), bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where);
+
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
tmp = readl(addr) & mask;
tmp |= val << ((where & 0x3) * 8);
writel(tmp, addr);
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index c288e5a52575..bc56cf19afd3 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -320,7 +320,7 @@ void pci_bus_add_device(struct pci_dev *dev)
pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
pci_proc_attach_device(dev);
- pci_bridge_d3_device_changed(dev);
+ pci_bridge_d3_update(dev);
dev->match_driver = true;
retval = device_attach(&dev->dev);
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 43ed08dd8b01..2fee61bb6559 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -162,3 +162,15 @@ struct pci_ecam_ops pci_generic_ecam_ops = {
.write = pci_generic_config_write,
}
};
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+/* ECAM ops for 32-bit access only (non-compliant) */
+struct pci_ecam_ops pci_32b_ops = {
+ .bus_shift = 20,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+ }
+};
+#endif
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index d7e7c0a827c3..898d2c48239c 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -69,7 +69,7 @@ config PCI_IMX6
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
- depends on ARCH_TEGRA && !ARM64
+ depends on ARCH_TEGRA
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -133,8 +133,8 @@ config PCIE_XILINX
config PCI_XGENE
bool "X-Gene PCIe controller"
- depends on ARCH_XGENE
- depends on OF
+ depends on ARM64
+ depends on OF || (ACPI && PCI_QUIRKS)
select PCIEPORTBUS
help
Say Y here if you want internal PCI support on APM X-Gene SoC.
@@ -240,14 +240,16 @@ config PCIE_QCOM
config PCI_HOST_THUNDER_PEM
bool "Cavium Thunder PCIe controller to off-chip devices"
- depends on OF && ARM64
+ depends on ARM64
+ depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
config PCI_HOST_THUNDER_ECAM
bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
- depends on OF && ARM64
+ depends on ARM64
+ depends on OF || (ACPI && PCI_QUIRKS)
select PCI_HOST_COMMON
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
@@ -276,7 +278,7 @@ config PCIE_ARTPEC6
config PCIE_ROCKCHIP
bool "Rockchip PCIe controller"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
select MFD_SYSCON
@@ -286,7 +288,7 @@ config PCIE_ROCKCHIP
4 slots.
config VMD
- depends on PCI_MSI && X86_64
+ depends on PCI_MSI && X86_64 && SRCU
tristate "Intel Volume Management Device Driver"
default N
---help---
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 084cb4983645..bfe3179ae74c 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
-obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
@@ -25,11 +24,23 @@ obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
-obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
-obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
-obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_VMD) += vmd.o
+
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+obj-$(CONFIG_ARM64) += pcie-hisi.o
+obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
+obj-$(CONFIG_ARM64) += pci-thunder-pem.o
+obj-$(CONFIG_ARM64) += pci-xgene.o
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 763ff8745828..3efcc7bdc5fb 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -378,6 +378,8 @@ struct hv_pcibus_device {
struct msi_domain_info msi_info;
struct msi_controller msi_chip;
struct irq_domain *irq_domain;
+ struct retarget_msi_interrupt retarget_msi_interrupt_params;
+ spinlock_t retarget_msi_interrupt_lock;
};
/*
@@ -755,7 +757,7 @@ static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
return parent->chip->irq_set_affinity(parent, dest, force);
}
-void hv_irq_mask(struct irq_data *data)
+static void hv_irq_mask(struct irq_data *data)
{
pci_msi_mask_irq(data);
}
@@ -770,38 +772,44 @@ void hv_irq_mask(struct irq_data *data)
* is built out of this PCI bus's instance GUID and the function
* number of the device.
*/
-void hv_irq_unmask(struct irq_data *data)
+static void hv_irq_unmask(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct irq_cfg *cfg = irqd_cfg(data);
- struct retarget_msi_interrupt params;
+ struct retarget_msi_interrupt *params;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
struct pci_bus *pbus;
struct pci_dev *pdev;
int cpu;
+ unsigned long flags;
dest = irq_data_get_affinity_mask(data);
pdev = msi_desc_to_pci_dev(msi_desc);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
- memset(&params, 0, sizeof(params));
- params.partition_id = HV_PARTITION_ID_SELF;
- params.source = 1; /* MSI(-X) */
- params.address = msi_desc->msg.address_lo;
- params.data = msi_desc->msg.data;
- params.device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+ spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+
+ params = &hbus->retarget_msi_interrupt_params;
+ memset(params, 0, sizeof(*params));
+ params->partition_id = HV_PARTITION_ID_SELF;
+ params->source = 1; /* MSI(-X) */
+ params->address = msi_desc->msg.address_lo;
+ params->data = msi_desc->msg.data;
+ params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
(hbus->hdev->dev_instance.b[4] << 16) |
(hbus->hdev->dev_instance.b[7] << 8) |
(hbus->hdev->dev_instance.b[6] & 0xf8) |
PCI_FUNC(pdev->devfn);
- params.vector = cfg->vector;
+ params->vector = cfg->vector;
for_each_cpu_and(cpu, dest, cpu_online_mask)
- params.vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+ params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
- hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, &params, NULL);
+ hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL);
+
+ spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
pci_msi_unmask_irq(data);
}
@@ -1271,9 +1279,9 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
struct hv_pci_dev *hpdev;
struct pci_child_message *res_req;
struct q_res_req_compl comp_pkt;
- union {
- struct pci_packet init_packet;
- u8 buffer[0x100];
+ struct {
+ struct pci_packet init_packet;
+ u8 buffer[sizeof(struct pci_child_message)];
} pkt;
unsigned long flags;
int ret;
@@ -1582,6 +1590,10 @@ static void hv_eject_device_work(struct work_struct *work)
pci_dev_put(pdev);
}
+ spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
+ list_del(&hpdev->list_entry);
+ spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+
memset(&ctxt, 0, sizeof(ctxt));
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
@@ -1590,10 +1602,6 @@ static void hv_eject_device_work(struct work_struct *work)
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
- spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
- list_del(&hpdev->list_entry);
- spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
-
put_pcichild(hpdev, hv_pcidev_ref_childlist);
put_pcichild(hpdev, hv_pcidev_ref_pnp);
put_hvpcibus(hpdev->hbus);
@@ -2186,6 +2194,7 @@ static int hv_pci_probe(struct hv_device *hdev,
INIT_LIST_HEAD(&hbus->resources_for_children);
spin_lock_init(&hbus->config_lock);
spin_lock_init(&hbus->device_list_lock);
+ spin_lock_init(&hbus->retarget_msi_interrupt_lock);
sema_init(&hbus->enum_sem, 1);
init_completion(&hbus->remove_event);
@@ -2266,24 +2275,32 @@ free_bus:
return ret;
}
-/**
- * hv_pci_remove() - Remove routine for this VMBus channel
- * @hdev: VMBus's tracking struct for this root PCI bus
- *
- * Return: 0 on success, -errno on failure
- */
-static int hv_pci_remove(struct hv_device *hdev)
+static void hv_pci_bus_exit(struct hv_device *hdev)
{
- int ret;
- struct hv_pcibus_device *hbus;
- union {
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct {
struct pci_packet teardown_packet;
- u8 buffer[0x100];
+ u8 buffer[sizeof(struct pci_message)];
} pkt;
struct pci_bus_relations relations;
struct hv_pci_compl comp_pkt;
+ int ret;
- hbus = hv_get_drvdata(hdev);
+ /*
+ * After the host sends the RESCIND_CHANNEL message, it doesn't
+ * access the per-channel ringbuffer any longer.
+ */
+ if (hdev->channel->rescind)
+ return;
+
+ /* Delete any children which might still exist. */
+ memset(&relations, 0, sizeof(relations));
+ hv_pci_devices_present(hbus, &relations);
+
+ ret = hv_send_resources_released(hdev);
+ if (ret)
+ dev_err(&hdev->device,
+ "Couldn't send resources released packet(s)\n");
memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
init_completion(&comp_pkt.host_event);
@@ -2298,7 +2315,19 @@ static int hv_pci_remove(struct hv_device *hdev)
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (!ret)
wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+}
+
+/**
+ * hv_pci_remove() - Remove routine for this VMBus channel
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_remove(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus;
+ hbus = hv_get_drvdata(hdev);
if (hbus->state == hv_pcibus_installed) {
/* Remove the bus from PCI's point of view. */
pci_lock_rescan_remove();
@@ -2307,17 +2336,10 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_unlock_rescan_remove();
}
- ret = hv_send_resources_released(hdev);
- if (ret)
- dev_err(&hdev->device,
- "Couldn't send resources released packet(s)\n");
+ hv_pci_bus_exit(hdev);
vmbus_close(hdev->channel);
- /* Delete any children which might still exist. */
- memset(&relations, 0, sizeof(relations));
- hv_pci_devices_present(hbus, &relations);
-
iounmap(hbus->cfg_addr);
hv_free_config_window(hbus);
pci_free_resource_list(&hbus->resources_for_children);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 653707996342..ea789138531b 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -35,12 +35,10 @@
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
-/* PEX LUT registers */
-#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */
-
struct ls_pcie_drvdata {
u32 lut_offset;
u32 ltssm_shift;
+ u32 lut_dbg;
struct pcie_host_ops *ops;
};
@@ -134,7 +132,7 @@ static int ls_pcie_link_up(struct pcie_port *pp)
struct ls_pcie *pcie = to_ls_pcie(pp);
u32 state;
- state = (ioread32(pcie->lut + PCIE_LUT_DBG) >>
+ state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
pcie->drvdata->ltssm_shift) &
LTSSM_STATE_MASK;
@@ -196,18 +194,28 @@ static struct ls_pcie_drvdata ls1021_drvdata = {
static struct ls_pcie_drvdata ls1043_drvdata = {
.lut_offset = 0x10000,
.ltssm_shift = 24,
+ .lut_dbg = 0x7fc,
+ .ops = &ls_pcie_host_ops,
+};
+
+static struct ls_pcie_drvdata ls1046_drvdata = {
+ .lut_offset = 0x80000,
+ .ltssm_shift = 24,
+ .lut_dbg = 0x407fc,
.ops = &ls_pcie_host_ops,
};
static struct ls_pcie_drvdata ls2080_drvdata = {
.lut_offset = 0x80000,
.ltssm_shift = 0,
+ .lut_dbg = 0x7fc,
.ops = &ls_pcie_host_ops,
};
static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
{ },
@@ -252,10 +260,8 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base);
- if (IS_ERR(pcie->pp.dbi_base)) {
- dev_err(dev, "missing *regs* space\n");
+ if (IS_ERR(pcie->pp.dbi_base))
return PTR_ERR(pcie->pp.dbi_base);
- }
pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 1eeefa4df64c..85348590848b 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -430,10 +430,10 @@ static int rcar_pci_probe(struct platform_device *pdev)
}
static struct of_device_id rcar_pci_of_match[] = {
- { .compatible = "renesas,pci-rcar-gen2", },
{ .compatible = "renesas,pci-r8a7790", },
{ .compatible = "renesas,pci-r8a7791", },
{ .compatible = "renesas,pci-r8a7794", },
+ { .compatible = "renesas,pci-rcar-gen2", },
{ },
};
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 8dfccf733241..ed8a93f2bfb5 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -51,10 +51,6 @@
#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/map.h>
-#include <asm/mach/pci.h>
-
#define INT_PCI_MSI_NR (8 * 32)
/* register definitions */
@@ -188,6 +184,9 @@
#define RP_VEND_XP 0x00000f00
#define RP_VEND_XP_DL_UP (1 << 30)
+#define RP_VEND_CTL2 0x00000fa8
+#define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
+
#define RP_PRIV_MISC 0x00000fe0
#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
@@ -252,6 +251,7 @@ struct tegra_pcie_soc {
bool has_intr_prsnt_sense;
bool has_cml_clk;
bool has_gen2;
+ bool force_pca_enable;
};
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
@@ -322,11 +322,6 @@ struct tegra_pcie_bus {
unsigned int nr;
};
-static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
-{
- return sys->private_data;
-}
-
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
unsigned long offset)
{
@@ -385,8 +380,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
struct device *dev = pcie->dev;
- pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
- L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
+ pgprot_t prot = pgprot_device(PAGE_KERNEL);
phys_addr_t cs = pcie->cs->start;
struct tegra_pcie_bus *bus;
unsigned int i;
@@ -430,7 +424,8 @@ free:
static int tegra_pcie_add_bus(struct pci_bus *bus)
{
- struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
struct tegra_pcie_bus *b;
b = tegra_pcie_bus_alloc(pcie, bus->number);
@@ -444,7 +439,8 @@ static int tegra_pcie_add_bus(struct pci_bus *bus)
static void tegra_pcie_remove_bus(struct pci_bus *child)
{
- struct tegra_pcie *pcie = sys_to_pcie(child->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(child);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
struct tegra_pcie_bus *bus, *tmp;
list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
@@ -461,7 +457,8 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn,
int where)
{
- struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
struct device *dev = pcie->dev;
void __iomem *addr = NULL;
@@ -558,6 +555,12 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
afi_writel(port->pcie, value, ctrl);
tegra_pcie_port_reset(port);
+
+ if (soc->force_pca_enable) {
+ value = readl(port->base + RP_VEND_CTL2);
+ value |= RP_VEND_CTL2_PCA_ENABLE;
+ writel(value, port->base + RP_VEND_CTL2);
+ }
}
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
@@ -610,39 +613,31 @@ static void tegra_pcie_relax_enable(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
-static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
+static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
{
- struct tegra_pcie *pcie = sys_to_pcie(sys);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct list_head *windows = &host->windows;
struct device *dev = pcie->dev;
int err;
- sys->mem_offset = pcie->offset.mem;
- sys->io_offset = pcie->offset.io;
+ pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
+ pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
+ pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
+ pci_add_resource(windows, &pcie->busn);
- err = devm_request_resource(dev, &iomem_resource, &pcie->io);
+ err = devm_request_pci_bus_resources(dev, windows);
if (err < 0)
return err;
- err = pci_remap_iospace(&pcie->pio, pcie->io.start);
- if (!err)
- pci_add_resource_offset(&sys->resources, &pcie->pio,
- sys->io_offset);
+ pci_remap_iospace(&pcie->pio, pcie->io.start);
- pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
- pci_add_resource_offset(&sys->resources, &pcie->prefetch,
- sys->mem_offset);
- pci_add_resource(&sys->resources, &pcie->busn);
-
- err = devm_request_pci_bus_resources(dev, &sys->resources);
- if (err < 0)
- return err;
-
- return 1;
+ return 0;
}
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
- struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+ struct tegra_pcie *pcie = pci_host_bridge_priv(host);
int irq;
tegra_cpuidle_pcie_irqs_in_use();
@@ -1499,10 +1494,11 @@ static const struct irq_domain_ops msi_domain_ops = {
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
- struct device *dev = pcie->dev;
- struct platform_device *pdev = to_platform_device(dev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct platform_device *pdev = to_platform_device(pcie->dev);
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
+ struct device *dev = pcie->dev;
unsigned long base;
int err;
u32 reg;
@@ -1559,6 +1555,8 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
reg |= AFI_INTR_MASK_MSI_MASK;
afi_writel(pcie, reg, AFI_INTR_MASK);
+ host->msi = &msi->chip;
+
return 0;
err:
@@ -1609,7 +1607,8 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
- if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
+ of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
switch (lanes) {
case 0x0000104:
dev_info(dev, "4x1, 1x1 configuration\n");
@@ -1730,7 +1729,22 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
struct device_node *np = dev->of_node;
unsigned int i = 0;
- if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ pcie->num_supplies = 6;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "avdd-pll-uerefe";
+ pcie->supplies[i++].supply = "hvddio-pex";
+ pcie->supplies[i++].supply = "dvddio-pex";
+ pcie->supplies[i++].supply = "dvdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex-pll-e";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
pcie->num_supplies = 7;
pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
@@ -2021,11 +2035,10 @@ retry:
return false;
}
-static int tegra_pcie_enable(struct tegra_pcie *pcie)
+static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
struct tegra_pcie_port *port, *tmp;
- struct hw_pci hw;
list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
dev_info(dev, "probing port %u, using %u lanes\n",
@@ -2041,21 +2054,6 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
tegra_pcie_port_disable(port);
tegra_pcie_port_free(port);
}
-
- memset(&hw, 0, sizeof(hw));
-
-#ifdef CONFIG_PCI_MSI
- hw.msi_ctrl = &pcie->msi.chip;
-#endif
-
- hw.nr_controllers = 1;
- hw.private_data = (void **)&pcie;
- hw.setup = tegra_pcie_setup;
- hw.map_irq = tegra_pcie_map_irq;
- hw.ops = &tegra_pcie_ops;
-
- pci_common_init_dev(dev, &hw);
- return 0;
}
static const struct tegra_pcie_soc tegra20_pcie = {
@@ -2069,6 +2067,7 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.has_intr_prsnt_sense = false,
.has_cml_clk = false,
.has_gen2 = false,
+ .force_pca_enable = false,
};
static const struct tegra_pcie_soc tegra30_pcie = {
@@ -2083,6 +2082,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.has_intr_prsnt_sense = true,
.has_cml_clk = true,
.has_gen2 = false,
+ .force_pca_enable = false,
};
static const struct tegra_pcie_soc tegra124_pcie = {
@@ -2096,9 +2096,25 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.has_intr_prsnt_sense = true,
.has_cml_clk = true,
.has_gen2 = true,
+ .force_pca_enable = false,
+};
+
+static const struct tegra_pcie_soc tegra210_pcie = {
+ .num_ports = 2,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x90b890b8,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = true,
+ .has_gen2 = true,
+ .force_pca_enable = true,
};
static const struct of_device_id tegra_pcie_of_match[] = {
+ { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
@@ -2217,13 +2233,17 @@ remove:
static int tegra_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pci_host_bridge *host;
struct tegra_pcie *pcie;
+ struct pci_bus *child;
int err;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ host = pci_alloc_host_bridge(sizeof(*pcie));
+ if (!host)
return -ENOMEM;
+ pcie = pci_host_bridge_priv(host);
+
pcie->soc = of_device_get_match_data(dev);
INIT_LIST_HEAD(&pcie->buses);
INIT_LIST_HEAD(&pcie->ports);
@@ -2243,6 +2263,10 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (err)
goto put_resources;
+ err = tegra_pcie_request_resources(pcie);
+ if (err)
+ goto put_resources;
+
/* setup the AFI address translations */
tegra_pcie_setup_translations(pcie);
@@ -2254,12 +2278,30 @@ static int tegra_pcie_probe(struct platform_device *pdev)
}
}
- err = tegra_pcie_enable(pcie);
+ tegra_pcie_enable_ports(pcie);
+
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+ host->busnr = pcie->busn.start;
+ host->dev.parent = &pdev->dev;
+ host->ops = &tegra_pcie_ops;
+
+ err = pci_register_host_bridge(host);
if (err < 0) {
- dev_err(dev, "failed to enable PCIe ports: %d\n", err);
+ dev_err(dev, "failed to register host: %d\n", err);
goto disable_msi;
}
+ pci_scan_child_bus(host->bus);
+
+ pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq);
+ pci_bus_size_bridges(host->bus);
+ pci_bus_assign_resources(host->bus);
+
+ list_for_each_entry(child, &host->bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(host->bus);
+
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_pcie_debugfs_init(pcie);
if (err < 0)
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index d50a3dc2d8db..3f54a43bbbea 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -14,6 +14,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
static void set_val(u32 v, int where, int size, u32 *val)
{
int shift = (where & 3) * 8;
@@ -346,7 +348,7 @@ static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static struct pci_ecam_ops pci_thunder_ecam_ops = {
+struct pci_ecam_ops pci_thunder_ecam_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -355,6 +357,8 @@ static struct pci_ecam_ops pci_thunder_ecam_ops = {
}
};
+#ifdef CONFIG_PCI_HOST_THUNDER_ECAM
+
static const struct of_device_id thunder_ecam_of_match[] = {
{ .compatible = "cavium,pci-host-thunder-ecam" },
{ },
@@ -373,3 +377,6 @@ static struct platform_driver thunder_ecam_driver = {
.probe = thunder_ecam_probe,
};
builtin_platform_driver(thunder_ecam_driver);
+
+#endif
+#endif
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 6abaf80ffb39..af722eb0ca75 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -18,8 +18,12 @@
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "../pci.h"
+
+#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
#define PEM_CFG_WR 0x28
#define PEM_CFG_RD 0x30
@@ -284,35 +288,16 @@ static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static int thunder_pem_init(struct pci_config_window *cfg)
+static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
+ struct resource *res_pem)
{
- struct device *dev = cfg->parent;
- resource_size_t bar4_start;
- struct resource *res_pem;
struct thunder_pem_pci *pem_pci;
- struct platform_device *pdev;
-
- /* Only OF support for now */
- if (!dev->of_node)
- return -EINVAL;
+ resource_size_t bar4_start;
pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
if (!pem_pci)
return -ENOMEM;
- pdev = to_platform_device(dev);
-
- /*
- * The second register range is the PEM bridge to the PCIe
- * bus. It has a different config access method than those
- * devices behind the bridge.
- */
- res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res_pem) {
- dev_err(dev, "missing \"reg[1]\"property\n");
- return -EINVAL;
- }
-
pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
if (!pem_pci->pem_reg_base)
return -ENOMEM;
@@ -332,9 +317,69 @@ static int thunder_pem_init(struct pci_config_window *cfg)
return 0;
}
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int thunder_pem_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct resource *res_pem;
+ int ret;
+
+ res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL);
+ if (!res_pem)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem);
+ if (ret) {
+ dev_err(dev, "can't get rc base address\n");
+ return ret;
+ }
+
+ return thunder_pem_init(dev, cfg, res_pem);
+}
+
+struct pci_ecam_ops thunder_pem_ecam_ops = {
+ .bus_shift = 24,
+ .init = thunder_pem_acpi_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = thunder_pem_config_read,
+ .write = thunder_pem_config_write,
+ }
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HOST_THUNDER_PEM
+
+static int thunder_pem_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res_pem;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ /*
+ * The second register range is the PEM bridge to the PCIe
+ * bus. It has a different config access method than those
+ * devices behind the bridge.
+ */
+ res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_pem) {
+ dev_err(dev, "missing \"reg[1]\"property\n");
+ return -EINVAL;
+ }
+
+ return thunder_pem_init(dev, cfg, res_pem);
+}
+
static struct pci_ecam_ops pci_thunder_pem_ops = {
.bus_shift = 24,
- .init = thunder_pem_init,
+ .init = thunder_pem_platform_init,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = thunder_pem_config_read,
@@ -360,3 +405,6 @@ static struct platform_driver thunder_pem_driver = {
.probe = thunder_pem_probe,
};
builtin_platform_driver(thunder_pem_driver);
+
+#endif
+#endif
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index a6456b578269..1f38d0836751 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -360,16 +360,16 @@ static void xgene_msi_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static enum cpuhp_state pci_xgene_online;
+
static int xgene_msi_remove(struct platform_device *pdev)
{
- int virq, i;
struct xgene_msi *msi = platform_get_drvdata(pdev);
- for (i = 0; i < NR_HW_IRQS; i++) {
- virq = msi->msi_groups[i].gic_irq;
- if (virq != 0)
- irq_set_chained_handler_and_data(virq, NULL, NULL);
- }
+ if (pci_xgene_online)
+ cpuhp_remove_state(pci_xgene_online);
+ cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);
+
kfree(msi->msi_groups);
kfree(msi->bitmap);
@@ -427,7 +427,7 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
return 0;
}
-static void xgene_msi_hwirq_free(unsigned int cpu)
+static int xgene_msi_hwirq_free(unsigned int cpu)
{
struct xgene_msi *msi = &xgene_msi_ctrl;
struct xgene_msi_group *msi_group;
@@ -441,33 +441,9 @@ static void xgene_msi_hwirq_free(unsigned int cpu)
irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
NULL);
}
+ return 0;
}
-static int xgene_msi_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- xgene_msi_hwirq_alloc(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- xgene_msi_hwirq_free(cpu);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block xgene_msi_cpu_notifier = {
- .notifier_call = xgene_msi_cpu_callback,
-};
-
static const struct of_device_id xgene_msi_match_table[] = {
{.compatible = "apm,xgene1-msi"},
{},
@@ -478,7 +454,6 @@ static int xgene_msi_probe(struct platform_device *pdev)
struct resource *res;
int rc, irq_index;
struct xgene_msi *xgene_msi;
- unsigned int cpu;
int virt_msir;
u32 msi_val, msi_idx;
@@ -540,28 +515,22 @@ static int xgene_msi_probe(struct platform_device *pdev)
}
}
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu)
- if (xgene_msi_hwirq_alloc(cpu)) {
- dev_err(&pdev->dev, "failed to register MSI handlers\n");
- cpu_notifier_register_done();
- goto error;
- }
-
- rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier);
- if (rc) {
- dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
- cpu_notifier_register_done();
- goto error;
- }
-
- cpu_notifier_register_done();
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
+ xgene_msi_hwirq_alloc, NULL);
+ if (rc)
+ goto err_cpuhp;
+ pci_xgene_online = rc;
+ rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
+ xgene_msi_hwirq_free);
+ if (rc)
+ goto err_cpuhp;
dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
return 0;
+err_cpuhp:
+ dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
error:
xgene_msi_remove(pdev);
return rc;
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 1de23d74783f..7c3b54b9eb17 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -27,6 +27,8 @@
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -64,7 +66,9 @@
/* PCIe IP version */
#define XGENE_PCIE_IP_VER_UNKN 0
#define XGENE_PCIE_IP_VER_1 1
+#define XGENE_PCIE_IP_VER_2 2
+#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
struct xgene_pcie_port {
struct device_node *node;
struct device *dev;
@@ -91,13 +95,24 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
}
+static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg;
+
+ if (acpi_disabled)
+ return (struct xgene_pcie_port *)(bus->sysdata);
+
+ cfg = bus->sysdata;
+ return (struct xgene_pcie_port *)(cfg->priv);
+}
+
/*
* When the address bit [17:16] is 2'b01, the Configuration access will be
* treated as Type 1 and it will be forwarded to external PCIe device.
*/
static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
{
- struct xgene_pcie_port *port = bus->sysdata;
+ struct xgene_pcie_port *port = pcie_bus_to_port(bus);
if (bus->number >= (bus->primary + 1))
return port->cfg_base + AXI_EP_CFG_ACCESS;
@@ -111,7 +126,7 @@ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
*/
static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
{
- struct xgene_pcie_port *port = bus->sysdata;
+ struct xgene_pcie_port *port = pcie_bus_to_port(bus);
unsigned int b, d, f;
u32 rtdid_val = 0;
@@ -158,7 +173,7 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct xgene_pcie_port *port = bus->sysdata;
+ struct xgene_pcie_port *port = pcie_bus_to_port(bus);
if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
PCIBIOS_SUCCESSFUL)
@@ -182,13 +197,103 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
+#endif
-static struct pci_ops xgene_pcie_ops = {
- .map_bus = xgene_pcie_map_bus,
- .read = xgene_pcie_config_read32,
- .write = pci_generic_config_write32,
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+static int xgene_get_csr_resource(struct acpi_device *adev,
+ struct resource *res)
+{
+ struct device *dev = &adev->dev;
+ struct resource_entry *entry;
+ struct list_head list;
+ unsigned long flags;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+ flags = IORESOURCE_MEM;
+ ret = acpi_dev_get_resources(adev, &list,
+ acpi_dev_filter_resource_type_cb,
+ (void *) flags);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse _CRS method, error code %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ret == 0) {
+ dev_err(dev, "no IO and memory resources present in _CRS\n");
+ return -EINVAL;
+ }
+
+ entry = list_first_entry(&list, struct resource_entry, node);
+ *res = *entry->res;
+ acpi_dev_free_resource_list(&list);
+ return 0;
+}
+
+static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct xgene_pcie_port *port;
+ struct resource csr;
+ int ret;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ret = xgene_get_csr_resource(adev, &csr);
+ if (ret) {
+ dev_err(dev, "can't get CSR resource\n");
+ kfree(port);
+ return ret;
+ }
+ port->csr_base = devm_ioremap_resource(dev, &csr);
+ if (IS_ERR(port->csr_base)) {
+ kfree(port);
+ return -ENOMEM;
+ }
+
+ port->cfg_base = cfg->win;
+ port->version = ipversion;
+
+ cfg->priv = port;
+ return 0;
+}
+
+static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
+{
+ return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
+}
+
+struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
+ .bus_shift = 16,
+ .init = xgene_v1_pcie_ecam_init,
+ .pci_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write,
+ }
+};
+
+static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
+{
+ return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
+}
+
+struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
+ .bus_shift = 16,
+ .init = xgene_v2_pcie_ecam_init,
+ .pci_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write,
+ }
};
+#endif
+#if defined(CONFIG_PCI_XGENE)
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
u32 flags, u64 size)
{
@@ -521,6 +626,12 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
return 0;
}
+static struct pci_ops xgene_pcie_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = xgene_pcie_config_read32,
+ .write = pci_generic_config_write32,
+};
+
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -591,3 +702,4 @@ static struct platform_driver xgene_pcie_driver = {
.probe = xgene_pcie_probe_bridge,
};
builtin_platform_driver(xgene_pcie_driver);
+#endif
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index b0ac4dfafa0b..0c1540225ca3 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -550,10 +550,8 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
pcie->cra_base = devm_ioremap_resource(dev, cra);
- if (IS_ERR(pcie->cra_base)) {
- dev_err(dev, "failed to map cra memory\n");
+ if (IS_ERR(pcie->cra_base))
return PTR_ERR(pcie->cra_base);
- }
/* setup IRQ */
pcie->irq = platform_get_irq(pdev, 0);
@@ -641,8 +639,4 @@ static struct platform_driver altera_pcie_driver = {
},
};
-static int altera_pcie_init(void)
-{
- return platform_driver_register(&altera_pcie_driver);
-}
-device_initcall(altera_pcie_init);
+builtin_platform_driver(altera_pcie_driver);
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 8df6312ed300..1a02038c4640 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
- * Authors: Joao Pinto <jpmpinto@gmail.com>
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 56154c25980c..a301a7187b30 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -18,7 +18,106 @@
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/regmap.h>
+#include "../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int hisi_pcie_acpi_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int dev = PCI_SLOT(devfn);
+
+ if (bus->number == cfg->busr.start) {
+ /* access only one slot on each root port */
+ if (dev > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return pci_generic_config_read32(bus, devfn, where,
+ size, val);
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int hisi_pcie_acpi_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ int dev = PCI_SLOT(devfn);
+
+ if (bus->number == cfg->busr.start) {
+ /* access only one slot on each root port */
+ if (dev > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return pci_generic_config_write32(bus, devfn, where,
+ size, val);
+ }
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ void __iomem *reg_base = cfg->priv;
+
+ if (bus->number == cfg->busr.start)
+ return reg_base + where;
+ else
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int hisi_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct resource *res;
+ void __iomem *reg_base;
+ int ret;
+
+ /*
+ * Retrieve RC base and size from a HISI0081 device with _UID
+ * matching our segment.
+ */
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc base address\n");
+ return -ENOMEM;
+ }
+
+ reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!reg_base)
+ return -ENOMEM;
+
+ cfg->priv = reg_base;
+ return 0;
+}
+
+struct pci_ecam_ops hisi_pcie_ops = {
+ .bus_shift = 20,
+ .init = hisi_pcie_init,
+ .pci_ops = {
+ .map_bus = hisi_pcie_map_bus,
+ .read = hisi_pcie_acpi_rd_conf,
+ .write = hisi_pcie_acpi_wr_conf,
+ }
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HISI
#include "pcie-designware.h"
@@ -185,17 +284,13 @@ static int hisi_pcie_probe(struct platform_device *pdev)
reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
pp->dbi_base = devm_ioremap_resource(dev, reg);
- if (IS_ERR(pp->dbi_base)) {
- dev_err(dev, "cannot get rc_dbi base\n");
+ if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
- }
ret = hisi_add_pcie_port(hisi_pcie, pdev);
if (ret)
return ret;
- dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
-
return 0;
}
@@ -227,3 +322,5 @@ static struct platform_driver hisi_pcie_driver = {
},
};
builtin_platform_driver(hisi_pcie_driver);
+
+#endif
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 8ce089043a27..bd4c9ec25edc 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -54,6 +54,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
pcie->dev = dev;
+ pcie->type = IPROC_PCIE_PAXB_BCMA;
pcie->base = bdev->io_addr;
if (!pcie->base) {
dev_err(dev, "no controller registers\n");
diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c
index 9a2973bdc78a..9fad7915f82a 100644
--- a/drivers/pci/host/pcie-iproc-msi.c
+++ b/drivers/pci/host/pcie-iproc-msi.c
@@ -563,6 +563,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
}
switch (pcie->type) {
+ case IPROC_PCIE_PAXB_BCMA:
case IPROC_PCIE_PAXB:
msi->reg_offsets = iproc_msi_reg_paxb;
msi->nr_eq_region = 1;
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index a3de087976b3..22d814a78a78 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -31,8 +31,14 @@ static const struct of_device_id iproc_pcie_of_match_table[] = {
.compatible = "brcm,iproc-pcie",
.data = (int *)IPROC_PCIE_PAXB,
}, {
+ .compatible = "brcm,iproc-pcie-paxb-v2",
+ .data = (int *)IPROC_PCIE_PAXB_V2,
+ }, {
.compatible = "brcm,iproc-pcie-paxc",
.data = (int *)IPROC_PCIE_PAXC,
+ }, {
+ .compatible = "brcm,iproc-pcie-paxc-v2",
+ .data = (int *)IPROC_PCIE_PAXC_V2,
},
{ /* sentinel */ }
};
@@ -84,19 +90,6 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return ret;
}
pcie->ob.axi_offset = val;
-
- ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
- &val);
- if (ret) {
- dev_err(dev,
- "missing brcm,pcie-ob-window-size property\n");
- return ret;
- }
- pcie->ob.window_size = (resource_size_t)val * SZ_1M;
-
- if (of_property_read_bool(np, "brcm,pcie-ob-oarr-size"))
- pcie->ob.set_oarr_size = true;
-
pcie->need_ob_cfg = true;
}
@@ -115,7 +108,14 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return ret;
}
- pcie->map_irq = of_irq_parse_and_map_pci;
+ /* PAXC doesn't support legacy IRQs, skip mapping */
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXC:
+ case IPROC_PCIE_PAXC_V2:
+ break;
+ default:
+ pcie->map_irq = of_irq_parse_and_map_pci;
+ }
ret = iproc_pcie_setup(pcie, &res);
if (ret)
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 0b999a9fb843..3ebc025499b9 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -38,6 +39,12 @@
#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT)
#define PAXC_RESET_MASK 0x7f
+#define GIC_V3_CFG_SHIFT 0
+#define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT)
+
+#define MSI_ENABLE_CFG_SHIFT 0
+#define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT)
+
#define CFG_IND_ADDR_MASK 0x00001ffc
#define CFG_ADDR_BUS_NUM_SHIFT 20
@@ -58,59 +65,319 @@
#define PCIE_DL_ACTIVE_SHIFT 2
#define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT)
+#define APB_ERR_EN_SHIFT 0
+#define APB_ERR_EN BIT(APB_ERR_EN_SHIFT)
+
+/* derive the enum index of the outbound/inbound mapping registers */
+#define MAP_REG(base_reg, index) ((base_reg) + (index) * 2)
+
+/*
+ * Maximum number of outbound mapping window sizes that can be supported by any
+ * OARR/OMAP mapping pair
+ */
+#define MAX_NUM_OB_WINDOW_SIZES 4
+
#define OARR_VALID_SHIFT 0
#define OARR_VALID BIT(OARR_VALID_SHIFT)
#define OARR_SIZE_CFG_SHIFT 1
-#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
-#define PCI_EXP_CAP 0xac
+/*
+ * Maximum number of inbound mapping region sizes that can be supported by an
+ * IARR
+ */
+#define MAX_NUM_IB_REGION_SIZES 9
+
+#define IMAP_VALID_SHIFT 0
+#define IMAP_VALID BIT(IMAP_VALID_SHIFT)
-#define MAX_NUM_OB_WINDOWS 2
+#define PCI_EXP_CAP 0xac
#define IPROC_PCIE_REG_INVALID 0xffff
+/**
+ * iProc PCIe outbound mapping controller specific parameters
+ *
+ * @window_sizes: list of supported outbound mapping window sizes in MB
+ * @nr_sizes: number of supported outbound mapping window sizes
+ */
+struct iproc_pcie_ob_map {
+ resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
+ unsigned int nr_sizes;
+};
+
+static const struct iproc_pcie_ob_map paxb_ob_map[] = {
+ {
+ /* OARR0/OMAP0 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR1/OMAP1 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+};
+
+static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
+ {
+ /* OARR0/OMAP0 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR1/OMAP1 */
+ .window_sizes = { 128, 256 },
+ .nr_sizes = 2,
+ },
+ {
+ /* OARR2/OMAP2 */
+ .window_sizes = { 128, 256, 512, 1024 },
+ .nr_sizes = 4,
+ },
+ {
+ /* OARR3/OMAP3 */
+ .window_sizes = { 128, 256, 512, 1024 },
+ .nr_sizes = 4,
+ },
+};
+
+/**
+ * iProc PCIe inbound mapping type
+ */
+enum iproc_pcie_ib_map_type {
+ /* for DDR memory */
+ IPROC_PCIE_IB_MAP_MEM = 0,
+
+ /* for device I/O memory */
+ IPROC_PCIE_IB_MAP_IO,
+
+ /* invalid or unused */
+ IPROC_PCIE_IB_MAP_INVALID
+};
+
+/**
+ * iProc PCIe inbound mapping controller specific parameters
+ *
+ * @type: inbound mapping region type
+ * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
+ * SZ_1G
+ * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
+ * GB, depending on the size unit
+ * @nr_sizes: number of supported inbound mapping region sizes
+ * @nr_windows: number of supported inbound mapping windows for the region
+ * @imap_addr_offset: register offset between the upper and lower 32-bit
+ * IMAP address registers
+ * @imap_window_offset: register offset between each IMAP window
+ */
+struct iproc_pcie_ib_map {
+ enum iproc_pcie_ib_map_type type;
+ unsigned int size_unit;
+ resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
+ unsigned int nr_sizes;
+ unsigned int nr_windows;
+ u16 imap_addr_offset;
+ u16 imap_window_offset;
+};
+
+static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
+ {
+ /* IARR0/IMAP0 */
+ .type = IPROC_PCIE_IB_MAP_IO,
+ .size_unit = SZ_1K,
+ .region_sizes = { 32 },
+ .nr_sizes = 1,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x40,
+ .imap_window_offset = 0x4,
+ },
+ {
+ /* IARR1/IMAP1 (currently unused) */
+ .type = IPROC_PCIE_IB_MAP_INVALID,
+ },
+ {
+ /* IARR2/IMAP2 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1M,
+ .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
+ 16384 },
+ .nr_sizes = 9,
+ .nr_windows = 1,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+ {
+ /* IARR3/IMAP3 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1G,
+ .region_sizes = { 1, 2, 4, 8, 16, 32 },
+ .nr_sizes = 6,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+ {
+ /* IARR4/IMAP4 */
+ .type = IPROC_PCIE_IB_MAP_MEM,
+ .size_unit = SZ_1G,
+ .region_sizes = { 32, 64, 128, 256, 512 },
+ .nr_sizes = 5,
+ .nr_windows = 8,
+ .imap_addr_offset = 0x4,
+ .imap_window_offset = 0x8,
+ },
+};
+
+/*
+ * iProc PCIe host registers
+ */
enum iproc_pcie_reg {
+ /* clock/reset signal control */
IPROC_PCIE_CLK_CTRL = 0,
+
+ /*
+ * To allow MSI to be steered to an external MSI controller (e.g., ARM
+ * GICv3 ITS)
+ */
+ IPROC_PCIE_MSI_GIC_MODE,
+
+ /*
+ * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
+ * window where the MSI posted writes are written, for the writes to be
+ * interpreted as MSI writes.
+ */
+ IPROC_PCIE_MSI_BASE_ADDR,
+ IPROC_PCIE_MSI_WINDOW_SIZE,
+
+ /*
+ * To hold the address of the register where the MSI writes are
+ * programmed. When ARM GICv3 ITS is used, this should be programmed
+ * with the address of the GITS_TRANSLATER register.
+ */
+ IPROC_PCIE_MSI_ADDR_LO,
+ IPROC_PCIE_MSI_ADDR_HI,
+
+ /* enable MSI */
+ IPROC_PCIE_MSI_EN_CFG,
+
+ /* allow access to root complex configuration space */
IPROC_PCIE_CFG_IND_ADDR,
IPROC_PCIE_CFG_IND_DATA,
+
+ /* allow access to device configuration space */
IPROC_PCIE_CFG_ADDR,
IPROC_PCIE_CFG_DATA,
+
+ /* enable INTx */
IPROC_PCIE_INTX_EN,
- IPROC_PCIE_OARR_LO,
- IPROC_PCIE_OARR_HI,
- IPROC_PCIE_OMAP_LO,
- IPROC_PCIE_OMAP_HI,
+
+ /* outbound address mapping */
+ IPROC_PCIE_OARR0,
+ IPROC_PCIE_OMAP0,
+ IPROC_PCIE_OARR1,
+ IPROC_PCIE_OMAP1,
+ IPROC_PCIE_OARR2,
+ IPROC_PCIE_OMAP2,
+ IPROC_PCIE_OARR3,
+ IPROC_PCIE_OMAP3,
+
+ /* inbound address mapping */
+ IPROC_PCIE_IARR0,
+ IPROC_PCIE_IMAP0,
+ IPROC_PCIE_IARR1,
+ IPROC_PCIE_IMAP1,
+ IPROC_PCIE_IARR2,
+ IPROC_PCIE_IMAP2,
+ IPROC_PCIE_IARR3,
+ IPROC_PCIE_IMAP3,
+ IPROC_PCIE_IARR4,
+ IPROC_PCIE_IMAP4,
+
+ /* link status */
IPROC_PCIE_LINK_STATUS,
+
+ /* enable APB error for unsupported requests */
+ IPROC_PCIE_APB_ERR_EN,
+
+ /* total number of core registers */
+ IPROC_PCIE_MAX_NUM_REG,
+};
+
+/* iProc PCIe PAXB BCMA registers */
+static const u16 iproc_pcie_reg_paxb_bcma[] = {
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
};
/* iProc PCIe PAXB registers */
static const u16 iproc_pcie_reg_paxb[] = {
- [IPROC_PCIE_CLK_CTRL] = 0x000,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
- [IPROC_PCIE_CFG_IND_DATA] = 0x124,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
- [IPROC_PCIE_INTX_EN] = 0x330,
- [IPROC_PCIE_OARR_LO] = 0xd20,
- [IPROC_PCIE_OARR_HI] = 0xd24,
- [IPROC_PCIE_OMAP_LO] = 0xd40,
- [IPROC_PCIE_OMAP_HI] = 0xd44,
- [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_OARR0] = 0xd20,
+ [IPROC_PCIE_OMAP0] = 0xd40,
+ [IPROC_PCIE_OARR1] = 0xd28,
+ [IPROC_PCIE_OMAP1] = 0xd48,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_APB_ERR_EN] = 0xf40,
+};
+
+/* iProc PCIe PAXB v2 registers */
+static const u16 iproc_pcie_reg_paxb_v2[] = {
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x124,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+ [IPROC_PCIE_INTX_EN] = 0x330,
+ [IPROC_PCIE_OARR0] = 0xd20,
+ [IPROC_PCIE_OMAP0] = 0xd40,
+ [IPROC_PCIE_OARR1] = 0xd28,
+ [IPROC_PCIE_OMAP1] = 0xd48,
+ [IPROC_PCIE_OARR2] = 0xd60,
+ [IPROC_PCIE_OMAP2] = 0xd68,
+ [IPROC_PCIE_OARR3] = 0xdf0,
+ [IPROC_PCIE_OMAP3] = 0xdf8,
+ [IPROC_PCIE_IARR0] = 0xd00,
+ [IPROC_PCIE_IMAP0] = 0xc00,
+ [IPROC_PCIE_IARR2] = 0xd10,
+ [IPROC_PCIE_IMAP2] = 0xcc0,
+ [IPROC_PCIE_IARR3] = 0xe00,
+ [IPROC_PCIE_IMAP3] = 0xe08,
+ [IPROC_PCIE_IARR4] = 0xe68,
+ [IPROC_PCIE_IMAP4] = 0xe70,
+ [IPROC_PCIE_LINK_STATUS] = 0xf0c,
+ [IPROC_PCIE_APB_ERR_EN] = 0xf40,
};
/* iProc PCIe PAXC v1 registers */
static const u16 iproc_pcie_reg_paxc[] = {
- [IPROC_PCIE_CLK_CTRL] = 0x000,
- [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
- [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
- [IPROC_PCIE_CFG_ADDR] = 0x1f8,
- [IPROC_PCIE_CFG_DATA] = 0x1fc,
- [IPROC_PCIE_INTX_EN] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OARR_LO] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OARR_HI] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OMAP_LO] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_OMAP_HI] = IPROC_PCIE_REG_INVALID,
- [IPROC_PCIE_LINK_STATUS] = IPROC_PCIE_REG_INVALID,
+ [IPROC_PCIE_CLK_CTRL] = 0x000,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
+};
+
+/* iProc PCIe PAXC v2 registers */
+static const u16 iproc_pcie_reg_paxc_v2[] = {
+ [IPROC_PCIE_MSI_GIC_MODE] = 0x050,
+ [IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
+ [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
+ [IPROC_PCIE_MSI_ADDR_LO] = 0x07c,
+ [IPROC_PCIE_MSI_ADDR_HI] = 0x080,
+ [IPROC_PCIE_MSI_EN_CFG] = 0x09c,
+ [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
+ [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
+ [IPROC_PCIE_CFG_ADDR] = 0x1f8,
+ [IPROC_PCIE_CFG_DATA] = 0x1fc,
};
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
@@ -159,16 +426,26 @@ static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
writel(val, pcie->base + offset);
}
-static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
- enum iproc_pcie_reg reg,
- unsigned window, u32 val)
+/**
+ * APB error forwarding can be disabled while accessing the configuration
+ * registers of the endpoint device, to prevent unsupported requests
+ * (typically seen during enumeration of multi-function devices) from
+ * triggering a system exception.
+ */
+static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
+ bool disable)
{
- u16 offset = iproc_pcie_reg_offset(pcie, reg);
-
- if (iproc_pcie_reg_is_invalid(offset))
- return;
+ struct iproc_pcie *pcie = iproc_data(bus);
+ u32 val;
- writel(val, pcie->base + offset + (window * 8));
+ if (bus->number && pcie->has_apb_err_disable) {
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
+ if (disable)
+ val &= ~APB_ERR_EN;
+ else
+ val |= APB_ERR_EN;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
+ }
}
/**
@@ -204,7 +481,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
* PAXC is connected to an internally emulated EP within the SoC. It
* allows only one device.
*/
- if (pcie->type == IPROC_PCIE_PAXC)
+ if (pcie->ep_is_internal)
if (slot > 0)
return NULL;
@@ -222,26 +499,47 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
return (pcie->base + offset);
}
+static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ int ret;
+
+ iproc_pcie_apb_err_disable(bus, true);
+ ret = pci_generic_config_read32(bus, devfn, where, size, val);
+ iproc_pcie_apb_err_disable(bus, false);
+
+ return ret;
+}
+
+static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ int ret;
+
+ iproc_pcie_apb_err_disable(bus, true);
+ ret = pci_generic_config_write32(bus, devfn, where, size, val);
+ iproc_pcie_apb_err_disable(bus, false);
+
+ return ret;
+}
+
static struct pci_ops iproc_pcie_ops = {
.map_bus = iproc_pcie_map_cfg_bus,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
+ .read = iproc_pcie_config_read32,
+ .write = iproc_pcie_config_write32,
};
static void iproc_pcie_reset(struct iproc_pcie *pcie)
{
u32 val;
- if (pcie->type == IPROC_PCIE_PAXC) {
- val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
- val &= ~PAXC_RESET_MASK;
- iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
- udelay(100);
- val |= PAXC_RESET_MASK;
- iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
- udelay(100);
+ /*
+ * PAXC and the internal emulated endpoint device downstream should not
+ * be reset. If firmware has been loaded on the endpoint device at an
+ * earlier boot stage, reset here causes issues.
+ */
+ if (pcie->ep_is_internal)
return;
- }
/*
* Select perst_b signal as reset source. Put the device into reset,
@@ -270,7 +568,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
* PAXC connects to emulated endpoint devices directly and does not
* have a Serdes. Therefore skip the link detection logic here.
*/
- if (pcie->type == IPROC_PCIE_PAXC)
+ if (pcie->ep_is_internal)
return 0;
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
@@ -334,6 +632,58 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie)
iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
}
+static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
+ int window_idx)
+{
+ u32 val;
+
+ val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));
+
+ return !!(val & OARR_VALID);
+}
+
+static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
+ int size_idx, u64 axi_addr, u64 pci_addr)
+{
+ struct device *dev = pcie->dev;
+ u16 oarr_offset, omap_offset;
+
+ /*
+ * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
+ * on window index.
+ */
+ oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
+ window_idx));
+ omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
+ window_idx));
+ if (iproc_pcie_reg_is_invalid(oarr_offset) ||
+ iproc_pcie_reg_is_invalid(omap_offset))
+ return -EINVAL;
+
+ /*
+ * Program the OARR registers. The upper 32-bit OARR register is
+ * always right after the lower 32-bit OARR register.
+ */
+ writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
+ OARR_VALID, pcie->base + oarr_offset);
+ writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
+
+ /* now program the OMAP registers */
+ writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
+ writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
+
+ dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
+ window_idx, oarr_offset, &axi_addr, &pci_addr);
+ dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n",
+ readl(pcie->base + oarr_offset),
+ readl(pcie->base + oarr_offset + 4));
+ dev_info(dev, "omap lo 0x%x omap hi 0x%x\n",
+ readl(pcie->base + omap_offset),
+ readl(pcie->base + omap_offset + 4));
+
+ return 0;
+}
+
/**
* Some iProc SoCs require the SW to configure the outbound address mapping
*
@@ -350,24 +700,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
{
struct iproc_pcie_ob *ob = &pcie->ob;
struct device *dev = pcie->dev;
- unsigned i;
- u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
- u64 remainder;
-
- if (size > max_size) {
- dev_err(dev,
- "res size %pap exceeds max supported size 0x%llx\n",
- &size, max_size);
- return -EINVAL;
- }
-
- div64_u64_rem(size, ob->window_size, &remainder);
- if (remainder) {
- dev_err(dev,
- "res size %pap needs to be multiple of window size %pap\n",
- &size, &ob->window_size);
- return -EINVAL;
- }
+ int ret = -EINVAL, window_idx, size_idx;
if (axi_addr < ob->axi_offset) {
dev_err(dev, "axi address %pap less than offset %pap\n",
@@ -381,26 +714,70 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
*/
axi_addr -= ob->axi_offset;
- for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) {
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_LO, i,
- lower_32_bits(axi_addr) | OARR_VALID |
- (ob->set_oarr_size ? 1 : 0));
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_HI, i,
- upper_32_bits(axi_addr));
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_LO, i,
- lower_32_bits(pci_addr));
- iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_HI, i,
- upper_32_bits(pci_addr));
-
- size -= ob->window_size;
- if (size == 0)
+ /* iterate through all OARR/OMAP mapping windows */
+ for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
+ const struct iproc_pcie_ob_map *ob_map =
+ &pcie->ob_map[window_idx];
+
+ /*
+		 * If the current outbound window is already in use, move on to the
+ * next one.
+ */
+ if (iproc_pcie_ob_is_valid(pcie, window_idx))
+ continue;
+
+ /*
+ * Iterate through all supported window sizes within the
+ * OARR/OMAP pair to find a match. Go through the window sizes
+		 * in descending order.
+ */
+ for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
+ size_idx--) {
+ resource_size_t window_size =
+ ob_map->window_sizes[size_idx] * SZ_1M;
+
+ if (size < window_size)
+ continue;
+
+ if (!IS_ALIGNED(axi_addr, window_size) ||
+ !IS_ALIGNED(pci_addr, window_size)) {
+ dev_err(dev,
+ "axi %pap or pci %pap not aligned\n",
+ &axi_addr, &pci_addr);
+ return -EINVAL;
+ }
+
+ /*
+ * Match found! Program both OARR and OMAP and mark
+ * them as a valid entry.
+ */
+ ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
+ axi_addr, pci_addr);
+ if (ret)
+ goto err_ob;
+
+ size -= window_size;
+ if (size == 0)
+ return 0;
+
+ /*
+			 * If we are here, we are done with the current window
+			 * but have not yet finished all mappings. Move on to
+			 * the next window.
+ */
+ axi_addr += window_size;
+ pci_addr += window_size;
break;
-
- axi_addr += ob->window_size;
- pci_addr += ob->window_size;
+ }
}
- return 0;
+err_ob:
+ dev_err(dev, "unable to configure outbound mapping\n");
+ dev_err(dev,
+ "axi %pap, axi offset %pap, pci %pap, res size %pap\n",
+ &axi_addr, &ob->axi_offset, &pci_addr, &size);
+
+ return ret;
}
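For illustration only, here is a minimal standalone C sketch of the greedy matching that the outbound loop above performs, ignoring the per-window OARR bookkeeping and the alignment checks; the window sizes and the addresses are invented examples, not values taken from any iProc table.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical window sizes in MB, ascending like the driver tables */
	const uint64_t sizes_mb[] = { 128, 256, 512, 1024 };
	const int nr_sizes = 4;
	uint64_t size = 1536ULL << 20;		/* 1.5 GB left to map */
	uint64_t axi = 0x20000000, pci = 0x0;
	int s;

	while (size) {
		/* pick the largest supported size that still fits */
		for (s = nr_sizes - 1; s >= 0; s--) {
			uint64_t win = sizes_mb[s] << 20;

			if (size < win)
				continue;
			printf("map axi 0x%llx -> pci 0x%llx, %llu MB\n",
			       (unsigned long long)axi,
			       (unsigned long long)pci,
			       (unsigned long long)sizes_mb[s]);
			axi += win;
			pci += win;
			size -= win;
			break;
		}
		if (s < 0)
			return 1;	/* nothing fits: mapping fails */
	}
	return 0;
}

With these assumed sizes, a 1.5 GB region is covered by one 1024 MB window followed by one 512 MB window, which mirrors how the driver walks the size table from largest to smallest.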
static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
@@ -434,13 +811,323 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
return 0;
}
+static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
+ int region_idx)
+{
+ const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+ u32 val;
+
+ val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));
+
+ return !!(val & (BIT(ib_map->nr_sizes) - 1));
+}
+
+static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
+ enum iproc_pcie_ib_map_type type)
+{
+ return !!(ib_map->type == type);
+}
+
+static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
+ int size_idx, int nr_windows, u64 axi_addr,
+ u64 pci_addr, resource_size_t size)
+{
+ struct device *dev = pcie->dev;
+ const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+ u16 iarr_offset, imap_offset;
+ u32 val;
+ int window_idx;
+
+ iarr_offset = iproc_pcie_reg_offset(pcie,
+ MAP_REG(IPROC_PCIE_IARR0, region_idx));
+ imap_offset = iproc_pcie_reg_offset(pcie,
+ MAP_REG(IPROC_PCIE_IMAP0, region_idx));
+ if (iproc_pcie_reg_is_invalid(iarr_offset) ||
+ iproc_pcie_reg_is_invalid(imap_offset))
+ return -EINVAL;
+
+ dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
+ region_idx, iarr_offset, &axi_addr, &pci_addr);
+
+ /*
+ * Program the IARR registers. The upper 32-bit IARR register is
+ * always right after the lower 32-bit IARR register.
+ */
+ writel(lower_32_bits(pci_addr) | BIT(size_idx),
+ pcie->base + iarr_offset);
+ writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
+
+ dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n",
+ readl(pcie->base + iarr_offset),
+ readl(pcie->base + iarr_offset + 4));
+
+ /*
+ * Now program the IMAP registers. Each IARR region may have one or
+ * more IMAP windows.
+ */
+ size >>= ilog2(nr_windows);
+ for (window_idx = 0; window_idx < nr_windows; window_idx++) {
+ val = readl(pcie->base + imap_offset);
+ val |= lower_32_bits(axi_addr) | IMAP_VALID;
+ writel(val, pcie->base + imap_offset);
+ writel(upper_32_bits(axi_addr),
+ pcie->base + imap_offset + ib_map->imap_addr_offset);
+
+ dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
+ window_idx, readl(pcie->base + imap_offset),
+ readl(pcie->base + imap_offset +
+ ib_map->imap_addr_offset));
+
+ imap_offset += ib_map->imap_window_offset;
+ axi_addr += size;
+ }
+
+ return 0;
+}
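As a rough illustration of the "size >>= ilog2(nr_windows)" split performed above, the following standalone sketch assumes a 1 GB region mapped through 4 IMAP windows (neither value is taken from the driver tables) and prints how the region would be divided across consecutive AXI offsets.

#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long long size = 1ULL << 30;	/* 1 GB region, assumed */
	unsigned int nr_windows = 4;		/* assumed */
	unsigned long long per_window = size >> ilog2_u(nr_windows);
	unsigned int i;

	for (i = 0; i < nr_windows; i++)
		printf("imap window %u: axi offset 0x%llx, %llu MB\n",
		       i, i * per_window, per_window >> 20);
	return 0;
}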
+
+static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
+ struct of_pci_range *range,
+ enum iproc_pcie_ib_map_type type)
+{
+ struct device *dev = pcie->dev;
+ struct iproc_pcie_ib *ib = &pcie->ib;
+ int ret;
+ unsigned int region_idx, size_idx;
+ u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
+ resource_size_t size = range->size;
+
+ /* iterate through all IARR mapping regions */
+ for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
+ const struct iproc_pcie_ib_map *ib_map =
+ &pcie->ib_map[region_idx];
+
+ /*
+		 * If the current inbound region is already in use or is not of
+		 * a compatible type, move on to the next one.
+ */
+ if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
+ !iproc_pcie_ib_check_type(ib_map, type))
+ continue;
+
+ /* iterate through all supported region sizes to find a match */
+ for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
+ resource_size_t region_size =
+ ib_map->region_sizes[size_idx] * ib_map->size_unit;
+
+ if (size != region_size)
+ continue;
+
+ if (!IS_ALIGNED(axi_addr, region_size) ||
+ !IS_ALIGNED(pci_addr, region_size)) {
+ dev_err(dev,
+ "axi %pap or pci %pap not aligned\n",
+ &axi_addr, &pci_addr);
+ return -EINVAL;
+ }
+
+ /* Match found! Program IARR and all IMAP windows. */
+ ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
+ ib_map->nr_windows, axi_addr,
+ pci_addr, size);
+ if (ret)
+ goto err_ib;
+ else
+ return 0;
+
+ }
+ }
+ ret = -EINVAL;
+
+err_ib:
+ dev_err(dev, "unable to configure inbound mapping\n");
+ dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
+ &axi_addr, &pci_addr, &size);
+
+ return ret;
+}
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ const int na = 3, ns = 2;
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->np = parser->pna + na + ns;
+
+ parser->range = of_get_property(node, "dma-ranges", &rlen);
+ if (!parser->range)
+ return -ENOENT;
+
+ parser->end = parser->range + rlen / sizeof(__be32);
+ return 0;
+}
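A small sketch of the cell accounting done by the parser above, under the assumption of a parent node with #address-cells = 2; the 3-cell PCI address and 2-cell size widths follow the standard PCI dma-ranges layout.

#include <stdio.h>

int main(void)
{
	const int na = 3, ns = 2;	/* PCI address and size cells */
	int pna = 2;			/* parent #address-cells, assumed */
	int np = pna + na + ns;

	printf("each dma-ranges entry spans %d cells (%d bytes)\n",
	       np, np * 4);
	return 0;
}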
+
+static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int ret;
+
+ /* Get the dma-ranges from DT */
+ ret = pci_dma_range_parser_init(&parser, pcie->dev->of_node);
+ if (ret)
+ return ret;
+
+ for_each_of_pci_range(&parser, &range) {
+ /* Each range entry corresponds to an inbound mapping region */
+ ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iproc_pcie_get_msi(struct iproc_pcie *pcie,
+ struct device_node *msi_node,
+ u64 *msi_addr)
+{
+ struct device *dev = pcie->dev;
+ int ret;
+ struct resource res;
+
+ /*
+	 * Check if the MSI node points to an ARM GICv3 ITS, which is the
+	 * only supported external MSI controller that requires steering.
+ */
+ if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
+ dev_err(dev, "unable to find compatible MSI controller\n");
+ return -ENODEV;
+ }
+
+ /* derive GITS_TRANSLATER address from GICv3 */
+ ret = of_address_to_resource(msi_node, 0, &res);
+ if (ret < 0) {
+ dev_err(dev, "unable to obtain MSI controller resources\n");
+ return ret;
+ }
+
+ *msi_addr = res.start + GITS_TRANSLATER;
+ return 0;
+}
+
+static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+{
+ int ret;
+ struct of_pci_range range;
+
+ memset(&range, 0, sizeof(range));
+ range.size = SZ_32K;
+ range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
+
+ ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
+ return ret;
+}
+
+static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+{
+ u32 val;
+
+ /*
+	 * Program bits [43:13] of the address of the GITS_TRANSLATER register
+	 * into bits [30:0] of the MSI base address register. In fact, in all
+	 * iProc based SoCs, all I/O register bases are well below the 32-bit
+	 * boundary, so we can safely assume bits [43:32] are always zero.
+ */
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
+ (u32)(msi_addr >> 13));
+
+ /* use a default 8K window size */
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);
+
+ /* steering MSI to GICv3 ITS */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
+ val |= GIC_V3_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);
+
+ /*
+	 * Program bits [43:2] of the address of the GITS_TRANSLATER register
+	 * into the iProc MSI address registers.
+ */
+ msi_addr >>= 2;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
+ upper_32_bits(msi_addr));
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
+ lower_32_bits(msi_addr));
+
+ /* enable MSI */
+ val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
+ val |= MSI_ENABLE_CFG;
+ iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
+}
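To make the two shifts above concrete, here is a hedged standalone example; the ITS base address is invented, and only the GITS_TRANSLATER offset (0x10040 within the ITS register frame) follows the GICv3 layout.

#include <stdint.h>
#include <stdio.h>

#define GITS_TRANSLATER	0x10040		/* offset within the ITS frame */

int main(void)
{
	uint64_t its_base = 0x63c20000;			/* assumed example */
	uint64_t msi_addr = its_base + GITS_TRANSLATER;
	uint32_t base = (uint32_t)(msi_addr >> 13);	/* bits [43:13] */
	uint64_t shifted = msi_addr >> 2;		/* bits [43:2]  */

	printf("MSI_BASE_ADDR = 0x%x\n", base);
	printf("MSI_ADDR_HI   = 0x%x\n", (uint32_t)(shifted >> 32));
	printf("MSI_ADDR_LO   = 0x%x\n", (uint32_t)(shifted & 0xffffffffU));
	return 0;
}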
+
+static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
+ struct device_node *msi_node)
+{
+ struct device *dev = pcie->dev;
+ int ret;
+ u64 msi_addr;
+
+	ret = iproc_pcie_get_msi(pcie, msi_node, &msi_addr);
+ if (ret < 0) {
+ dev_err(dev, "msi steering failed\n");
+ return ret;
+ }
+
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXB_V2:
+ ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
+ if (ret)
+ return ret;
+ break;
+ case IPROC_PCIE_PAXC_V2:
+ iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
{
struct device_node *msi_node;
+ int ret;
+
+ /*
+	 * Either an "msi-parent" phandle or an "msi-map" property needs to
+	 * exist for us to obtain the MSI node.
+ */
msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
- if (!msi_node)
- return -ENODEV;
+ if (!msi_node) {
+ const __be32 *msi_map = NULL;
+ int len;
+ u32 phandle;
+
+ msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
+ if (!msi_map)
+ return -ENODEV;
+
+ phandle = be32_to_cpup(msi_map + 1);
+ msi_node = of_find_node_by_phandle(phandle);
+ if (!msi_node)
+ return -ENODEV;
+ }
+
+ /*
+ * Certain revisions of the iProc PCIe controller require additional
+	 * configuration to steer the MSI writes towards an external MSI
+ * controller.
+ */
+ if (pcie->need_msi_steer) {
+ ret = iproc_pcie_msi_steer(pcie, msi_node);
+ if (ret)
+ return ret;
+ }
/*
* If another MSI controller is being used, the call below should fail
@@ -454,6 +1141,65 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
iproc_msi_exit(pcie);
}
+static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int reg_idx;
+ const u16 *regs;
+
+ switch (pcie->type) {
+ case IPROC_PCIE_PAXB_BCMA:
+ regs = iproc_pcie_reg_paxb_bcma;
+ break;
+ case IPROC_PCIE_PAXB:
+ regs = iproc_pcie_reg_paxb;
+ pcie->has_apb_err_disable = true;
+ if (pcie->need_ob_cfg) {
+ pcie->ob_map = paxb_ob_map;
+ pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
+ }
+ break;
+ case IPROC_PCIE_PAXB_V2:
+ regs = iproc_pcie_reg_paxb_v2;
+ pcie->has_apb_err_disable = true;
+ if (pcie->need_ob_cfg) {
+ pcie->ob_map = paxb_v2_ob_map;
+ pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
+ }
+ pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
+ pcie->ib_map = paxb_v2_ib_map;
+ pcie->need_msi_steer = true;
+ break;
+ case IPROC_PCIE_PAXC:
+ regs = iproc_pcie_reg_paxc;
+ pcie->ep_is_internal = true;
+ break;
+ case IPROC_PCIE_PAXC_V2:
+ regs = iproc_pcie_reg_paxc_v2;
+ pcie->ep_is_internal = true;
+ pcie->need_msi_steer = true;
+ break;
+ default:
+ dev_err(dev, "incompatible iProc PCIe interface\n");
+ return -EINVAL;
+ }
+
+ pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
+ sizeof(*pcie->reg_offsets),
+ GFP_KERNEL);
+ if (!pcie->reg_offsets)
+ return -ENOMEM;
+
+ /* go through the register table and populate all valid registers */
+ pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
+ IPROC_PCIE_REG_INVALID : regs[0];
+ for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
+ pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
+ regs[reg_idx] : IPROC_PCIE_REG_INVALID;
+
+ return 0;
+}
+
int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
{
struct device *dev;
@@ -462,6 +1208,13 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
struct pci_bus *bus;
dev = pcie->dev;
+
+ ret = iproc_pcie_rev_init(pcie);
+ if (ret) {
+ dev_err(dev, "unable to initialize controller parameters\n");
+ return ret;
+ }
+
ret = devm_request_pci_bus_resources(dev, res);
if (ret)
return ret;
@@ -478,19 +1231,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
goto err_exit_phy;
}
- switch (pcie->type) {
- case IPROC_PCIE_PAXB:
- pcie->reg_offsets = iproc_pcie_reg_paxb;
- break;
- case IPROC_PCIE_PAXC:
- pcie->reg_offsets = iproc_pcie_reg_paxc;
- break;
- default:
- dev_err(dev, "incompatible iProc PCIe interface\n");
- ret = -EINVAL;
- goto err_power_off_phy;
- }
-
iproc_pcie_reset(pcie);
if (pcie->need_ob_cfg) {
@@ -501,6 +1241,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
}
}
+ ret = iproc_pcie_map_dma_ranges(pcie);
+ if (ret && ret != -ENOENT)
+ goto err_power_off_phy;
+
#ifdef CONFIG_ARM
pcie->sysdata.private_data = pcie;
sysdata = &pcie->sysdata;
@@ -530,7 +1274,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
- pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+
+ if (pcie->map_irq)
+ pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+
pci_bus_add_devices(bus);
return 0;
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index e84d93c53c7b..04fed8e907f1 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -24,23 +24,34 @@
* endpoint devices.
*/
enum iproc_pcie_type {
- IPROC_PCIE_PAXB = 0,
+ IPROC_PCIE_PAXB_BCMA = 0,
+ IPROC_PCIE_PAXB,
+ IPROC_PCIE_PAXB_V2,
IPROC_PCIE_PAXC,
+ IPROC_PCIE_PAXC_V2,
};
/**
* iProc PCIe outbound mapping
- * @set_oarr_size: indicates the OARR size bit needs to be set
* @axi_offset: offset from the AXI address to the internal address used by
* the iProc PCIe core
- * @window_size: outbound window size
+ * @nr_windows: total number of supported outbound mapping windows
*/
struct iproc_pcie_ob {
- bool set_oarr_size;
resource_size_t axi_offset;
- resource_size_t window_size;
+ unsigned int nr_windows;
};
+/**
+ * iProc PCIe inbound mapping
+ * @nr_regions: total number of supported inbound mapping regions
+ */
+struct iproc_pcie_ib {
+ unsigned int nr_regions;
+};
+
+struct iproc_pcie_ob_map;
+struct iproc_pcie_ib_map;
struct iproc_msi;
/**
@@ -55,14 +66,25 @@ struct iproc_msi;
* @root_bus: pointer to root bus
* @phy: optional PHY device that controls the Serdes
* @map_irq: function callback to map interrupts
+ * @ep_is_internal: indicates an internal emulated endpoint device is connected
+ * @has_apb_err_disable: indicates the controller can be configured to prevent
+ * unsupported requests from being forwarded as APB bus errors
+ *
* @need_ob_cfg: indicates SW needs to configure the outbound mapping window
- * @ob: outbound mapping parameters
+ * @ob: outbound mapping related parameters
+ * @ob_map: outbound mapping related parameters specific to the controller
+ *
+ * @ib: inbound mapping related parameters
+ * @ib_map: inbound mapping region related parameters
+ *
+ * @need_msi_steer: indicates additional configuration of the iProc PCIe
+ * controller is required to steer MSI writes to an external interrupt controller
* @msi: MSI data
*/
struct iproc_pcie {
struct device *dev;
enum iproc_pcie_type type;
- const u16 *reg_offsets;
+ u16 *reg_offsets;
void __iomem *base;
phys_addr_t base_addr;
#ifdef CONFIG_ARM
@@ -71,8 +93,17 @@ struct iproc_pcie {
struct pci_bus *root_bus;
struct phy *phy;
int (*map_irq)(const struct pci_dev *, u8, u8);
+ bool ep_is_internal;
+ bool has_apb_err_disable;
+
bool need_ob_cfg;
struct iproc_pcie_ob ob;
+ const struct iproc_pcie_ob_map *ob_map;
+
+ struct iproc_pcie_ib ib;
+ const struct iproc_pcie_ib_map *ib_map;
+
+ bool need_msi_steer;
struct iproc_msi *msi;
};
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index 35936409b2d4..734ba0d4a5c8 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -36,11 +36,17 @@
#include "pcie-designware.h"
+#define PCIE20_PARF_SYS_CTRL 0x00
#define PCIE20_PARF_PHY_CTRL 0x40
#define PCIE20_PARF_PHY_REFCLK 0x4C
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16c
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
+#define PCIE20_PARF_LTSSM 0x1B0
+#define PCIE20_PARF_SID_OFFSET 0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -72,9 +78,18 @@ struct qcom_pcie_resources_v1 {
struct regulator *vdda;
};
+struct qcom_pcie_resources_v2 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct clk *cfg_clk;
+ struct clk *pipe_clk;
+};
+
union qcom_pcie_resources {
struct qcom_pcie_resources_v0 v0;
struct qcom_pcie_resources_v1 v1;
+ struct qcom_pcie_resources_v2 v2;
};
struct qcom_pcie;
@@ -82,7 +97,9 @@ struct qcom_pcie;
struct qcom_pcie_ops {
int (*get_resources)(struct qcom_pcie *pcie);
int (*init)(struct qcom_pcie *pcie);
+ int (*post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
+ void (*ltssm_enable)(struct qcom_pcie *pcie);
};
struct qcom_pcie {
@@ -116,17 +133,35 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
return dw_handle_msi_irq(pp);
}
-static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
- if (dw_pcie_link_up(&pcie->pp))
- return 0;
-
/* enable link training */
val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
+static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+ val |= BIT(8);
+ writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+ if (dw_pcie_link_up(&pcie->pp))
+ return 0;
+
+ /* Enable Link Training state machine */
+ if (pcie->ops->ltssm_enable)
+ pcie->ops->ltssm_enable(pcie);
return dw_pcie_wait_for_link(&pcie->pp);
}
@@ -421,6 +456,113 @@ err_res:
return ret;
}
+static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct device *dev = pcie->pp.dev;
+
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->cfg_clk = devm_clk_get(dev, "cfg");
+ if (IS_ERR(res->cfg_clk))
+ return PTR_ERR(res->cfg_clk);
+
+ res->master_clk = devm_clk_get(dev, "bus_master");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "bus_slave");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(res->pipe_clk))
+ return PTR_ERR(res->pipe_clk);
+
+ return 0;
+}
+
+static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct device *dev = pcie->pp.dev;
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(res->cfg_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable cfg clock\n");
+ goto err_cfg_clk;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable master clock\n");
+ goto err_master_clk;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable slave clock\n");
+ goto err_slave_clk;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_slave_clk:
+ clk_disable_unprepare(res->master_clk);
+err_master_clk:
+ clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+ clk_disable_unprepare(res->aux_clk);
+
+ return ret;
+}
+
+static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+ struct device *dev = pcie->pp.dev;
+ int ret;
+
+ ret = clk_prepare_enable(res->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int qcom_pcie_link_up(struct pcie_port *pp)
{
struct qcom_pcie *pcie = to_qcom_pcie(pp);
@@ -429,6 +571,17 @@ static int qcom_pcie_link_up(struct pcie_port *pp)
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
+static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+
+ clk_disable_unprepare(res->pipe_clk);
+ clk_disable_unprepare(res->slave_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->cfg_clk);
+ clk_disable_unprepare(res->aux_clk);
+}
+
static void qcom_pcie_host_init(struct pcie_port *pp)
{
struct qcom_pcie *pcie = to_qcom_pcie(pp);
@@ -444,6 +597,9 @@ static void qcom_pcie_host_init(struct pcie_port *pp)
if (ret)
goto err_deinit;
+ if (pcie->ops->post_init)
+ pcie->ops->post_init(pcie);
+
dw_pcie_setup_rc(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
@@ -487,12 +643,22 @@ static const struct qcom_pcie_ops ops_v0 = {
.get_resources = qcom_pcie_get_resources_v0,
.init = qcom_pcie_init_v0,
.deinit = qcom_pcie_deinit_v0,
+ .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};
static const struct qcom_pcie_ops ops_v1 = {
.get_resources = qcom_pcie_get_resources_v1,
.init = qcom_pcie_init_v1,
.deinit = qcom_pcie_deinit_v1,
+ .ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
+};
+
+static const struct qcom_pcie_ops ops_v2 = {
+ .get_resources = qcom_pcie_get_resources_v2,
+ .init = qcom_pcie_init_v2,
+ .post_init = qcom_pcie_post_init_v2,
+ .deinit = qcom_pcie_deinit_v2,
+ .ltssm_enable = qcom_pcie_v2_ltssm_enable,
};
static int qcom_pcie_probe(struct platform_device *pdev)
@@ -572,6 +738,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
+ { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
{ }
};
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 62700d1896f4..aca85be101f8 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1071,13 +1071,14 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
static const struct of_device_id rcar_pcie_of_match[] = {
{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
- { .compatible = "renesas,pcie-rcar-gen2",
- .data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7790",
.data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7791",
.data = rcar_pcie_hw_init_gen2 },
+ { .compatible = "renesas,pcie-rcar-gen2",
+ .data = rcar_pcie_hw_init_gen2 },
{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
+ { .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init },
{},
};
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index e04f69beb42d..f2dca7bb0b39 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -53,6 +53,7 @@
#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
+#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
@@ -135,13 +136,14 @@
#define PCIE_RC_CONFIG_VENDOR (PCIE_RC_CONFIG_BASE + 0x00)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
#define PCIE_RC_CONFIG_SCC_SHIFT 16
+#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
+#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
+#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
+#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
-#define PCIE_RC_CONFIG_LCS_RETRAIN_LINK BIT(5)
-#define PCIE_RC_CONFIG_LCS_LBMIE BIT(10)
-#define PCIE_RC_CONFIG_LCS_LABIE BIT(11)
-#define PCIE_RC_CONFIG_LCS_LBMS BIT(30)
-#define PCIE_RC_CONFIG_LCS_LAMS BIT(31)
#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
+#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
#define PCIE_CORE_AXI_CONF_BASE 0xc00000
#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
@@ -203,8 +205,14 @@ struct rockchip_pcie {
struct gpio_desc *ep_gpio;
u32 lanes;
u8 root_bus_nr;
+ int link_gen;
struct device *dev;
struct irq_domain *irq_domain;
+ u32 io_size;
+ int offset;
+ phys_addr_t io_bus_addr;
+ u32 mem_size;
+ phys_addr_t mem_bus_addr;
};
static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
@@ -223,7 +231,7 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
u32 status;
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCIE_RC_CONFIG_LCS_LBMIE | PCIE_RC_CONFIG_LCS_LABIE);
+ status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
}
@@ -232,7 +240,7 @@ static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
u32 status;
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= (PCIE_RC_CONFIG_LCS_LBMS | PCIE_RC_CONFIG_LCS_LAMS);
+ status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
}
@@ -398,6 +406,40 @@ static struct pci_ops rockchip_pcie_ops = {
.write = rockchip_pcie_wr_conf,
};
+static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+{
+ u32 status, curr, scale, power;
+
+ if (IS_ERR(rockchip->vpcie3v3))
+ return;
+
+ /*
+ * Set RC's captured slot power limit and scale if
+	 * Set the RC's captured slot power limit and scale if
+	 * vpcie3v3 is available. The default values are both zero,
+	 * which means software should set them according to the
+	 * actual power supply.
+ curr = regulator_get_current_limit(rockchip->vpcie3v3);
+ if (curr > 0) {
+ scale = 3; /* 0.001x */
+ curr = curr / 1000; /* convert to mA */
+ power = (curr * 3300) / 1000; /* milliwatt */
+ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ if (!scale) {
+ dev_warn(rockchip->dev, "invalid power supply\n");
+ return;
+ }
+ scale--;
+ power = power / 10;
+ }
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+ status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+ (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ }
+}
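A worked example of the scaling loop above, assuming the regulator reports a 3 A limit: 3 A at 3.3 V is 9900 mW, which overflows the 8-bit field, so the value is divided by 10 twice while the scale code drops from 3 (0.001x) to 1 (0.1x), ending at 99 x 0.1 W = 9.9 W. This is a simplified sketch of that arithmetic, not the driver function itself.

#include <stdio.h>

int main(void)
{
	unsigned int curr_ua = 3000000;				/* 3 A, assumed */
	unsigned int power = (curr_ua / 1000) * 3300 / 1000;	/* mW at 3.3 V  */
	unsigned int scale = 3;					/* 0.001x       */

	while (power > 0xff && scale) {
		scale--;
		power /= 10;
	}
	printf("CSPL value %u, scale code %u\n", power, scale);
	return 0;
}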
+
/**
* rockchip_pcie_init_port - Initialize hardware
* @rockchip: PCIe port information
@@ -429,26 +471,6 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
return err;
}
- udelay(10);
-
- err = reset_control_deassert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "deassert pm_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_deassert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_deassert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- return err;
- }
-
err = phy_init(rockchip->phy);
if (err < 0) {
dev_err(dev, "fail to init phy, err %d\n", err);
@@ -479,14 +501,40 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
return err;
}
+ udelay(10);
+
+ err = reset_control_deassert(rockchip->pm_rst);
+ if (err) {
+ dev_err(dev, "deassert pm_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->aclk_rst);
+ if (err) {
+ dev_err(dev, "deassert aclk_rst err %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(rockchip->pclk_rst);
+ if (err) {
+ dev_err(dev, "deassert pclk_rst err %d\n", err);
+ return err;
+ }
+
+ if (rockchip->link_gen == 2)
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2,
+ PCIE_CLIENT_CONFIG);
+ else
+ rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
+ PCIE_CLIENT_CONFIG);
+
rockchip_pcie_write(rockchip,
PCIE_CLIENT_CONF_ENABLE |
PCIE_CLIENT_LINK_TRAIN_ENABLE |
PCIE_CLIENT_ARI_ENABLE |
PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
- PCIE_CLIENT_MODE_RC |
- PCIE_CLIENT_GEN_SEL_2,
- PCIE_CLIENT_CONFIG);
+ PCIE_CLIENT_MODE_RC,
+ PCIE_CLIENT_CONFIG);
err = phy_power_on(rockchip->phy);
if (err) {
@@ -522,21 +570,19 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
return err;
}
- /*
- * We need to read/write PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 before
- * enabling ASPM. Otherwise L1PwrOnSc and L1PwrOnVal isn't
- * reliable and enabling ASPM doesn't work. This is a controller
- * bug we need to work around.
- */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
-
/* Fix the transmitted FTS count desired to exit from L0s. */
status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
- status = (status & PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+ status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
(PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
+ rockchip_pcie_set_power_limit(rockchip);
+
+ /* Set RC's clock architecture as common clock */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_CCC;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
/* Enable Gen1 training */
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
PCIE_CLIENT_CONFIG);
@@ -563,35 +609,37 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
msleep(20);
}
- /*
- * Enable retrain for gen2. This should be configured only after
- * gen1 finished.
- */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCIE_RC_CONFIG_LCS_RETRAIN_LINK;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ if (rockchip->link_gen == 2) {
+ /*
+		 * Enable retraining for gen2. This should be configured only
+		 * after gen1 training has finished.
+ */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ timeout = jiffies + msecs_to_jiffies(500);
+ for (;;) {
+ status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
+ PCIE_CORE_PL_CONF_SPEED_5G) {
+ dev_dbg(dev, "PCIe link training gen2 pass!\n");
+ break;
+ }
- timeout = jiffies + msecs_to_jiffies(500);
- for (;;) {
- status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
- if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
- PCIE_CORE_PL_CONF_SPEED_5G) {
- dev_dbg(dev, "PCIe link training gen2 pass!\n");
- break;
- }
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
+ break;
+ }
- if (time_after(jiffies, timeout)) {
- dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
- break;
+ msleep(20);
}
-
- msleep(20);
}
/* Check the final link width from negotiated lane counter from MGMT */
status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
- status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
- PCIE_CORE_PL_CONF_LANE_MASK);
+ status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+ PCIE_CORE_PL_CONF_LANE_SHIFT);
dev_dbg(dev, "current link width is x%d\n", status);
rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
@@ -599,6 +647,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip,
PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
PCIE_RC_CONFIG_RID_CCR);
+
+ /* Clear THP cap's next cap pointer to remove L1 substate cap */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
+ status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
+
rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
rockchip_pcie_write(rockchip,
@@ -794,6 +848,10 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
rockchip->lanes = 1;
}
+ rockchip->link_gen = of_pci_get_max_link_speed(node);
+ if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
+ rockchip->link_gen = 2;
+
rockchip->core_rst = devm_reset_control_get(dev, "core");
if (IS_ERR(rockchip->core_rst)) {
if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
@@ -1087,6 +1145,50 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
return 0;
}
+static int rockchip_cfg_atu(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->dev;
+ int offset;
+ int err;
+ int reg_no;
+
+ for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+ AXI_WRAPPER_MEM_WRITE,
+ 20 - 1,
+ rockchip->mem_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC mem outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+ if (err) {
+ dev_err(dev, "program RC mem inbound ATU failed\n");
+ return err;
+ }
+
+ offset = rockchip->mem_size >> 20;
+ for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+ err = rockchip_pcie_prog_ob_atu(rockchip,
+ reg_no + 1 + offset,
+ AXI_WRAPPER_IO_WRITE,
+ 20 - 1,
+ rockchip->io_bus_addr +
+ (reg_no << 20),
+ 0);
+ if (err) {
+ dev_err(dev, "program RC io outbound ATU failed\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
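A quick sketch of the region numbering used by rockchip_cfg_atu() above, with assumed (not real) window sizes: each outbound ATU region covers 1 MB, the MEM regions start at index 1, and the I/O regions follow immediately after them.

#include <stdio.h>

int main(void)
{
	unsigned int mem_size = 64 << 20;	/* 64 MB MEM window, assumed */
	unsigned int io_size  = 1 << 20;	/* 1 MB I/O window, assumed  */
	unsigned int mem_regions = mem_size >> 20;
	unsigned int io_first = 1 + mem_regions;

	printf("MEM outbound regions 1..%u, I/O outbound regions %u..%u\n",
	       mem_regions, io_first, io_first + (io_size >> 20) - 1);
	return 0;
}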
+
static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct rockchip_pcie *rockchip;
@@ -1096,13 +1198,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
resource_size_t io_base;
struct resource *mem;
struct resource *io;
- phys_addr_t io_bus_addr = 0;
- u32 io_size;
- phys_addr_t mem_bus_addr = 0;
- u32 mem_size = 0;
- int reg_no;
int err;
- int offset;
LIST_HEAD(res);
@@ -1169,14 +1265,13 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
goto err_vpcie;
/* Get the I/O and memory ranges from DT */
- io_size = 0;
resource_list_for_each_entry(win, &res) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
io = win->res;
io->name = "I/O";
- io_size = resource_size(io);
- io_bus_addr = io->start - win->offset;
+ rockchip->io_size = resource_size(io);
+ rockchip->io_bus_addr = io->start - win->offset;
err = pci_remap_iospace(io, io_base);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
@@ -1187,8 +1282,8 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
case IORESOURCE_MEM:
mem = win->res;
mem->name = "MEM";
- mem_size = resource_size(mem);
- mem_bus_addr = mem->start - win->offset;
+ rockchip->mem_size = resource_size(mem);
+ rockchip->mem_bus_addr = mem->start - win->offset;
break;
case IORESOURCE_BUS:
rockchip->root_bus_nr = win->res->start;
@@ -1198,45 +1293,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
}
}
- if (mem_size) {
- for (reg_no = 0; reg_no < (mem_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
- AXI_WRAPPER_MEM_WRITE,
- 20 - 1,
- mem_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC mem outbound ATU failed\n");
- goto err_vpcie;
- }
- }
- }
-
- err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
- if (err) {
- dev_err(dev, "program RC mem inbound ATU failed\n");
+ err = rockchip_cfg_atu(rockchip);
+ if (err)
goto err_vpcie;
- }
-
- offset = mem_size >> 20;
-
- if (io_size) {
- for (reg_no = 0; reg_no < (io_size >> 20); reg_no++) {
- err = rockchip_pcie_prog_ob_atu(rockchip,
- reg_no + 1 + offset,
- AXI_WRAPPER_IO_WRITE,
- 20 - 1,
- io_bus_addr +
- (reg_no << 20),
- 0);
- if (err) {
- dev_err(dev, "program RC io outbound ATU failed\n");
- goto err_vpcie;
- }
- }
- }
-
bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res);
if (!bus) {
err = -ENOMEM;
@@ -1249,9 +1308,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
-
- dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
-
return err;
err_vpcie:
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 3cf197ba7f37..dafe8b88d97d 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -296,8 +296,4 @@ static struct platform_driver spear13xx_pcie_driver = {
},
};
-static int __init spear13xx_pcie_init(void)
-{
- return platform_driver_register(&spear13xx_pcie_driver);
-}
-device_initcall(spear13xx_pcie_init);
+builtin_platform_driver(spear13xx_pcie_driver);
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 37e29b580be3..18ef1a93c10a 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -39,7 +40,6 @@ static DEFINE_RAW_SPINLOCK(list_lock);
/**
* struct vmd_irq - private data to map driver IRQ to the VMD shared vector
* @node: list item for parent traversal.
- * @rcu: RCU callback item for freeing.
* @irq: back pointer to parent.
* @enabled: true if driver enabled IRQ
* @virq: the virtual IRQ value provided to the requesting driver.
@@ -49,7 +49,6 @@ static DEFINE_RAW_SPINLOCK(list_lock);
*/
struct vmd_irq {
struct list_head node;
- struct rcu_head rcu;
struct vmd_irq_list *irq;
bool enabled;
unsigned int virq;
@@ -58,11 +57,13 @@ struct vmd_irq {
/**
* struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
* @irq_list: the list of irq's the VMD one demuxes to.
+ * @srcu: SRCU struct for local synchronization.
* @count: number of child IRQs assigned to this vector; used to track
* sharing.
*/
struct vmd_irq_list {
struct list_head irq_list;
+ struct srcu_struct srcu;
unsigned int count;
};
@@ -224,14 +225,14 @@ static void vmd_msi_free(struct irq_domain *domain,
struct vmd_irq *vmdirq = irq_get_chip_data(virq);
unsigned long flags;
- synchronize_rcu();
+ synchronize_srcu(&vmdirq->irq->srcu);
/* XXX: Potential optimization to rebalance */
raw_spin_lock_irqsave(&list_lock, flags);
vmdirq->irq->count--;
raw_spin_unlock_irqrestore(&list_lock, flags);
- kfree_rcu(vmdirq, rcu);
+ kfree(vmdirq);
}
static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
@@ -646,11 +647,12 @@ static irqreturn_t vmd_irq(int irq, void *data)
{
struct vmd_irq_list *irqs = data;
struct vmd_irq *vmdirq;
+ int idx;
- rcu_read_lock();
+ idx = srcu_read_lock(&irqs->srcu);
list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
generic_handle_irq(vmdirq->virq);
- rcu_read_unlock();
+ srcu_read_unlock(&irqs->srcu, idx);
return IRQ_HANDLED;
}
@@ -696,6 +698,10 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENOMEM;
for (i = 0; i < vmd->msix_count; i++) {
+ err = init_srcu_struct(&vmd->irqs[i].srcu);
+ if (err)
+ return err;
+
INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
vmd_irq, 0, "vmd", &vmd->irqs[i]);
@@ -714,12 +720,20 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
}
+static void vmd_cleanup_srcu(struct vmd_dev *vmd)
+{
+ int i;
+
+ for (i = 0; i < vmd->msix_count; i++)
+ cleanup_srcu_struct(&vmd->irqs[i].srcu);
+}
+
static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
vmd_detach_resources(vmd);
- pci_set_drvdata(dev, NULL);
+ vmd_cleanup_srcu(vmd);
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
pci_remove_root_bus(vmd->bus);
@@ -727,7 +741,7 @@ static void vmd_remove(struct pci_dev *dev)
irq_domain_remove(vmd->irq_domain);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a46b585fae31..5ed2dcaa8e27 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -222,35 +222,6 @@ static void acpiphp_post_dock_fixup(struct acpi_device *adev)
acpiphp_let_context_go(context);
}
-/* Check whether the PCI device is managed by native PCIe hotplug driver */
-static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
-{
- u32 reg32;
- acpi_handle tmp;
- struct acpi_pci_root *root;
-
- /* Check whether the PCIe port supports native PCIe hotplug */
- if (pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32))
- return false;
- if (!(reg32 & PCI_EXP_SLTCAP_HPC))
- return false;
-
- /*
- * Check whether native PCIe hotplug has been enabled for
- * this PCIe hierarchy.
- */
- tmp = acpi_find_root_bridge_handle(pdev);
- if (!tmp)
- return false;
- root = acpi_pci_find_root(tmp);
- if (!root)
- return false;
- if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
- return false;
-
- return true;
-}
-
/**
* acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
* @handle: ACPI handle of the object to add a context to.
@@ -334,7 +305,7 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
* expose slots to user space in those cases.
*/
if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
- && !(pdev && device_is_managed_by_native_pciehp(pdev))) {
+ && !(pdev && pdev->is_hotplug_bridge && pciehp_is_native(pdev))) {
unsigned long long sun;
int retval;
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 74f3a0695b43..ec009a7dba20 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -867,7 +867,8 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_not_supported);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err_disable_device;
}
/* TODO: This code can be made to support non-Compaq or Intel
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index fea0b8b33589..56013d0daf7f 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -23,6 +23,9 @@
*
* Send feedback to <kristen.c.accardi@intel.com>
*
+ * Authors:
+ * Greg Kroah-Hartman <greg@kroah.com>
+ * Scott Murray <scottm@somanetworks.com>
*/
#include <linux/module.h> /* try_module_get & module_put */
@@ -50,15 +53,9 @@
#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME, ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME, ## arg)
-
/* local variables */
static bool debug;
-#define DRIVER_VERSION "0.5"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Scott Murray <scottm@somanetworks.com>"
-#define DRIVER_DESC "PCI Hot Plug PCI Core"
-
-
static LIST_HEAD(pci_hotplug_slot_list);
static DEFINE_MUTEX(pci_hp_mutex);
@@ -534,7 +531,6 @@ static int __init pci_hotplug_init(void)
return result;
}
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
return result;
}
device_initcall(pci_hotplug_init);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7d32fa33dcef..35d84845d5af 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -25,6 +25,10 @@
*
* Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
*
+ * Authors:
+ * Dan Zink <dan.zink@compaq.com>
+ * Greg Kroah-Hartman <greg@kroah.com>
+ *	Dely Sy <dely.l.sy@intel.com>
*/
#include <linux/moduleparam.h>
@@ -42,10 +46,6 @@ bool pciehp_poll_mode;
int pciehp_poll_time;
static bool pciehp_force;
-#define DRIVER_VERSION "0.4"
-#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
-#define DRIVER_DESC "PCI Express Hot Plug Controller Driver"
-
/*
* not really modular, but the easiest way to keep compat with existing
* bootargs behaviour is to continue using module_param here.
@@ -333,7 +333,6 @@ static int __init pcied_init(void)
retval = pcie_port_service_register(&hpdriver_portdrv);
dbg("pcie_port_service_register = %d\n", retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
if (retval)
dbg("Failure to register service\n");
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index efe69e879455..10c9c0ba8ff2 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
@@ -98,6 +99,7 @@ static int board_added(struct slot *p_slot)
pciehp_green_led_blink(p_slot);
/* Check link training status */
+ pm_runtime_get_sync(&ctrl->pcie->port->dev);
retval = pciehp_check_link_status(ctrl);
if (retval) {
ctrl_err(ctrl, "Failed to check link status\n");
@@ -118,12 +120,14 @@ static int board_added(struct slot *p_slot)
if (retval != -EEXIST)
goto err_exit;
}
+ pm_runtime_put(&ctrl->pcie->port->dev);
pciehp_green_led_on(p_slot);
pciehp_set_attention_status(p_slot, 0);
return 0;
err_exit:
+ pm_runtime_put(&ctrl->pcie->port->dev);
set_slot_off(ctrl, p_slot);
return retval;
}
@@ -137,7 +141,9 @@ static int remove_board(struct slot *p_slot)
int retval;
struct controller *ctrl = p_slot->ctrl;
+ pm_runtime_get_sync(&ctrl->pcie->port->dev);
retval = pciehp_unconfigure_device(p_slot);
+ pm_runtime_put(&ctrl->pcie->port->dev);
if (retval)
return retval;
@@ -410,7 +416,7 @@ int pciehp_enable_slot(struct slot *p_slot)
if (getstatus) {
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
slot_name(p_slot));
- return -EINVAL;
+ return 0;
}
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b57fc6d6e28a..026830a138ae 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -620,8 +620,18 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS);
}
- /* Check Presence Detect Changed */
- if (events & PCI_EXP_SLTSTA_PDC) {
+ /*
+	 * Check Link Status Changed with higher precedence than Presence
+	 * Detect Changed. The PDS value may be set to "card present" by
+	 * out-of-band presence detection, which may conflict with a Link
+	 * Down and cause the wrong event to be queued.
+ */
+ if (events & PCI_EXP_SLTSTA_DLLSC) {
+ ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
+ link ? "Up" : "Down");
+ pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
+ INT_LINK_DOWN);
+ } else if (events & PCI_EXP_SLTSTA_PDC) {
present = !!(status & PCI_EXP_SLTSTA_PDS);
ctrl_info(ctrl, "Slot(%s): Card %spresent\n", slot_name(slot),
present ? "" : "not ");
@@ -636,13 +646,6 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
pciehp_queue_interrupt_event(slot, INT_POWER_FAULT);
}
- if (events & PCI_EXP_SLTSTA_DLLSC) {
- ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
- link ? "Up" : "Down");
- pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
- INT_LINK_DOWN);
- }
-
return IRQ_HANDLED;
}
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 50b8b7d54416..530d0e49f2ed 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -5,12 +5,13 @@
*
* Author(s):
* Jan Glauber <jang@linux.vnet.ibm.com>
+ *
+ * License: GPL
*/
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
@@ -21,10 +22,6 @@
#define SLOT_NAME_SIZE 10
static LIST_HEAD(s390_hotplug_slot_list);
-MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com");
-MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
-MODULE_LICENSE("GPL");
-
static int zpci_fn_configured(enum zpci_state state)
{
return state == ZPCI_FN_STATE_CONFIGURED ||
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e30f05c8517f..47227820406d 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -306,13 +306,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return rc;
}
- pci_iov_set_numvfs(dev, nr_virtfn);
- iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
- pci_cfg_access_lock(dev);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
- msleep(100);
- pci_cfg_access_unlock(dev);
-
iov->initial_VFs = initial;
if (nr_virtfn < initial)
initial = nr_virtfn;
@@ -323,6 +316,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
goto err_pcibios;
}
+ pci_iov_set_numvfs(dev, nr_virtfn);
+ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+ pci_cfg_access_lock(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ msleep(100);
+ pci_cfg_access_unlock(dev);
+
for (i = 0; i < initial; i++) {
rc = pci_iov_add_virtfn(dev, i, 0);
if (rc)
@@ -554,21 +554,61 @@ void pci_iov_release(struct pci_dev *dev)
}
/**
- * pci_iov_resource_bar - get position of the SR-IOV BAR
+ * pci_iov_update_resource - update a VF BAR
* @dev: the PCI device
* @resno: the resource number
*
- * Returns position of the BAR encapsulated in the SR-IOV capability.
+ * Update a VF BAR in the SR-IOV capability of a PF.
*/
-int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
- if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
- return 0;
+ struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
+ struct resource *res = dev->resource + resno;
+ int vf_bar = resno - PCI_IOV_RESOURCES;
+ struct pci_bus_region region;
+ u16 cmd;
+ u32 new;
+ int reg;
+
+ /*
+ * The generic pci_restore_bars() path calls this for all devices,
+ * including VFs and non-SR-IOV devices. If this is not a PF, we
+ * have nothing to do.
+ */
+ if (!iov)
+ return;
+
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
+ if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
+ dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
+ vf_bar, res);
+ return;
+ }
+
+ /*
+ * Ignore unimplemented BARs, unused resource slots for 64-bit
+ * BARs, and non-movable resources, e.g., those described via
+ * Enhanced Allocation.
+ */
+ if (!res->flags)
+ return;
+
+ if (res->flags & IORESOURCE_UNSET)
+ return;
+
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return;
- BUG_ON(!dev->is_physfn);
+ pcibios_resource_to_bus(dev->bus, &region, res);
+ new = region.start;
+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
- return dev->sriov->pos + PCI_SRIOV_BAR +
- 4 * (resno - PCI_IOV_RESOURCES);
+ reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
+ pci_write_config_dword(dev, reg, new);
+ if (res->flags & IORESOURCE_MEM_64) {
+ new = region.start >> 16 >> 16;
+ pci_write_config_dword(dev, reg + 4, new);
+ }
}
resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
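
The PF-driver side of this path is unchanged: a driver only implements the sriov_configure hook, and the core's sriov_enable()/pci_iov_update_resource() shown above take care of NumVFs, VF Enable and the VF BARs. A minimal sketch of such a hook; the "foo" driver and callback names are hypothetical:

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical PF driver hook; the PCI core does the VF BAR/NumVFs work. */
static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int rc;

	if (num_vfs == 0) {
		pci_disable_sriov(pdev);
		return 0;
	}

	rc = pci_enable_sriov(pdev, num_vfs);
	return rc ? rc : num_vfs;	/* convention: return the number of VFs enabled */
}

static struct pci_driver foo_driver = {
	.name		 = "foo",
	/* .id_table, .probe and .remove omitted in this sketch */
	.sriov_configure = foo_sriov_configure,
};
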
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index ad70507cfb56..50c5003295ca 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -551,14 +551,14 @@ error_attrs:
}
static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
+msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
{
struct cpumask *masks = NULL;
struct msi_desc *entry;
u16 control;
- if (affinity) {
- masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+ if (affd) {
+ masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
pr_err("Unable to allocate affinity masks, ignoring\n");
}
@@ -618,7 +618,8 @@ static int msi_verify_entries(struct pci_dev *dev)
* an error, and a positive return value indicates the number of interrupts
* which could have been allocated.
*/
-static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ const struct irq_affinity *affd)
{
struct msi_desc *entry;
int ret;
@@ -626,7 +627,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
- entry = msi_setup_entry(dev, nvec, affinity);
+ entry = msi_setup_entry(dev, nvec, affd);
if (!entry)
return -ENOMEM;
@@ -690,14 +691,14 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msix_entry *entries, int nvec,
- bool affinity)
+ const struct irq_affinity *affd)
{
struct cpumask *curmsk, *masks = NULL;
struct msi_desc *entry;
int ret, i;
- if (affinity) {
- masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+ if (affd) {
+ masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
pr_err("Unable to allocate affinity masks, ignoring\n");
}
@@ -753,14 +754,14 @@ static void msix_program_entries(struct pci_dev *dev,
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
- * @affinity: flag to indicate cpu irq affinity mask should be set
+ * @affd: Optional pointer to enable automatic affinity assignment
*
* Setup the MSI-X capability structure of the device function with a
* single MSI-X irq. A return of zero indicates successful setup of the
* requested MSI-X entries with allocated irqs; a non-zero return indicates failure.
**/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, bool affinity)
+ int nvec, const struct irq_affinity *affd)
{
int ret;
u16 control;
@@ -775,7 +776,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
if (!base)
return -ENOMEM;
- ret = msix_setup_entries(dev, base, entries, nvec, affinity);
+ ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
return ret;
@@ -956,7 +957,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
EXPORT_SYMBOL(pci_msix_vec_count);
static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, bool affinity)
+ int nvec, const struct irq_affinity *affd)
{
int nr_entries;
int i, j;
@@ -988,7 +989,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
return -EINVAL;
}
- return msix_capability_init(dev, entries, nvec, affinity);
+ return msix_capability_init(dev, entries, nvec, affd);
}
/**
@@ -1008,7 +1009,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
**/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
- return __pci_enable_msix(dev, entries, nvec, false);
+ return __pci_enable_msix(dev, entries, nvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix);
@@ -1059,9 +1060,8 @@ int pci_msi_enabled(void)
EXPORT_SYMBOL(pci_msi_enabled);
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
- unsigned int flags)
+ const struct irq_affinity *affd)
{
- bool affinity = flags & PCI_IRQ_AFFINITY;
int nvec;
int rc;
@@ -1090,14 +1090,13 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
nvec = maxvec;
for (;;) {
- if (affinity) {
- nvec = irq_calc_affinity_vectors(dev->irq_affinity,
- nvec);
+ if (affd) {
+ nvec = irq_calc_affinity_vectors(nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
- rc = msi_capability_init(dev, nvec, affinity);
+ rc = msi_capability_init(dev, nvec, affd);
if (rc == 0)
return nvec;
@@ -1124,29 +1123,27 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
**/
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
- return __pci_enable_msi_range(dev, minvec, maxvec, 0);
+ return __pci_enable_msi_range(dev, minvec, maxvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msi_range);
static int __pci_enable_msix_range(struct pci_dev *dev,
- struct msix_entry *entries, int minvec, int maxvec,
- unsigned int flags)
+ struct msix_entry *entries, int minvec,
+ int maxvec, const struct irq_affinity *affd)
{
- bool affinity = flags & PCI_IRQ_AFFINITY;
int rc, nvec = maxvec;
if (maxvec < minvec)
return -ERANGE;
for (;;) {
- if (affinity) {
- nvec = irq_calc_affinity_vectors(dev->irq_affinity,
- nvec);
+ if (affd) {
+ nvec = irq_calc_affinity_vectors(nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
- rc = __pci_enable_msix(dev, entries, nvec, affinity);
+ rc = __pci_enable_msix(dev, entries, nvec, affd);
if (rc == 0)
return nvec;
@@ -1177,16 +1174,17 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec)
{
- return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
+ return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix_range);
/**
- * pci_alloc_irq_vectors - allocate multiple IRQs for a device
+ * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
* @dev: PCI device to operate on
* @min_vecs: minimum number of vectors required (must be >= 1)
* @max_vecs: maximum (desired) number of vectors
* @flags: flags or quirks for the allocation
+ * @affd: optional description of the affinity requirements
*
* Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
* vectors if available, and fall back to a single legacy vector
@@ -1198,20 +1196,30 @@ EXPORT_SYMBOL(pci_enable_msix_range);
* To get the Linux IRQ number used for a vector that can be passed to
* request_irq() use the pci_irq_vector() helper.
*/
-int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
- unsigned int max_vecs, unsigned int flags)
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ const struct irq_affinity *affd)
{
+ static const struct irq_affinity msi_default_affd;
int vecs = -ENOSPC;
+ if (flags & PCI_IRQ_AFFINITY) {
+ if (!affd)
+ affd = &msi_default_affd;
+ } else {
+ if (WARN_ON(affd))
+ affd = NULL;
+ }
+
if (flags & PCI_IRQ_MSIX) {
vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
- flags);
+ affd);
if (vecs > 0)
return vecs;
}
if (flags & PCI_IRQ_MSI) {
- vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+ vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
if (vecs > 0)
return vecs;
}
@@ -1224,7 +1232,7 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
return vecs;
}
-EXPORT_SYMBOL(pci_alloc_irq_vectors);
+EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
/**
* pci_free_irq_vectors - free previously allocated IRQs for a device
@@ -1294,7 +1302,8 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
} else if (dev->msi_enabled) {
struct msi_desc *entry = first_pci_msi_entry(dev);
- if (WARN_ON_ONCE(!entry || nr >= entry->nvec_used))
+ if (WARN_ON_ONCE(!entry || !entry->affinity ||
+ nr >= entry->nvec_used))
return NULL;
return &entry->affinity[nr];
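
From a driver's point of view the new entry point is used as below; a minimal sketch assuming the pre_vectors/post_vectors layout of struct irq_affinity introduced alongside this change, with a hypothetical handler and illustrative vector counts:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_admin_irq(int irq, void *data)
{
	return IRQ_HANDLED;	/* placeholder handler for this sketch */
}

/*
 * Reserve one pre-vector (e.g. an admin queue) that should not be spread
 * across CPUs, let the core distribute the remaining vectors, then hook
 * up the reserved one.
 */
static int foo_setup_irqs(struct pci_dev *pdev)
{
	struct irq_affinity affd = { .pre_vectors = 1 };
	int nvecs, ret;

	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, 32,
			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_AFFINITY, &affd);
	if (nvecs < 0)
		return nvecs;

	ret = request_irq(pci_irq_vector(pdev, 0), foo_admin_irq, 0,
			  "foo-admin", pdev);
	if (ret)
		pci_free_irq_vectors(pdev);
	return ret;
}
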
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d966d47c9e80..001860361434 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -29,6 +29,82 @@ const u8 pci_acpi_dsm_uuid[] = {
0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
};
+#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
+static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
+{
+ struct device *dev = &adev->dev;
+ struct resource_entry *entry;
+ struct list_head list;
+ unsigned long flags;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+ flags = IORESOURCE_MEM;
+ ret = acpi_dev_get_resources(adev, &list,
+ acpi_dev_filter_resource_type_cb,
+ (void *) flags);
+ if (ret < 0) {
+ dev_err(dev, "failed to parse _CRS method, error code %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ret == 0) {
+ dev_err(dev, "no IO and memory resources present in _CRS\n");
+ return -EINVAL;
+ }
+
+ entry = list_first_entry(&list, struct resource_entry, node);
+ *res = *entry->res;
+ acpi_dev_free_resource_list(&list);
+ return 0;
+}
+
+static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
+ void **retval)
+{
+ u16 *segment = context;
+ unsigned long long uid;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+ if (ACPI_FAILURE(status) || uid != *segment)
+ return AE_CTRL_DEPTH;
+
+ *(acpi_handle *)retval = handle;
+ return AE_CTRL_TERMINATE;
+}
+
+int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
+ struct resource *res)
+{
+ struct acpi_device *adev;
+ acpi_status status;
+ acpi_handle handle;
+ int ret;
+
+ status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "can't find _HID %s device to locate resources\n",
+ hid);
+ return -ENODEV;
+ }
+
+ ret = acpi_bus_get_device(handle, &adev);
+ if (ret)
+ return ret;
+
+ ret = acpi_get_rc_addr(adev, res);
+ if (ret) {
+ dev_err(dev, "can't get resource from %s\n",
+ dev_name(&adev->dev));
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
acpi_status status = AE_NOT_EXIST;
@@ -294,6 +370,30 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
+ * pciehp_is_native - Check whether a hotplug port is handled by the OS
+ * @pdev: Hotplug port to check
+ *
+ * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field
+ * and return the value of the "PCI Express Native Hot Plug control" bit.
+ * On failure to obtain the _OSC Control Field return %false.
+ */
+bool pciehp_is_native(struct pci_dev *pdev)
+{
+ struct acpi_pci_root *root;
+ acpi_handle handle;
+
+ handle = acpi_find_root_bridge_handle(pdev);
+ if (!handle)
+ return false;
+
+ root = acpi_pci_find_root(handle);
+ if (!root)
+ return false;
+
+ return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+}
+
+/**
* pci_acpi_wake_bus - Root bus wakeup notification fork function.
* @work: Work item to handle.
*/
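
The new acpi_get_rc_resources() helper gives host-controller quirk code a way to locate vendor-specific root-complex registers by _HID and segment. A minimal sketch of a caller; the _HID "QUIK0001" is hypothetical, not taken from this diff:

#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/pci.h>
#include "../pci.h"	/* declares acpi_get_rc_resources(), see the pci.h hunk below */

static void __iomem *map_rc_regs(struct device *dev, u16 segment)
{
	struct resource res;
	int ret;

	/* Look up the RC companion device by _HID/_UID and read its _CRS. */
	ret = acpi_get_rc_resources(dev, "QUIK0001", segment, &res);
	if (ret)
		return ERR_PTR(ret);

	return devm_ioremap_resource(dev, &res);
}
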
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 55f453de562e..1c4af7227bca 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
return intel_mid_pci_set_power_state(pdev, state);
}
+static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
+{
+ return intel_mid_pci_get_power_state(pdev);
+}
+
static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
{
return PCI_D3hot;
@@ -49,9 +54,10 @@ static bool mid_pci_need_resume(struct pci_dev *dev)
return false;
}
-static struct pci_platform_pm_ops mid_pci_platform_pm = {
+static const struct pci_platform_pm_ops mid_pci_platform_pm = {
.is_manageable = mid_pci_power_manageable,
.set_state = mid_pci_set_power_state,
+ .get_state = mid_pci_get_power_state,
.choose_state = mid_pci_choose_state,
.sleep_wake = mid_pci_sleep_wake,
.run_wake = mid_pci_run_wake,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index bcd10c795284..066628776e1b 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -50,6 +50,7 @@ pci_config_attr(vendor, "0x%04x\n");
pci_config_attr(device, "0x%04x\n");
pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
+pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n");
@@ -568,6 +569,7 @@ static struct attribute *pci_dev_attrs[] = {
&dev_attr_device.attr,
&dev_attr_subsystem_vendor.attr,
&dev_attr_subsystem_device.attr,
+ &dev_attr_revision.attr,
&dev_attr_class.attr,
&dev_attr_irq.attr,
&dev_attr_local_cpus.attr,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ba34907538f6..a881c0d3d2e8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -564,10 +564,6 @@ static void pci_restore_bars(struct pci_dev *dev)
{
int i;
- /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
- if (dev->is_virtfn)
- return;
-
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
@@ -2106,6 +2102,10 @@ bool pci_dev_run_wake(struct pci_dev *dev)
if (!dev->pme_support)
return false;
+ /* PME-capable in principle, but not from the intended sleep state */
+ if (!pci_pme_capable(dev, pci_target_state(dev)))
+ return false;
+
while (bus->parent) {
struct pci_dev *bridge = bus->self;
@@ -2226,7 +2226,7 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
* This function checks if it is possible to move the bridge to D3.
* Currently we only allow D3 for recent enough PCIe ports.
*/
-static bool pci_bridge_d3_possible(struct pci_dev *bridge)
+bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
unsigned int year;
@@ -2239,6 +2239,14 @@ static bool pci_bridge_d3_possible(struct pci_dev *bridge)
case PCI_EXP_TYPE_DOWNSTREAM:
if (pci_bridge_d3_disable)
return false;
+
+ /*
+ * Hotplug ports handled by firmware in System Management Mode
+ * may not be put into D3 by the OS (Thunderbolt on non-Macs).
+ */
+ if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
+ return false;
+
if (pci_bridge_d3_force)
return true;
@@ -2259,32 +2267,36 @@ static bool pci_bridge_d3_possible(struct pci_dev *bridge)
static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
{
bool *d3cold_ok = data;
- bool no_d3cold;
- /*
- * The device needs to be allowed to go D3cold and if it is wake
- * capable to do so from D3cold.
- */
- no_d3cold = dev->no_d3cold || !dev->d3cold_allowed ||
- (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) ||
- !pci_power_manageable(dev);
+ if (/* The device needs to be allowed to go D3cold ... */
+ dev->no_d3cold || !dev->d3cold_allowed ||
- *d3cold_ok = !no_d3cold;
+ /* ... and if it is wakeup capable to do so from D3cold. */
+ (device_may_wakeup(&dev->dev) &&
+ !pci_pme_capable(dev, PCI_D3cold)) ||
- return no_d3cold;
+ /* If it is a bridge it must be allowed to go to D3. */
+ !pci_power_manageable(dev) ||
+
+ /* Hotplug interrupts cannot be delivered if the link is down. */
+ dev->is_hotplug_bridge)
+
+ *d3cold_ok = false;
+
+ return !*d3cold_ok;
}
/*
* pci_bridge_d3_update - Update bridge D3 capabilities
* @dev: PCI device which is changed
- * @remove: Is the device being removed
*
* Update upstream bridge PM capabilities accordingly depending on if the
* device PM configuration was changed or the device is being removed. The
* change is also propagated upstream.
*/
-static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
+void pci_bridge_d3_update(struct pci_dev *dev)
{
+ bool remove = !device_is_registered(&dev->dev);
struct pci_dev *bridge;
bool d3cold_ok = true;
@@ -2292,55 +2304,39 @@ static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
if (!bridge || !pci_bridge_d3_possible(bridge))
return;
- pci_dev_get(bridge);
/*
- * If the device is removed we do not care about its D3cold
- * capabilities.
+ * If D3 is currently allowed for the bridge, removing one of its
+ * children won't change that.
+ */
+ if (remove && bridge->bridge_d3)
+ return;
+
+ /*
+ * If D3 is currently allowed for the bridge and a child is added or
+ * changed, disallowance of D3 can only be caused by that child, so
+ * we only need to check that single device, not any of its siblings.
+ *
+ * If D3 is currently not allowed for the bridge, checking the device
+ * first may allow us to skip checking its siblings.
*/
if (!remove)
pci_dev_check_d3cold(dev, &d3cold_ok);
- if (d3cold_ok) {
- /*
- * We need to go through all children to find out if all of
- * them can still go to D3cold.
- */
+ /*
+ * If D3 is currently not allowed for the bridge, this may be caused
+ * either by the device being changed/removed or any of its siblings,
+ * so we need to go through all children to find out if one of them
+ * continues to block D3.
+ */
+ if (d3cold_ok && !bridge->bridge_d3)
pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
&d3cold_ok);
- }
if (bridge->bridge_d3 != d3cold_ok) {
bridge->bridge_d3 = d3cold_ok;
/* Propagate change to upstream bridges */
- pci_bridge_d3_update(bridge, false);
+ pci_bridge_d3_update(bridge);
}
-
- pci_dev_put(bridge);
-}
-
-/**
- * pci_bridge_d3_device_changed - Update bridge D3 capabilities on change
- * @dev: PCI device that was changed
- *
- * If a device is added or its PM configuration, such as is it allowed to
- * enter D3cold, is changed this function updates upstream bridge PM
- * capabilities accordingly.
- */
-void pci_bridge_d3_device_changed(struct pci_dev *dev)
-{
- pci_bridge_d3_update(dev, false);
-}
-
-/**
- * pci_bridge_d3_device_removed - Update bridge D3 capabilities on remove
- * @dev: PCI device being removed
- *
- * Function updates upstream bridge PM capabilities based on other devices
- * still left on the bus.
- */
-void pci_bridge_d3_device_removed(struct pci_dev *dev)
-{
- pci_bridge_d3_update(dev, true);
}
/**
@@ -2355,7 +2351,7 @@ void pci_d3cold_enable(struct pci_dev *dev)
{
if (dev->no_d3cold) {
dev->no_d3cold = false;
- pci_bridge_d3_device_changed(dev);
+ pci_bridge_d3_update(dev);
}
}
EXPORT_SYMBOL_GPL(pci_d3cold_enable);
@@ -2372,7 +2368,7 @@ void pci_d3cold_disable(struct pci_dev *dev)
{
if (!dev->no_d3cold) {
dev->no_d3cold = true;
- pci_bridge_d3_device_changed(dev);
+ pci_bridge_d3_update(dev);
}
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
@@ -4831,36 +4827,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
}
EXPORT_SYMBOL(pci_select_bars);
-/**
- * pci_resource_bar - get position of the BAR associated with a resource
- * @dev: the PCI device
- * @resno: the resource number
- * @type: the BAR type to be filled in
- *
- * Returns BAR position in config space, or 0 if the BAR is invalid.
- */
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
-{
- int reg;
-
- if (resno < PCI_ROM_RESOURCE) {
- *type = pci_bar_unknown;
- return PCI_BASE_ADDRESS_0 + 4 * resno;
- } else if (resno == PCI_ROM_RESOURCE) {
- *type = pci_bar_mem32;
- return dev->rom_base_reg;
- } else if (resno < PCI_BRIDGE_RESOURCES) {
- /* device specific resource */
- *type = pci_bar_unknown;
- reg = pci_iov_resource_bar(dev, resno);
- if (reg)
- return reg;
- }
-
- dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
- return 0;
-}
-
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;
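
The rewritten pci_dev_check_d3cold() returns !*d3cold_ok because pci_walk_bus() stops the walk as soon as the callback returns non-zero, which is what lets the new code skip the remaining siblings once one device vetoes D3cold. A minimal sketch of that contract with an illustrative veto condition:

#include <linux/pci.h>

/* Veto-style callback: a non-zero return stops pci_walk_bus() early. */
static int veto_if_hotplug_bridge(struct pci_dev *dev, void *data)
{
	bool *ok = data;

	if (dev->is_hotplug_bridge) {
		*ok = false;
		return 1;	/* stop walking, no need to visit siblings */
	}
	return 0;		/* keep going */
}

static bool subtree_allows_d3cold(struct pci_dev *bridge)
{
	bool ok = true;

	if (bridge->subordinate)
		pci_walk_bus(bridge->subordinate, veto_if_hotplug_bridge, &ok);
	return ok;
}
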
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 451856210e18..cb17db242f30 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,9 +1,6 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
-#define PCI_CFG_SPACE_SIZE 256
-#define PCI_CFG_SPACE_EXP_SIZE 4096
-
#define PCI_FIND_CAP_TTL 48
extern const unsigned char pcie_link_speed[];
@@ -85,8 +82,8 @@ void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
-void pci_bridge_d3_device_changed(struct pci_dev *dev);
-void pci_bridge_d3_device_removed(struct pci_dev *dev);
+bool pci_bridge_d3_possible(struct pci_dev *dev);
+void pci_bridge_d3_update(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -245,7 +242,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
@@ -289,7 +285,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
-int pci_iov_resource_bar(struct pci_dev *dev, int resno);
+void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
@@ -303,10 +299,6 @@ static inline void pci_iov_release(struct pci_dev *dev)
{
}
-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
-{
- return 0;
-}
static inline void pci_restore_iov_state(struct pci_dev *dev)
{
}
@@ -356,4 +348,9 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
}
#endif
+#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
+int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
+ struct resource *res);
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index db553dc22c8e..2b6a59266689 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -307,20 +307,6 @@ out:
return 0;
}
-static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
-{
- while (1) {
- if (!pci_is_pcie(dev))
- break;
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- return dev;
- if (!dev->bus->self)
- break;
- dev = dev->bus->self;
- }
- return NULL;
-}
-
static int find_aer_device_iter(struct device *device, void *data)
{
struct pcie_device **result = data;
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 139150b2bdfd..dea186a9d6b6 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -30,13 +30,6 @@
#include "aerdrv.h"
#include "../../pci.h"
-/*
- * Version Information
- */
-#define DRIVER_VERSION "v1.0"
-#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
-#define DRIVER_DESC "Root Port Advanced Error Reporting Driver"
-
static int aer_probe(struct pcie_device *dev);
static void aer_remove(struct pcie_device *dev);
static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
@@ -297,12 +290,12 @@ static int aer_probe(struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
- struct device *device = &dev->device;
+ struct device *device = &dev->port->dev;
/* Alloc rpc data structure */
rpc = aer_alloc_rpc(dev);
if (!rpc) {
- dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
+ dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
aer_remove(dev);
return -ENOMEM;
}
@@ -310,7 +303,8 @@ static int aer_probe(struct pcie_device *dev)
/* Request IRQ ISR */
status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
if (status) {
- dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
+ dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
+ dev->irq);
aer_remove(dev);
return status;
}
@@ -318,8 +312,8 @@ static int aer_probe(struct pcie_device *dev)
rpc->isr = 1;
aer_enable_rootport(rpc);
-
- return status;
+ dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
+ return 0;
}
/**
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 0ec649d961d7..17ac1dce3286 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -351,12 +351,26 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
return;
}
+ /* Get upstream/downstream components' register state */
+ pcie_get_aspm_reg(parent, &upreg);
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ pcie_get_aspm_reg(child, &dwreg);
+
+ /*
+ * If ASPM not supported, don't mess with the clocks and link,
+ * bail out now.
+ */
+ if (!(upreg.support & dwreg.support))
+ return;
+
/* Configure common clock before checking latencies */
pcie_aspm_configure_common_clock(link);
- /* Get upstream/downstream components' register state */
+ /*
+ * Re-read upstream/downstream components' register state
+ * after clock configuration
+ */
pcie_get_aspm_reg(parent, &upreg);
- child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
pcie_get_aspm_reg(child, &dwreg);
/*
@@ -886,8 +900,8 @@ static ssize_t clk_ctl_store(struct device *dev,
return n;
}
-static DEVICE_ATTR(link_state, 0644, link_state_show, link_state_store);
-static DEVICE_ATTR(clk_ctl, 0644, clk_ctl_show, clk_ctl_store);
+static DEVICE_ATTR_RW(link_state);
+static DEVICE_ATTR_RW(clk_ctl);
static char power_group[] = "power";
void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
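
The DEVICE_ATTR_RW() conversion is purely mechanical: the macro derives the 0644 mode and the callback names from the attribute name, which is why the existing link_state_show()/link_state_store() and clk_ctl_show()/clk_ctl_store() functions can be reused unchanged. A minimal sketch with a hypothetical attribute:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* DEVICE_ATTR_RW(foo) creates dev_attr_foo, mode 0644, wired to foo_show()/foo_store(). */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return count;	/* accept and ignore in this sketch */
}
static DEVICE_ATTR_RW(foo);
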
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 884bad5320f8..717529331dac 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -300,8 +300,6 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
*/
static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
{
- dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n");
-
device_set_run_wake(&dev->dev, true);
dev->pme_interrupt = true;
return 0;
@@ -319,23 +317,8 @@ static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
static void pcie_pme_mark_devices(struct pci_dev *port)
{
pcie_pme_set_native(port, NULL);
- if (port->subordinate) {
+ if (port->subordinate)
pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL);
- } else {
- struct pci_bus *bus = port->bus;
- struct pci_dev *dev;
-
- /* Check if this is a root port event collector. */
- if (pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC || !bus)
- return;
-
- down_read(&pci_bus_sem);
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (pci_is_pcie(dev)
- && pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
- pcie_pme_set_native(dev, NULL);
- up_read(&pci_bus_sem);
- }
}
/**
@@ -364,12 +347,14 @@ static int pcie_pme_probe(struct pcie_device *srv)
ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
if (ret) {
kfree(data);
- } else {
- pcie_pme_mark_devices(port);
- pcie_pme_interrupt_enable(port, true);
+ return ret;
}
- return ret;
+ dev_info(&port->dev, "Signaling PME with IRQ %d\n", srv->irq);
+
+ pcie_pme_mark_devices(port);
+ pcie_pme_interrupt_enable(port, true);
+ return 0;
}
static bool pcie_pme_check_wakeup(struct pci_bus *bus)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e9270b4026f3..9698289f105c 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -499,7 +499,6 @@ static int pcie_port_probe_service(struct device *dev)
if (status)
return status;
- dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
get_device(dev);
return 0;
}
@@ -524,8 +523,6 @@ static int pcie_port_remove_service(struct device *dev)
pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver);
if (driver && driver->remove) {
- dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
- driver->name);
driver->remove(pciedev);
put_device(dev);
}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 79327cc14e7d..8aa3f14bc87d 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -19,6 +19,7 @@
#include <linux/dmi.h>
#include <linux/pci-aspm.h>
+#include "../pci.h"
#include "portdrv.h"
#include "aer/aerdrv.h"
@@ -149,15 +150,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
- /*
- * Prevent runtime PM if the port is advertising support for PCIe
- * hotplug. Otherwise the BIOS hotplug SMI code might not be able
- * to enumerate devices behind this port properly (the port is
- * powered down preventing all config space accesses to the
- * subordinate devices). We can't be sure for native PCIe hotplug
- * either so prevent that as well.
- */
- if (!dev->is_hotplug_bridge) {
+ if (pci_bridge_d3_possible(dev)) {
/*
* Keep the port resumed 100ms to make sure things like
* config space accesses from userspace (lspci) will not
@@ -175,7 +168,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
static void pcie_portdrv_remove(struct pci_dev *dev)
{
- if (!dev->is_hotplug_bridge) {
+ if (pci_bridge_d3_possible(dev)) {
pm_runtime_forbid(&dev->dev);
pm_runtime_get_noresume(&dev->dev);
pm_runtime_dont_use_autosuspend(&dev->dev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ab002671fa60..e164b5c9f0f0 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -227,7 +227,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
}
} else {
- res->flags |= (l & IORESOURCE_ROM_ENABLE);
+ if (l & PCI_ROM_ADDRESS_ENABLE)
+ res->flags |= IORESOURCE_ROM_ENABLE;
l64 = l & PCI_ROM_ADDRESS_MASK;
sz64 = sz & PCI_ROM_ADDRESS_MASK;
mask64 = (u32)PCI_ROM_ADDRESS_MASK;
@@ -521,18 +522,19 @@ static void pci_release_host_bridge_dev(struct device *dev)
kfree(bridge);
}
-static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
+struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
struct pci_host_bridge *bridge;
- bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
if (!bridge)
return NULL;
INIT_LIST_HEAD(&bridge->windows);
- bridge->bus = b;
+
return bridge;
}
+EXPORT_SYMBOL(pci_alloc_host_bridge);
static const unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
@@ -717,6 +719,123 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
dev_set_msi_domain(&bus->dev, d);
}
+int pci_register_host_bridge(struct pci_host_bridge *bridge)
+{
+ struct device *parent = bridge->dev.parent;
+ struct resource_entry *window, *n;
+ struct pci_bus *bus, *b;
+ resource_size_t offset;
+ LIST_HEAD(resources);
+ struct resource *res;
+ char addr[64], *fmt;
+ const char *name;
+ int err;
+
+ bus = pci_alloc_bus(NULL);
+ if (!bus)
+ return -ENOMEM;
+
+ bridge->bus = bus;
+
+ /* temporarily move resources off the list */
+ list_splice_init(&bridge->windows, &resources);
+ bus->sysdata = bridge->sysdata;
+ bus->msi = bridge->msi;
+ bus->ops = bridge->ops;
+ bus->number = bus->busn_res.start = bridge->busnr;
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
+#endif
+
+ b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
+ if (b) {
+ /* If we already got to this bus through a different bridge, ignore it */
+ dev_dbg(&b->dev, "bus already known\n");
+ err = -EEXIST;
+ goto free;
+ }
+
+ dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
+ bridge->busnr);
+
+ err = pcibios_root_bridge_prepare(bridge);
+ if (err)
+ goto free;
+
+ err = device_register(&bridge->dev);
+ if (err)
+ put_device(&bridge->dev);
+
+ bus->bridge = get_device(&bridge->dev);
+ device_enable_async_suspend(bus->bridge);
+ pci_set_bus_of_node(bus);
+ pci_set_bus_msi_domain(bus);
+
+ if (!parent)
+ set_dev_node(bus->bridge, pcibus_to_node(bus));
+
+ bus->dev.class = &pcibus_class;
+ bus->dev.parent = bus->bridge;
+
+ dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
+ name = dev_name(&bus->dev);
+
+ err = device_register(&bus->dev);
+ if (err)
+ goto unregister;
+
+ pcibios_add_bus(bus);
+
+ /* Create legacy_io and legacy_mem files for this bus */
+ pci_create_legacy_files(bus);
+
+ if (parent)
+ dev_info(parent, "PCI host bridge to bus %s\n", name);
+ else
+ pr_info("PCI host bridge to bus %s\n", name);
+
+ /* Add initial resources to the bus */
+ resource_list_for_each_entry_safe(window, n, &resources) {
+ list_move_tail(&window->node, &bridge->windows);
+ offset = window->offset;
+ res = window->res;
+
+ if (res->flags & IORESOURCE_BUS)
+ pci_bus_insert_busn_res(bus, bus->number, res->end);
+ else
+ pci_bus_add_resource(bus, res, 0);
+
+ if (offset) {
+ if (resource_type(res) == IORESOURCE_IO)
+ fmt = " (bus address [%#06llx-%#06llx])";
+ else
+ fmt = " (bus address [%#010llx-%#010llx])";
+
+ snprintf(addr, sizeof(addr), fmt,
+ (unsigned long long)(res->start - offset),
+ (unsigned long long)(res->end - offset));
+ } else
+ addr[0] = '\0';
+
+ dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
+ }
+
+ down_write(&pci_bus_sem);
+ list_add_tail(&bus->node, &pci_root_buses);
+ up_write(&pci_bus_sem);
+
+ return 0;
+
+unregister:
+ put_device(&bridge->dev);
+ device_unregister(&bridge->dev);
+
+free:
+ kfree(bus);
+ return err;
+}
+EXPORT_SYMBOL(pci_register_host_bridge);
+
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -1439,6 +1558,21 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
dev_warn(&dev->dev, "PCI-X settings not supported\n");
}
+static bool pcie_root_rcb_set(struct pci_dev *dev)
+{
+ struct pci_dev *rp = pcie_find_root_port(dev);
+ u16 lnkctl;
+
+ if (!rp)
+ return false;
+
+ pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
+ if (lnkctl & PCI_EXP_LNKCTL_RCB)
+ return true;
+
+ return false;
+}
+
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
int pos;
@@ -1468,9 +1602,20 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
/* Initialize Link Control Register */
- if (pcie_cap_has_lnkctl(dev))
+ if (pcie_cap_has_lnkctl(dev)) {
+
+ /*
+ * If the Root Port supports Read Completion Boundary of
+ * 128, set RCB to 128. Otherwise, clear it.
+ */
+ hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
+ hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
+ if (pcie_root_rcb_set(dev))
+ hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
+
pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
+ }
/* Find Advanced Error Reporting Enhanced Capability */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -1738,8 +1883,7 @@ static void pci_dma_configure(struct pci_dev *dev)
if (attr == DEV_DMA_NOT_SUPPORTED)
dev_warn(&dev->dev, "DMA not supported.\n");
else
- arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
- attr == DEV_DMA_COHERENT);
+ acpi_dma_configure(&dev->dev, attr);
}
pci_put_host_bridge_device(bridge);
@@ -2130,113 +2274,43 @@ void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
-struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata, struct list_head *resources)
+static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
+ int bus, struct pci_ops *ops, void *sysdata,
+ struct list_head *resources, struct msi_controller *msi)
{
int error;
struct pci_host_bridge *bridge;
- struct pci_bus *b, *b2;
- struct resource_entry *window, *n;
- struct resource *res;
- resource_size_t offset;
- char bus_addr[64];
- char *fmt;
-
- b = pci_alloc_bus(NULL);
- if (!b)
- return NULL;
-
- b->sysdata = sysdata;
- b->ops = ops;
- b->number = b->busn_res.start = bus;
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
- b->domain_nr = pci_bus_find_domain_nr(b, parent);
-#endif
- b2 = pci_find_bus(pci_domain_nr(b), bus);
- if (b2) {
- /* If we already got to this bus through a different bridge, ignore it */
- dev_dbg(&b2->dev, "bus already known\n");
- goto err_out;
- }
- bridge = pci_alloc_host_bridge(b);
+ bridge = pci_alloc_host_bridge(0);
if (!bridge)
- goto err_out;
+ return NULL;
bridge->dev.parent = parent;
bridge->dev.release = pci_release_host_bridge_dev;
- dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
- error = pcibios_root_bridge_prepare(bridge);
- if (error) {
- kfree(bridge);
- goto err_out;
- }
-
- error = device_register(&bridge->dev);
- if (error) {
- put_device(&bridge->dev);
- goto err_out;
- }
- b->bridge = get_device(&bridge->dev);
- device_enable_async_suspend(b->bridge);
- pci_set_bus_of_node(b);
- pci_set_bus_msi_domain(b);
-
- if (!parent)
- set_dev_node(b->bridge, pcibus_to_node(b));
-
- b->dev.class = &pcibus_class;
- b->dev.parent = b->bridge;
- dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
- error = device_register(&b->dev);
- if (error)
- goto class_dev_reg_err;
- pcibios_add_bus(b);
+ list_splice_init(resources, &bridge->windows);
+ bridge->sysdata = sysdata;
+ bridge->busnr = bus;
+ bridge->ops = ops;
+ bridge->msi = msi;
- /* Create legacy_io and legacy_mem files for this bus */
- pci_create_legacy_files(b);
-
- if (parent)
- dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
- else
- printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
-
- /* Add initial resources to the bus */
- resource_list_for_each_entry_safe(window, n, resources) {
- list_move_tail(&window->node, &bridge->windows);
- res = window->res;
- offset = window->offset;
- if (res->flags & IORESOURCE_BUS)
- pci_bus_insert_busn_res(b, bus, res->end);
- else
- pci_bus_add_resource(b, res, 0);
- if (offset) {
- if (resource_type(res) == IORESOURCE_IO)
- fmt = " (bus address [%#06llx-%#06llx])";
- else
- fmt = " (bus address [%#010llx-%#010llx])";
- snprintf(bus_addr, sizeof(bus_addr), fmt,
- (unsigned long long) (res->start - offset),
- (unsigned long long) (res->end - offset));
- } else
- bus_addr[0] = '\0';
- dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
- }
-
- down_write(&pci_bus_sem);
- list_add_tail(&b->node, &pci_root_buses);
- up_write(&pci_bus_sem);
+ error = pci_register_host_bridge(bridge);
+ if (error < 0)
+ goto err_out;
- return b;
+ return bridge->bus;
-class_dev_reg_err:
- put_device(&bridge->dev);
- device_unregister(&bridge->dev);
err_out:
- kfree(b);
+ kfree(bridge);
return NULL;
}
+
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+ return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources,
+ NULL);
+}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
@@ -2317,12 +2391,10 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
break;
}
- b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+ b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi);
if (!b)
return NULL;
- b->msi = msi;
-
if (!found) {
dev_info(&b->dev,
"No busn resource found for root bus, will use [bus %02x-ff]\n",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c232729f5b1b..9236e40ac055 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2156,7 +2156,7 @@ static void quirk_blacklist_vpd(struct pci_dev *dev)
{
if (dev->vpd) {
dev->vpd->len = 0;
- dev_warn(&dev->dev, FW_BUG "VPD access disabled\n");
+ dev_warn(&dev->dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
}
}
@@ -3137,8 +3137,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
+
/*
- * Some devices may pass our check in pci_intx_mask_supported if
+ * Some devices may pass our check in pci_intx_mask_supported() if
* PCI_COMMAND_INTX_DISABLE works though they actually do not properly
* support this feature.
*/
@@ -3146,53 +3147,139 @@ static void quirk_broken_intx_masking(struct pci_dev *dev)
{
dev->broken_intx_masking = 1;
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
+ quirk_broken_intx_masking);
+
/*
* Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
* Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
*
* RTL8110SC - Fails under PCI device assignment using DisINTx masking.
*/
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
+ quirk_broken_intx_masking);
/*
* Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
* DisINTx can be set but the interrupt status bit is non-functional.
*/
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1,
- quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2,
- quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2,
+ quirk_broken_intx_masking);
+
+static u16 mellanox_broken_intx_devs[] = {
+ PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
+ PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
+ PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
+ PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
+ PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
+ PCI_DEVICE_ID_MELLANOX_HERMON_EN,
+ PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX2,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
+};
+
+#define CONNECTX_4_CURR_MAX_MINOR 99
+#define CONNECTX_4_INTX_SUPPORT_MINOR 14
+
+/*
+ * Check ConnectX-4/LX FW version to see if it supports legacy interrupts.
+ * If so, don't mark it as broken.
+ * FW minor > 99 means older FW version format and no INTx masking support.
+ * FW minor < 14 means new FW version format and no INTx masking support.
+ */
+static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
+{
+ __be32 __iomem *fw_ver;
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_subminor;
+ u32 fw_maj_min;
+ u32 fw_sub_min;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
+ if (pdev->device == mellanox_broken_intx_devs[i]) {
+ pdev->broken_intx_masking = 1;
+ return;
+ }
+ }
+
+ /* Getting here means Connect-IB cards and up. Connect-IB has no INTx
+ * support so shouldn't be checked further
+ */
+ if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
+ return;
+
+ if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
+ pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
+ return;
+
+ /* For ConnectX-4 and ConnectX-4LX, need to check FW support */
+ if (pci_enable_device_mem(pdev)) {
+ dev_warn(&pdev->dev, "Can't enable device memory\n");
+ return;
+ }
+
+ fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
+ if (!fw_ver) {
+ dev_warn(&pdev->dev, "Can't map ConnectX-4 initialization segment\n");
+ goto out;
+ }
+
+ /* Reading from resource space should be 32b aligned */
+ fw_maj_min = ioread32be(fw_ver);
+ fw_sub_min = ioread32be(fw_ver + 1);
+ fw_major = fw_maj_min & 0xffff;
+ fw_minor = fw_maj_min >> 16;
+ fw_subminor = fw_sub_min & 0xffff;
+ if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
+ fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
+ dev_warn(&pdev->dev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
+ fw_major, fw_minor, fw_subminor, pdev->device ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
+ pdev->broken_intx_masking = 1;
+ }
+
+ iounmap(fw_ver);
+
+out:
+ pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
+ mellanox_check_broken_intx_masking);
static void quirk_no_bus_reset(struct pci_dev *dev)
{
@@ -3255,6 +3342,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
quirk_thunderbolt_hotplug_msi);
+static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
+{
+ pci_set_vpd_size(dev, 8192);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
+
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index f9357e09e9b3..73a03d382590 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -40,7 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
- pci_bridge_d3_device_removed(dev);
+ pci_bridge_d3_update(dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 06663d391b39..b6edb187d160 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -35,6 +35,11 @@ int pci_enable_rom(struct pci_dev *pdev)
if (res->flags & IORESOURCE_ROM_SHADOW)
return 0;
+ /*
+ * Ideally pci_update_resource() would update the ROM BAR address,
+ * and we would only set the enable bit here. But apparently some
+ * devices have buggy ROM BARs that read as zero when disabled.
+ */
pcibios_resource_to_bus(pdev->bus, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 9526e341988b..4bc589ee78d0 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -25,21 +25,18 @@
#include <linux/slab.h>
#include "pci.h"
-
-void pci_update_resource(struct pci_dev *dev, int resno)
+static void pci_std_update_resource(struct pci_dev *dev, int resno)
{
struct pci_bus_region region;
bool disable;
u16 cmd;
u32 new, check, mask;
int reg;
- enum pci_bar_type type;
struct resource *res = dev->resource + resno;
- if (dev->is_virtfn) {
- dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
return;
- }
/*
* Ignore resources for unimplemented BARs and unused resource slots
@@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno)
return;
pcibios_resource_to_bus(dev->bus, &region, res);
+ new = region.start;
- new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
- if (res->flags & IORESOURCE_IO)
+ if (res->flags & IORESOURCE_IO) {
mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
- else
+ new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
+ } else if (resno == PCI_ROM_RESOURCE) {
+ mask = (u32)PCI_ROM_ADDRESS_MASK;
+ } else {
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+ }
- reg = pci_resource_bar(dev, resno, &type);
- if (!reg)
- return;
- if (type != pci_bar_unknown) {
+ if (resno < PCI_ROM_RESOURCE) {
+ reg = PCI_BASE_ADDRESS_0 + 4 * resno;
+ } else if (resno == PCI_ROM_RESOURCE) {
+
+ /*
+ * Apparently some Matrox devices have ROM BARs that read
+ * as zero when disabled, so don't update ROM BARs unless
+ * they're enabled. See https://lkml.org/lkml/2005/8/30/138.
+ */
if (!(res->flags & IORESOURCE_ROM_ENABLE))
return;
+
+ reg = dev->rom_base_reg;
new |= PCI_ROM_ADDRESS_ENABLE;
- }
+ } else
+ return;
/*
* We can't update a 64-bit BAR atomically, so when possible,
@@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno)
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
+void pci_update_resource(struct pci_dev *dev, int resno)
+{
+ if (resno <= PCI_ROM_RESOURCE)
+ pci_std_update_resource(dev, resno);
+#ifdef CONFIG_PCI_IOV
+ else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ pci_iov_update_resource(dev, resno);
+#endif
+}
+
int pci_claim_resource(struct pci_dev *dev, int resource)
{
struct resource *res = &dev->resource[resource];
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d6ff5e82377d..8fc2e9532575 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1038,10 +1038,8 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
err = -ENOMEM;
goto out;
}
- err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
- &state);
- if (err != 1)
- state = XenbusStateUnknown;
+ state = xenbus_read_unsigned(pdev->xdev->otherend, str,
+ XenbusStateUnknown);
if (state != XenbusStateClosing)
continue;