Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig                  |   2
-rw-r--r--  drivers/pci/access.c                 |  41
-rw-r--r--  drivers/pci/dmar.c                   |  82
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c    |   5
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c    |   5
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c     |  17
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c  |   3
-rw-r--r--  drivers/pci/hotplug/rpaphp_core.c    |   3
-rw-r--r--  drivers/pci/intel-iommu.c            | 151
-rw-r--r--  drivers/pci/intr_remapping.c         |   6
-rw-r--r--  drivers/pci/pci-sysfs.c              |  89
-rw-r--r--  drivers/pci/pci.c                    |  12
-rw-r--r--  drivers/pci/pci.h                    |   2
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c    |   2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c        | 173
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h        |   6
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c   | 558
-rw-r--r--  drivers/pci/probe.c                  |  23
-rw-r--r--  drivers/pci/quirks.c                 |  21
-rw-r--r--  drivers/pci/setup-bus.c              | 114
-rw-r--r--  drivers/pci/slot.c                   |  48
21 files changed, 673 insertions, 690 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 7858a117e80b..34ef70d562b2 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -19,7 +19,7 @@ config PCI_MSI
by using the 'pci=nomsi' option. This disables MSI for the
entire system.
- If you don't know what to do here, say N.
+ If you don't know what to do here, say Y.
config PCI_DEBUG
bool "PCI Debugging"
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 2f646fe1260f..531bc697d800 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -13,7 +13,7 @@
* configuration space.
*/
-static DEFINE_SPINLOCK(pci_lock);
+static DEFINE_RAW_SPINLOCK(pci_lock);
/*
* Wrappers for all PCI configuration access functions. They just check
@@ -33,10 +33,10 @@ int pci_bus_read_config_##size \
unsigned long flags; \
u32 data = 0; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irqsave(&pci_lock, flags); \
+ raw_spin_lock_irqsave(&pci_lock, flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
*value = (type)data; \
- spin_unlock_irqrestore(&pci_lock, flags); \
+ raw_spin_unlock_irqrestore(&pci_lock, flags); \
return res; \
}
@@ -47,9 +47,9 @@ int pci_bus_write_config_##size \
int res; \
unsigned long flags; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irqsave(&pci_lock, flags); \
+ raw_spin_lock_irqsave(&pci_lock, flags); \
res = bus->ops->write(bus, devfn, pos, len, value); \
- spin_unlock_irqrestore(&pci_lock, flags); \
+ raw_spin_unlock_irqrestore(&pci_lock, flags); \
return res; \
}
@@ -79,10 +79,10 @@ struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
struct pci_ops *old_ops;
unsigned long flags;
- spin_lock_irqsave(&pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_lock, flags);
old_ops = bus->ops;
bus->ops = ops;
- spin_unlock_irqrestore(&pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
@@ -136,9 +136,9 @@ static noinline void pci_wait_ucfg(struct pci_dev *dev)
__add_wait_queue(&pci_ucfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&pci_lock);
+ raw_spin_unlock_irq(&pci_lock);
schedule();
- spin_lock_irq(&pci_lock);
+ raw_spin_lock_irq(&pci_lock);
} while (dev->block_ucfg_access);
__remove_wait_queue(&pci_ucfg_wait, &wait);
}
@@ -150,11 +150,11 @@ int pci_user_read_config_##size \
int ret = 0; \
u32 data = -1; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irq(&pci_lock); \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
- spin_unlock_irq(&pci_lock); \
+ raw_spin_unlock_irq(&pci_lock); \
*val = (type)data; \
return ret; \
}
@@ -165,11 +165,11 @@ int pci_user_write_config_##size \
{ \
int ret = -EIO; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
- spin_lock_irq(&pci_lock); \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
- spin_unlock_irq(&pci_lock); \
+ raw_spin_unlock_irq(&pci_lock); \
return ret; \
}
@@ -220,8 +220,13 @@ static int pci_vpd_pci22_wait(struct pci_dev *dev)
return 0;
}
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "vpd r/w failed. This is likely a firmware "
+ "bug on this device. Contact the card "
+ "vendor for a firmware update.");
return -ETIMEDOUT;
+ }
if (fatal_signal_pending(current))
return -EINTR;
if (!cond_resched())
@@ -396,10 +401,10 @@ void pci_block_user_cfg_access(struct pci_dev *dev)
unsigned long flags;
int was_blocked;
- spin_lock_irqsave(&pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_lock, flags);
was_blocked = dev->block_ucfg_access;
dev->block_ucfg_access = 1;
- spin_unlock_irqrestore(&pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
/* If we BUG() inside the pci_lock, we're guaranteed to hose
* the machine */
@@ -417,7 +422,7 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
{
unsigned long flags;
- spin_lock_irqsave(&pci_lock, flags);
+ raw_spin_lock_irqsave(&pci_lock, flags);
/* This indicates a problem in the caller, but we don't need
* to kill them, unlike a double-block above. */
@@ -425,6 +430,6 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
dev->block_ucfg_access = 0;
wake_up_all(&pci_ucfg_wait);
- spin_unlock_irqrestore(&pci_lock, flags);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
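
[Note] A minimal sketch of the locking idiom the access.c hunks convert to: raw_spinlock_t stays a true spinning lock even on preempt-rt kernels, which is why the low-level config-space accessors use it. Everything below except the lock API itself is a hypothetical example.

    static DEFINE_RAW_SPINLOCK(example_lock);

    static u32 example_read_shared(void)
    {
            unsigned long flags;
            u32 val;

            raw_spin_lock_irqsave(&example_lock, flags);
            val = 0;        /* access the shared state here */
            raw_spin_unlock_irqrestore(&example_lock, flags);

            return val;
    }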
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 33ead97f0c4b..0a19708074c2 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -131,9 +131,10 @@ static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
(*cnt)++;
- else
+ else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
printk(KERN_WARNING PREFIX
- "Unsupported device scope\n");
+ "Unsupported device scope\n");
+ }
start += scope->length;
}
if (*cnt == 0)
@@ -309,6 +310,8 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
+ dev = pci_physfn(dev);
+
list_for_each_entry(atsru, &dmar_atsr_units, list) {
atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
if (atsr->segment == pci_domain_nr(dev->bus))
@@ -358,12 +361,14 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
return 0;
}
}
- WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->reg_base_addr,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
+ WARN_TAINT(
+ 1, TAINT_FIRMWARE_WORKAROUND,
+ "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ drhd->reg_base_addr,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
return 0;
}
@@ -507,7 +512,7 @@ parse_dmar_table(void)
return ret;
}
-int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
+static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
struct pci_dev *dev)
{
int index;
@@ -530,6 +535,8 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
struct dmar_drhd_unit *dmaru = NULL;
struct acpi_dmar_hardware_unit *drhd;
+ dev = pci_physfn(dev);
+
list_for_each_entry(dmaru, &dmar_drhd_units, list) {
drhd = container_of(dmaru->hdr,
struct acpi_dmar_hardware_unit,
@@ -614,7 +621,17 @@ int __init dmar_table_init(void)
return 0;
}
-static int bios_warned;
+static void warn_invalid_dmar(u64 addr, const char *message)
+{
+ WARN_TAINT_ONCE(
+ 1, TAINT_FIRMWARE_WORKAROUND,
+ "Your BIOS is broken; DMAR reported at address %llx%s!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ addr, message,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+}
int __init check_zero_address(void)
{
@@ -640,13 +657,7 @@ int __init check_zero_address(void)
drhd = (void *)entry_header;
if (!drhd->address) {
- /* Promote an attitude of violence to a BIOS engineer today */
- WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
+ warn_invalid_dmar(0, "");
goto failed;
}
@@ -659,14 +670,8 @@ int __init check_zero_address(void)
ecap = dmar_readq(addr + DMAR_ECAP_REG);
early_iounmap(addr, VTD_PAGE_SIZE);
if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
- /* Promote an attitude of violence to a BIOS engineer today */
- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->address,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
+ warn_invalid_dmar(drhd->address,
+ " returns all ones");
goto failed;
}
}
@@ -731,14 +736,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
int msagaw = 0;
if (!drhd->reg_base_addr) {
- if (!bios_warned) {
- WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
- }
+ warn_invalid_dmar(0, "");
return -EINVAL;
}
@@ -758,16 +756,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
- if (!bios_warned) {
- /* Promote an attitude of violence to a BIOS engineer today */
- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->reg_base_addr,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- bios_warned = 1;
- }
+ warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
goto err_unmap;
}
@@ -806,7 +795,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
}
ver = readl(iommu->reg + DMAR_VER_REG);
- pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+ pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
+ iommu->seq_id,
(unsigned long long)drhd->reg_base_addr,
DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
(unsigned long long)iommu->cap,
@@ -1457,9 +1447,11 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
/*
* Check interrupt remapping support in DMAR table description.
*/
-int dmar_ir_support(void)
+int __init dmar_ir_support(void)
{
struct acpi_table_dmar *dmar;
dmar = (struct acpi_table_dmar *)dmar_tbl;
+ if (!dmar)
+ return 0;
return dmar->flags & 0x1;
}
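
[Note] The new warn_invalid_dmar() helper relies on the WARN_TAINT_ONCE() idiom: warn once with a backtrace and set a taint flag so later bug reports carry the firmware problem. A minimal sketch, with a hypothetical function name:

    static void example_report_bad_firmware(u64 addr)
    {
            /* warn once and taint the kernel as working around firmware */
            WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
                            "firmware reported a bogus address %llx\n", addr);
    }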
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 6ecbfb27db9d..e525263210ee 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -108,7 +108,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status);
static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
static int ibm_get_table_from_acpi(char **bufp);
-static ssize_t ibm_read_apci_table(struct kobject *kobj,
+static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size);
static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
@@ -351,6 +351,7 @@ read_table_done:
/**
* ibm_read_apci_table - callback for the sysfs apci_table file
+ * @filp: the open sysfs file
* @kobj: the kobject this binary attribute is a part of
* @bin_attr: struct bin_attribute for this file
* @buffer: the kernel space buffer to fill
@@ -364,7 +365,7 @@ read_table_done:
* things get really tricky here...
* our solution is to only allow reading the table in all at once.
*/
-static ssize_t ibm_read_apci_table(struct kobject *kobj,
+static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size)
{
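
[Note] For reference, the updated sysfs binary-attribute callback prototype that this file (and pci-sysfs.c below) moves to passes the opening struct file as the first argument, giving the callback access to the opener's credentials via filp->f_cred. A hypothetical sketch:

    static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
                                    struct bin_attribute *bin_attr,
                                    char *buf, loff_t off, size_t count)
    {
            /* filp->f_cred identifies who opened the file */
            return 0;
    }

    static struct bin_attribute example_attr = {
            .attr = { .name = "example", .mode = 0444 },
            .read = example_bin_read,
    };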
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index f184d1d2ecbe..b3e5580c837b 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -848,7 +848,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- /* Check for the proper subsytem ID's
+ /* Check for the proper subsystem ID's
* Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
* Also Intel HPC's may have RID=0.
@@ -1075,13 +1075,12 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make our own copy of the pci bus structure,
* as we like tweaking it a lot */
- ctrl->pci_bus = kmalloc(sizeof(*ctrl->pci_bus), GFP_KERNEL);
+ ctrl->pci_bus = kmemdup(pdev->bus, sizeof(*ctrl->pci_bus), GFP_KERNEL);
if (!ctrl->pci_bus) {
err("out of memory\n");
rc = -ENOMEM;
goto err_free_ctrl;
}
- memcpy(ctrl->pci_bus, pdev->bus, sizeof(*ctrl->pci_bus));
ctrl->bus = pdev->bus->number;
ctrl->rev = pdev->revision;
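
[Note] The kmalloc()+memcpy() pair above collapses into a single kmemdup() call; a minimal sketch of the idiom with a hypothetical helper name:

    static struct pci_bus *example_dup_bus(const struct pci_bus *orig)
    {
            /* allocate and copy in one step; returns NULL on failure */
            return kmemdup(orig, sizeof(*orig), GFP_KERNEL);
    }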
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 0a16444c14c9..2fce726758d2 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -84,12 +84,6 @@ int pciehp_configure_device(struct slot *p_slot)
dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
if (!dev)
continue;
- if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- ctrl_err(ctrl, "Cannot hot-add display device %s\n",
- pci_name(dev));
- pci_dev_put(dev);
- continue;
- }
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
pciehp_add_bridge(dev);
@@ -133,15 +127,9 @@ int pciehp_unconfigure_device(struct slot *p_slot)
presence = 0;
for (j = 0; j < 8; j++) {
- struct pci_dev* temp = pci_get_slot(parent, PCI_DEVFN(0, j));
+ struct pci_dev *temp = pci_get_slot(parent, PCI_DEVFN(0, j));
if (!temp)
continue;
- if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- ctrl_err(ctrl, "Cannot remove display device %s\n",
- pci_name(temp));
- pci_dev_put(temp);
- continue;
- }
if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
if (bctl & PCI_BRIDGE_CTL_VGA) {
@@ -149,7 +137,8 @@ int pciehp_unconfigure_device(struct slot *p_slot)
"Cannot remove display device %s\n",
pci_name(temp));
pci_dev_put(temp);
- continue;
+ rc = EINVAL;
+ break;
}
}
pci_remove_bus_device(temp);
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 4e3e0382c16e..083034710fa6 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
+#include <linux/vmalloc.h>
#include <asm/pci-bridge.h>
#include <linux/mutex.h>
@@ -430,6 +431,8 @@ int dlpar_remove_slot(char *drc_name)
rc = dlpar_remove_pci_slot(drc_name, dn);
break;
}
+ vm_unmap_aliases();
+
printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
exit:
mutex_unlock(&rpadlpar_mutex);
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 719702240780..ef7411c660b9 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -29,6 +29,7 @@
#include <linux/pci_hotplug.h>
#include <linux/smp.h>
#include <linux/init.h>
+#include <linux/vmalloc.h>
#include <asm/eeh.h> /* for eeh_add_device() */
#include <asm/rtas.h> /* rtas_call */
#include <asm/pci-bridge.h> /* for pci_controller */
@@ -418,6 +419,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
return -EINVAL;
pcibios_remove_pci_devices(slot->bus);
+ vm_unmap_aliases();
+
slot->state = NOT_CONFIGURED;
return 0;
}
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 417312528ddf..796828fce34c 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -491,13 +491,11 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
domain->iommu_coherency = 1;
- i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
- for (; i < g_num_of_iommus; ) {
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
break;
}
- i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
}
}
@@ -507,13 +505,11 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
domain->iommu_snooping = 1;
- i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
- for (; i < g_num_of_iommus; ) {
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
if (!ecap_sc_support(g_iommus[i]->ecap)) {
domain->iommu_snooping = 0;
break;
}
- i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
}
}
@@ -1068,7 +1064,7 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- unsigned long pfn, unsigned int pages)
+ unsigned long pfn, unsigned int pages, int map)
{
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
@@ -1089,10 +1085,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
DMA_TLB_PSI_FLUSH);
/*
- * In caching mode, domain ID 0 is reserved for non-present to present
- * mapping flush. Device IOTLB doesn't need to be flushed in this case.
+ * In caching mode, changes of pages from non-present to present require
+ * flush. However, device IOTLB doesn't need to be flushed in this case.
*/
- if (!cap_caching_mode(iommu->cap) || did)
+ if (!cap_caching_mode(iommu->cap) || !map)
iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
@@ -1154,7 +1150,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
unsigned long nlongs;
ndomains = cap_ndoms(iommu->cap);
- pr_debug("Number of Domains supportd <%ld>\n", ndomains);
+ pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id,
+ ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
@@ -1194,8 +1191,7 @@ void free_dmar_iommu(struct intel_iommu *iommu)
unsigned long flags;
if ((iommu->domains) && (iommu->domain_ids)) {
- i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
- for (; i < cap_ndoms(iommu->cap); ) {
+ for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
domain = iommu->domains[i];
clear_bit(i, iommu->domain_ids);
@@ -1207,9 +1203,6 @@ void free_dmar_iommu(struct intel_iommu *iommu)
domain_exit(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
- i = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), i+1);
}
}
@@ -1292,14 +1285,11 @@ static void iommu_detach_domain(struct dmar_domain *domain,
spin_lock_irqsave(&iommu->lock, flags);
ndomains = cap_ndoms(iommu->cap);
- num = find_first_bit(iommu->domain_ids, ndomains);
- for (; num < ndomains; ) {
+ for_each_set_bit(num, iommu->domain_ids, ndomains) {
if (iommu->domains[num] == domain) {
found = 1;
break;
}
- num = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), num+1);
}
if (found) {
@@ -1485,15 +1475,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
/* find an available domain id for this device in iommu */
ndomains = cap_ndoms(iommu->cap);
- num = find_first_bit(iommu->domain_ids, ndomains);
- for (; num < ndomains; ) {
+ for_each_set_bit(num, iommu->domain_ids, ndomains) {
if (iommu->domains[num] == domain) {
id = num;
found = 1;
break;
}
- num = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), num+1);
}
if (found == 0) {
@@ -1558,7 +1545,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+ iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
} else {
iommu_flush_write_buffer(iommu);
}
@@ -2333,14 +2320,16 @@ int __init init_dmars(void)
*/
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
+ printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
"invalidation\n",
+ iommu->seq_id,
(unsigned long long)drhd->reg_base_addr);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
- printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
+ printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
"invalidation\n",
+ iommu->seq_id,
(unsigned long long)drhd->reg_base_addr);
}
}
@@ -2621,7 +2610,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, 0, mm_to_dma_pfn(iova->pfn_lo), size);
+ iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
else
iommu_flush_write_buffer(iommu);
@@ -2661,15 +2650,24 @@ static void flush_unmaps(void)
if (!deferred_flush[i].next)
continue;
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ /* In caching mode, global flushes turn emulation expensive */
+ if (!cap_caching_mode(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
for (j = 0; j < deferred_flush[i].next; j++) {
unsigned long mask;
struct iova *iova = deferred_flush[i].iova[j];
-
- mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
- iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
- (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+ struct dmar_domain *domain = deferred_flush[i].domain[j];
+
+ /* On real hardware multiple invalidations are expensive */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, domain->id,
+ iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0);
+ else {
+ mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
+ iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+ (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+ }
__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
deferred_flush[i].next = 0;
@@ -2750,7 +2748,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1);
+ last_pfn - start_pfn + 1, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2840,7 +2838,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1);
+ last_pfn - start_pfn + 1, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2874,7 +2872,6 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
struct dmar_domain *domain;
size_t size = 0;
int prot = 0;
- size_t offset_pfn = 0;
struct iova *iova = NULL;
int ret;
struct scatterlist *sg;
@@ -2928,7 +2925,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, 0, start_vpfn, offset_pfn);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
else
iommu_flush_write_buffer(iommu);
@@ -3436,22 +3433,6 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;
-static int vm_domain_min_agaw(struct dmar_domain *domain)
-{
- int i;
- int min_agaw = domain->agaw;
-
- i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
- for (; i < g_num_of_iommus; ) {
- if (min_agaw > g_iommus[i]->agaw)
- min_agaw = g_iommus[i]->agaw;
-
- i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
- }
-
- return min_agaw;
-}
-
static struct dmar_domain *iommu_alloc_vm_domain(void)
{
struct dmar_domain *domain;
@@ -3512,8 +3493,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
iommu = drhd->iommu;
ndomains = cap_ndoms(iommu->cap);
- i = find_first_bit(iommu->domain_ids, ndomains);
- for (; i < ndomains; ) {
+ for_each_set_bit(i, iommu->domain_ids, ndomains) {
if (iommu->domains[i] == domain) {
spin_lock_irqsave(&iommu->lock, flags);
clear_bit(i, iommu->domain_ids);
@@ -3521,7 +3501,6 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
spin_unlock_irqrestore(&iommu->lock, flags);
break;
}
- i = find_next_bit(iommu->domain_ids, ndomains, i+1);
}
}
}
@@ -3582,7 +3561,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
struct pci_dev *pdev = to_pci_dev(dev);
struct intel_iommu *iommu;
int addr_width;
- u64 end;
/* normally pdev is not mapped */
if (unlikely(domain_context_mapped(pdev))) {
@@ -3605,14 +3583,30 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
/* check if this iommu agaw is sufficient for max mapped address */
addr_width = agaw_to_width(iommu->agaw);
- end = DOMAIN_MAX_ADDR(addr_width);
- end = end & VTD_PAGE_MASK;
- if (end < dmar_domain->max_addr) {
- printk(KERN_ERR "%s: iommu agaw (%d) is not "
+ if (addr_width > cap_mgaw(iommu->cap))
+ addr_width = cap_mgaw(iommu->cap);
+
+ if (dmar_domain->max_addr > (1LL << addr_width)) {
+ printk(KERN_ERR "%s: iommu width (%d) is not "
"sufficient for the mapped address (%llx)\n",
- __func__, iommu->agaw, dmar_domain->max_addr);
+ __func__, addr_width, dmar_domain->max_addr);
return -EFAULT;
}
+ dmar_domain->gaw = addr_width;
+
+ /*
+ * Knock out extra levels of page tables if necessary
+ */
+ while (iommu->agaw < dmar_domain->agaw) {
+ struct dma_pte *pte;
+
+ pte = dmar_domain->pgd;
+ if (dma_pte_present(pte)) {
+ free_pgtable_page(dmar_domain->pgd);
+ dmar_domain->pgd = (struct dma_pte *)dma_pte_addr(pte);
+ }
+ dmar_domain->agaw--;
+ }
return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}
@@ -3626,14 +3620,14 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
domain_remove_one_dev_info(dmar_domain, pdev);
}
-static int intel_iommu_map_range(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t hpa,
- size_t size, int iommu_prot)
+static int intel_iommu_map(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t hpa,
+ int gfp_order, int iommu_prot)
{
struct dmar_domain *dmar_domain = domain->priv;
u64 max_addr;
- int addr_width;
int prot = 0;
+ size_t size;
int ret;
if (iommu_prot & IOMMU_READ)
@@ -3643,20 +3637,17 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
prot |= DMA_PTE_SNP;
+ size = PAGE_SIZE << gfp_order;
max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
- int min_agaw;
u64 end;
/* check if minimum agaw is sufficient for mapped address */
- min_agaw = vm_domain_min_agaw(dmar_domain);
- addr_width = agaw_to_width(min_agaw);
- end = DOMAIN_MAX_ADDR(addr_width);
- end = end & VTD_PAGE_MASK;
+ end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
if (end < max_addr) {
- printk(KERN_ERR "%s: iommu agaw (%d) is not "
+ printk(KERN_ERR "%s: iommu width (%d) is not "
"sufficient for the mapped address (%llx)\n",
- __func__, min_agaw, max_addr);
+ __func__, dmar_domain->gaw, max_addr);
return -EFAULT;
}
dmar_domain->max_addr = max_addr;
@@ -3669,19 +3660,19 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
return ret;
}
-static void intel_iommu_unmap_range(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static int intel_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, int gfp_order)
{
struct dmar_domain *dmar_domain = domain->priv;
-
- if (!size)
- return;
+ size_t size = PAGE_SIZE << gfp_order;
dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
(iova + size - 1) >> VTD_PAGE_SHIFT);
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
+
+ return gfp_order;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -3714,8 +3705,8 @@ static struct iommu_ops intel_iommu_ops = {
.domain_destroy = intel_iommu_domain_destroy,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
- .map = intel_iommu_map_range,
- .unmap = intel_iommu_unmap_range,
+ .map = intel_iommu_map,
+ .unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
.domain_has_cap = intel_iommu_domain_has_cap,
};
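
[Note] Several intel-iommu.c hunks above replace open-coded find_first_bit()/find_next_bit() loops with the for_each_set_bit() iterator; a minimal sketch (function, bitmap name, and size are hypothetical):

    static void example_dump_bits(void)
    {
            DECLARE_BITMAP(example_map, 64);
            unsigned int bit;

            bitmap_zero(example_map, 64);
            set_bit(3, example_map);

            for_each_set_bit(bit, example_map, 64) {
                    /* visits each set bit in ascending order */
                    pr_debug("bit %u is set\n", bit);
            }
    }

The same file also switches the IOMMU-API callbacks to the order-based interface, where the mapped size is PAGE_SIZE << gfp_order and unmap returns the order it released.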
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 6ee98a56946f..1315ac688aa2 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -832,9 +832,9 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
return -1;
}
- printk(KERN_INFO "IOAPIC id %d under DRHD base"
- " 0x%Lx\n", scope->enumeration_id,
- drhd->address);
+ printk(KERN_INFO "IOAPIC id %d under DRHD base "
+ " 0x%Lx IOMMU %d\n", scope->enumeration_id,
+ drhd->address, iommu->seq_id);
ir_parse_one_ioapic_scope(scope, iommu);
} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index fad93983bfed..afd2fbf7d797 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -21,6 +21,7 @@
#include <linux/stat.h>
#include <linux/topology.h>
#include <linux/mm.h>
+#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
@@ -357,7 +358,8 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
struct device_attribute vga_attr = __ATTR_RO(boot_vga);
static ssize_t
-pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
@@ -366,7 +368,7 @@ pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
u8 *data = (u8*) buf;
/* Several chips lock up trying to read undefined config space */
- if (capable(CAP_SYS_ADMIN)) {
+ if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
size = dev->cfg_size;
} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
size = 128;
@@ -430,7 +432,8 @@ pci_read_config(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_config(struct file* filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(container_of(kobj,struct device,kobj));
@@ -487,7 +490,8 @@ pci_write_config(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
+read_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev =
@@ -502,7 +506,8 @@ read_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
}
static ssize_t
-write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
+write_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *dev =
@@ -519,6 +524,7 @@ write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
#ifdef HAVE_PCI_LEGACY
/**
* pci_read_legacy_io - read byte(s) from legacy I/O port space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to file to read from
* @bin_attr: struct bin_attribute for this file
* @buf: buffer to store results
@@ -529,7 +535,8 @@ write_vpd_attr(struct kobject *kobj, struct bin_attribute *bin_attr,
* callback routine (pci_legacy_read).
*/
static ssize_t
-pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -545,6 +552,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_write_legacy_io - write byte(s) to legacy I/O port space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to file to read from
* @bin_attr: struct bin_attribute for this file
* @buf: buffer containing value to be written
@@ -555,7 +563,8 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
* callback routine (pci_legacy_write).
*/
static ssize_t
-pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -570,6 +579,7 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_mmap_legacy_mem - map legacy PCI memory into user memory space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to device to be mapped
* @attr: struct bin_attribute for this file
* @vma: struct vm_area_struct passed to mmap
@@ -579,7 +589,8 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr,
* memory space.
*/
static int
-pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -591,6 +602,7 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
/**
* pci_mmap_legacy_io - map legacy PCI IO into user memory space
+ * @filp: open sysfs file
* @kobj: kobject corresponding to device to be mapped
* @attr: struct bin_attribute for this file
* @vma: struct vm_area_struct passed to mmap
@@ -600,7 +612,8 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr,
* memory space. Returns -ENOSYS if the operation isn't supported
*/
static int
-pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(container_of(kobj,
@@ -750,14 +763,16 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
}
static int
-pci_mmap_resource_uc(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 0);
}
static int
-pci_mmap_resource_wc(struct kobject *kobj, struct bin_attribute *attr,
+pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 1);
@@ -861,6 +876,7 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
/**
* pci_write_rom - used to enable access to the PCI ROM display
+ * @filp: sysfs file
* @kobj: kernel object handle
* @bin_attr: struct bin_attribute for this file
* @buf: user input
@@ -870,7 +886,8 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
* writing anything except 0 enables it
*/
static ssize_t
-pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_write_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
@@ -885,6 +902,7 @@ pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
/**
* pci_read_rom - read a PCI ROM
+ * @filp: sysfs file
* @kobj: kernel object handle
* @bin_attr: struct bin_attribute for this file
* @buf: where to put the data we read from the ROM
@@ -895,7 +913,8 @@ pci_write_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
* device corresponding to @kobj.
*/
static ssize_t
-pci_read_rom(struct kobject *kobj, struct bin_attribute *bin_attr,
+pci_read_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
@@ -960,7 +979,12 @@ static ssize_t reset_store(struct device *dev,
if (val != 1)
return -EINVAL;
- return pci_reset_function(pdev);
+
+ result = pci_reset_function(pdev);
+ if (result < 0)
+ return result;
+
+ return count;
}
static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);
@@ -1011,6 +1035,39 @@ error:
return retval;
}
+static void pci_remove_slot_links(struct pci_dev *dev)
+{
+ char func[10];
+ struct pci_slot *slot;
+
+ sysfs_remove_link(&dev->dev.kobj, "slot");
+ list_for_each_entry(slot, &dev->bus->slots, list) {
+ if (slot->number != PCI_SLOT(dev->devfn))
+ continue;
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ sysfs_remove_link(&slot->kobj, func);
+ }
+}
+
+static int pci_create_slot_links(struct pci_dev *dev)
+{
+ int result = 0;
+ char func[10];
+ struct pci_slot *slot;
+
+ list_for_each_entry(slot, &dev->bus->slots, list) {
+ if (slot->number != PCI_SLOT(dev->devfn))
+ continue;
+ result = sysfs_create_link(&dev->dev.kobj, &slot->kobj, "slot");
+ if (result)
+ goto out;
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ result = sysfs_create_link(&slot->kobj, &dev->dev.kobj, func);
+ }
+out:
+ return result;
+}
+
int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
{
int retval;
@@ -1073,6 +1130,8 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
if (retval)
goto err_vga_file;
+ pci_create_slot_links(pdev);
+
return 0;
err_vga_file:
@@ -1122,6 +1181,8 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
if (!sysfs_initialized)
return;
+ pci_remove_slot_links(pdev);
+
pci_remove_capabilities_sysfs(pdev);
if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
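
[Note] The reset_store() change above follows the sysfs convention that a store callback reports the number of bytes it consumed on success rather than 0; a minimal sketch with a hypothetical action helper:

    static ssize_t example_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
            int err = example_do_action(dev);       /* hypothetical helper */

            if (err < 0)
                    return err;

            return count;   /* success: report the bytes consumed */
    }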
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 5ea587e59e48..60f30e7f1c8c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -679,7 +679,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
*/
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
- return state > PCI_D0 ?
+ return state >= PCI_D0 ?
pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
@@ -716,10 +716,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
*/
return 0;
- /* Check if we're already there */
- if (dev->current_state == state)
- return 0;
-
__pci_start_power_transition(dev, state);
/* This device is quirked not to be put into D3, so
@@ -1197,7 +1193,7 @@ void pci_disable_enabled_device(struct pci_dev *dev)
* anymore. This only involves disabling PCI bus-mastering, if active.
*
* Note we don't actually disable the device until all callers of
- * pci_device_enable() have called pci_device_disable().
+ * pci_enable_device() have called pci_disable_device().
*/
void
pci_disable_device(struct pci_dev *dev)
@@ -1507,7 +1503,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
* pci_back_from_sleep - turn PCI device on during system-wide transition into working state
* @dev: Device to handle.
*
- * Disable device's sytem wake-up capability and put it into D0.
+ * Disable device's system wake-up capability and put it into D0.
*/
int pci_back_from_sleep(struct pci_dev *dev)
{
@@ -1635,7 +1631,6 @@ void pci_pm_init(struct pci_dev *dev)
* let the user space enable it to wake up the system as needed.
*/
device_set_wakeup_capable(&dev->dev, true);
- device_set_wakeup_enable(&dev->dev, false);
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
} else {
@@ -1659,7 +1654,6 @@ void platform_pci_wakeup_init(struct pci_dev *dev)
return;
device_set_wakeup_capable(&dev->dev, true);
- device_set_wakeup_enable(&dev->dev, false);
platform_pci_sleep_wake(dev, false);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4eb10f48d270..f8077b3c8c8c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -244,7 +244,7 @@ struct pci_ats {
int stu; /* Smallest Translation Unit */
int qdep; /* Invalidate Queue Depth */
int ref_cnt; /* Physical Function reference count */
- int is_enabled:1; /* Enable bit is set */
+ unsigned int is_enabled:1; /* Enable bit is set */
};
#ifdef CONFIG_PCI_IOV
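
[Note] The pci.h change avoids a signed one-bit bit-field: with gcc a plain int bit-field is signed, so a single bit can only hold 0 or -1, and comparisons against 1 silently fail. A hypothetical illustration:

    struct example_flags {
            int          signed_bit:1;      /* reads back as 0 or -1 */
            unsigned int unsigned_bit:1;    /* reads back as 0 or 1 */
    };

    /* After "f.signed_bit = 1;" the test "f.signed_bit == 1" is false,
     * because the stored value is sign-extended to -1 on read. */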
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index f8f425b8731d..909924692b8a 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -168,7 +168,7 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
target = &err->root_status;
rw1cs = 1;
break;
- case PCI_ERR_ROOT_COR_SRC:
+ case PCI_ERR_ROOT_ERR_SRC:
target = &err->source_id;
break;
}
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index aa495ad9bbd4..484cc55194b8 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -72,13 +72,120 @@ void pci_no_aer(void)
pcie_aer_disable = 1; /* has priority over 'forceload' */
}
+static int set_device_error_reporting(struct pci_dev *dev, void *data)
+{
+ bool enable = *((bool *)data);
+
+ if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
+ (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) ||
+ (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) {
+ if (enable)
+ pci_enable_pcie_error_reporting(dev);
+ else
+ pci_disable_pcie_error_reporting(dev);
+ }
+
+ if (enable)
+ pcie_set_ecrc_checking(dev);
+
+ return 0;
+}
+
+/**
+ * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
+ * @dev: pointer to root port's pci_dev data structure
+ * @enable: true = enable error reporting, false = disable error reporting.
+ */
+static void set_downstream_devices_error_reporting(struct pci_dev *dev,
+ bool enable)
+{
+ set_device_error_reporting(dev, &enable);
+
+ if (!dev->subordinate)
+ return;
+ pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
+}
+
+/**
+ * aer_enable_rootport - enable Root Port's interrupts when receiving messages
+ * @rpc: pointer to a Root Port data structure
+ *
+ * Invoked when PCIe bus loads AER service driver.
+ */
+static void aer_enable_rootport(struct aer_rpc *rpc)
+{
+ struct pci_dev *pdev = rpc->rpd->port;
+ int pos, aer_pos;
+ u16 reg16;
+ u32 reg32;
+
+ pos = pci_pcie_cap(pdev);
+ /* Clear PCIe Capability's Device Status */
+ pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
+ pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
+
+ /* Disable system error generation in response to error messages */
+ pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16);
+ reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
+ pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
+
+ aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ /* Clear error status */
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
+
+ /*
+ * Enable error reporting for the root port device and downstream port
+ * devices.
+ */
+ set_downstream_devices_error_reporting(pdev, true);
+
+ /* Enable Root Port's interrupt in response to error messages */
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32);
+}
+
+/**
+ * aer_disable_rootport - disable Root Port's interrupts when receiving messages
+ * @rpc: pointer to a Root Port data structure
+ *
+ * Invoked when PCIe bus unloads AER service driver.
+ */
+static void aer_disable_rootport(struct aer_rpc *rpc)
+{
+ struct pci_dev *pdev = rpc->rpd->port;
+ u32 reg32;
+ int pos;
+
+ /*
+ * Disable error reporting for the root port device and downstream port
+ * devices.
+ */
+ set_downstream_devices_error_reporting(pdev, false);
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ /* Disable Root's interrupt in response to error messages */
+ pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+
+ /* Clear Root's error status reg */
+ pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
+}
+
/**
* aer_irq - Root Port's ISR
* @irq: IRQ assigned to Root Port
* @context: pointer to Root Port data structure
*
* Invoked when Root Port detects AER messages.
- **/
+ */
irqreturn_t aer_irq(int irq, void *context)
{
unsigned int status, id;
@@ -97,13 +204,13 @@ irqreturn_t aer_irq(int irq, void *context)
/* Read error status */
pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status);
- if (!(status & ROOT_ERR_STATUS_MASKS)) {
+ if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) {
spin_unlock_irqrestore(&rpc->e_lock, flags);
return IRQ_NONE;
}
/* Read error source and clear error status */
- pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_COR_SRC, &id);
+ pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id);
pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status);
/* Store error source for later DPC handler */
@@ -135,7 +242,7 @@ EXPORT_SYMBOL_GPL(aer_irq);
* @dev: pointer to the pcie_dev data structure
*
* Invoked when Root Port's AER service is loaded.
- **/
+ */
static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
{
struct aer_rpc *rpc;
@@ -144,15 +251,11 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
if (!rpc)
return NULL;
- /*
- * Initialize Root lock access, e_lock, to Root Error Status Reg,
- * Root Error ID Reg, and Root error producer/consumer index.
- */
+ /* Initialize Root lock access, e_lock, to Root Error Status Reg */
spin_lock_init(&rpc->e_lock);
rpc->rpd = dev;
INIT_WORK(&rpc->dpc_handler, aer_isr);
- rpc->prod_idx = rpc->cons_idx = 0;
mutex_init(&rpc->rpc_mutex);
init_waitqueue_head(&rpc->wait_release);
@@ -167,7 +270,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
* @dev: pointer to the pcie_dev data structure
*
* Invoked when PCI Express bus unloads or AER probe fails.
- **/
+ */
static void aer_remove(struct pcie_device *dev)
{
struct aer_rpc *rpc = get_service_data(dev);
@@ -179,7 +282,8 @@ static void aer_remove(struct pcie_device *dev)
wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
- aer_delete_rootport(rpc);
+ aer_disable_rootport(rpc);
+ kfree(rpc);
set_service_data(dev, NULL);
}
}
@@ -190,7 +294,7 @@ static void aer_remove(struct pcie_device *dev)
* @id: pointer to the service id data structure
*
* Invoked when PCI Express bus loads AER service driver.
- **/
+ */
static int __devinit aer_probe(struct pcie_device *dev)
{
int status;
@@ -230,41 +334,30 @@ static int __devinit aer_probe(struct pcie_device *dev)
* @dev: pointer to Root Port's pci_dev data structure
*
* Invoked by Port Bus driver when performing link reset at Root Port.
- **/
+ */
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
- u16 p2p_ctrl;
- u32 status;
+ u32 reg32;
int pos;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
/* Disable Root's interrupt in response to error messages */
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0);
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- /* Assert Secondary Bus Reset */
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
- p2p_ctrl |= PCI_CB_BRIDGE_CTL_CB_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
- /* De-assert Secondary Bus Reset */
- p2p_ctrl &= ~PCI_CB_BRIDGE_CTL_CB_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
- /*
- * System software must wait for at least 100ms from the end
- * of a reset of one or more device before it is permitted
- * to issue Configuration Requests to those devices.
- */
- msleep(200);
+ aer_do_secondary_bus_reset(dev);
dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
+ /* Clear Root Error Status */
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);
+
/* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
- pci_write_config_dword(dev,
- pos + PCI_ERR_ROOT_COMMAND,
- ROOT_PORT_INTR_ON_MESG_MASK);
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -275,7 +368,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
* @error: error severity being notified by port bus
*
* Invoked by Port Bus driver during error recovery.
- **/
+ */
static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
enum pci_channel_state error)
{
@@ -288,7 +381,7 @@ static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
* @dev: pointer to Root Port's pci_dev data structure
*
* Invoked by Port Bus driver during nonfatal recovery.
- **/
+ */
static void aer_error_resume(struct pci_dev *dev)
{
int pos;
@@ -315,7 +408,7 @@ static void aer_error_resume(struct pci_dev *dev)
* aer_service_init - register AER root service driver
*
* Invoked when AER root service driver is loaded.
- **/
+ */
static int __init aer_service_init(void)
{
if (pcie_aer_disable)
@@ -329,7 +422,7 @@ static int __init aer_service_init(void)
* aer_service_exit - unregister AER root service driver
*
* Invoked when AER root service driver is unloaded.
- **/
+ */
static void __exit aer_service_exit(void)
{
pcie_port_service_unregister(&aerdriver);
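
[Note] The new root-port enable/disable helpers fan out over the hierarchy with pci_walk_bus(), which calls a callback for every device below a bus and stops the walk if the callback returns non-zero. A minimal sketch with hypothetical names:

    static int example_cb(struct pci_dev *dev, void *data)
    {
            bool enable = *(bool *)data;

            dev_dbg(&dev->dev, "error reporting %s\n", enable ? "on" : "off");
            return 0;       /* non-zero would stop the walk */
    }

    static void example_walk(struct pci_dev *bridge, bool enable)
    {
            if (bridge->subordinate)
                    pci_walk_bus(bridge->subordinate, example_cb, &enable);
    }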
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index bd833ea3ba49..7aaae2d2bd67 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -17,9 +17,6 @@
#define AER_FATAL 1
#define AER_CORRECTABLE 2
-/* Root Error Status Register Bits */
-#define ROOT_ERR_STATUS_MASKS 0x0f
-
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
PCI_EXP_RTCTL_SEFEE)
@@ -117,8 +114,7 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
}
extern struct bus_type pcie_port_bus_type;
-extern void aer_enable_rootport(struct aer_rpc *rpc);
-extern void aer_delete_rootport(struct aer_rpc *rpc);
+extern void aer_do_secondary_bus_reset(struct pci_dev *dev);
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index aceb04b67b60..df2d686fe3dd 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -47,13 +47,12 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
if (!pos)
return -EIO;
- pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
- reg16 = reg16 |
- PCI_EXP_DEVCTL_CERE |
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
+ reg16 |= (PCI_EXP_DEVCTL_CERE |
PCI_EXP_DEVCTL_NFERE |
PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE;
- pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
+ PCI_EXP_DEVCTL_URRE);
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
return 0;
}
@@ -71,12 +70,12 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
if (!pos)
return -EIO;
- pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
- reg16 = reg16 & ~(PCI_EXP_DEVCTL_CERE |
- PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE);
- pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
+ reg16 &= ~(PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
return 0;
}
@@ -99,99 +98,46 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
-static int set_device_error_reporting(struct pci_dev *dev, void *data)
-{
- bool enable = *((bool *)data);
-
- if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
- (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) ||
- (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) {
- if (enable)
- pci_enable_pcie_error_reporting(dev);
- else
- pci_disable_pcie_error_reporting(dev);
- }
-
- if (enable)
- pcie_set_ecrc_checking(dev);
-
- return 0;
-}
-
/**
- * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
- * @dev: pointer to root port's pci_dev data structure
- * @enable: true = enable error reporting, false = disable error reporting.
+ * add_error_device - list device to be handled
+ * @e_info: pointer to error info
+ * @dev: pointer to pci_dev to be added
*/
-static void set_downstream_devices_error_reporting(struct pci_dev *dev,
- bool enable)
-{
- set_device_error_reporting(dev, &enable);
-
- if (!dev->subordinate)
- return;
- pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
-}
-
-static inline int compare_device_id(struct pci_dev *dev,
- struct aer_err_info *e_info)
-{
- if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
- /*
- * Device ID match
- */
- return 1;
- }
-
- return 0;
-}
-
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
e_info->dev[e_info->error_dev_num] = dev;
e_info->error_dev_num++;
- return 1;
+ return 0;
}
-
- return 0;
+ return -ENOSPC;
}
-
#define PCI_BUS(x) (((x) >> 8) & 0xff)
-static int find_device_iter(struct pci_dev *dev, void *data)
+/**
+ * is_error_source - check whether the device is source of reported error
+ * @dev: pointer to pci_dev to be checked
+ * @e_info: pointer to reported error info
+ */
+static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
int pos;
- u32 status;
- u32 mask;
+ u32 status, mask;
u16 reg16;
- int result;
- struct aer_err_info *e_info = (struct aer_err_info *)data;
/*
* When bus id is equal to 0, it might be a bad id
* reported by root port.
*/
if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
- result = compare_device_id(dev, e_info);
- if (result)
- add_error_device(e_info, dev);
+ /* Device ID match? */
+ if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
+ return true;
- /*
- * If there is no multiple error, we stop
- * or continue based on the id comparing.
- */
+ /* Continue id comparing if there is no multiple error */
if (!e_info->multi_error_valid)
- return result;
-
- /*
- * If there are multiple errors and id does match,
- * We need continue to search other devices under
- * the root port. Return 0 means that.
- */
- if (result)
- return 0;
+ return false;
}
/*
@@ -200,71 +146,94 @@ static int find_device_iter(struct pci_dev *dev, void *data)
* 2) bus id is equal to 0. Some ports might lose the bus
* id of error source id;
* 3) There are multiple errors and prior id comparing fails;
- * We check AER status registers to find the initial reporter.
+ * We check AER status registers to find possible reporter.
*/
if (atomic_read(&dev->enable_cnt) == 0)
- return 0;
+ return false;
pos = pci_pcie_cap(dev);
if (!pos)
- return 0;
+ return false;
+
/* Check if AER is enabled */
- pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
if (!(reg16 & (
PCI_EXP_DEVCTL_CERE |
PCI_EXP_DEVCTL_NFERE |
PCI_EXP_DEVCTL_FERE |
PCI_EXP_DEVCTL_URRE)))
- return 0;
+ return false;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos)
- return 0;
+ return false;
- status = 0;
- mask = 0;
+ /* Check if error is recorded */
if (e_info->severity == AER_CORRECTABLE) {
pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
- if (status & ~mask) {
- add_error_device(e_info, dev);
- goto added;
- }
} else {
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
- if (status & ~mask) {
- add_error_device(e_info, dev);
- goto added;
- }
}
+ if (status & ~mask)
+ return true;
- return 0;
+ return false;
+}
-added:
- if (e_info->multi_error_valid)
- return 0;
- else
- return 1;
+static int find_device_iter(struct pci_dev *dev, void *data)
+{
+ struct aer_err_info *e_info = (struct aer_err_info *)data;
+
+ if (is_error_source(dev, e_info)) {
+ /* List this device */
+ if (add_error_device(e_info, dev)) {
+ /* We cannot handle more... Stop iteration */
+ /* TODO: Should print error message here? */
+ return 1;
+ }
+
+ /* If there is only a single error, stop iteration */
+ if (!e_info->multi_error_valid)
+ return 1;
+ }
+ return 0;
}
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @err_info: including detailed error information such like id
+ * @e_info: pointer to the error info, including details such as the error source ID
*
- * Invoked when error is detected at the Root Port.
+ * Return true if found.
+ *
+ * Invoked by the DPC handler when an error is detected at the Root Port.
+ * The caller must set the id, severity, and multi_error_valid fields of
+ * the struct aer_err_info pointed to by @e_info. This function fills in
+ * e_info->error_dev_num and e_info->dev[] based on that information.
*/
-static void find_source_device(struct pci_dev *parent,
+static bool find_source_device(struct pci_dev *parent,
struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
int result;
+ /* Must reset in this function */
+ e_info->error_dev_num = 0;
+
/* Is Root Port an agent that sends error message? */
result = find_device_iter(dev, e_info);
if (result)
- return;
+ return true;
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
+
+ if (!e_info->error_dev_num) {
+ dev_printk(KERN_DEBUG, &parent->dev,
+ "can't find device of ID%04x\n",
+ e_info->id);
+ return false;
+ }
+ return true;
}
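
The search above relies on pci_walk_bus() aborting the walk as soon as the callback returns a non-zero value; find_device_iter() uses that to stop once the device list is full or a single-error source has been found. A minimal sketch of the same callback pattern, with a hypothetical match criterion (not part of this patch):

/* Hypothetical illustration: find the first device on a subtree that
 * has a PCIe capability.  pci_walk_bus() stops walking as soon as the
 * callback returns non-zero, exactly as find_device_iter() does above. */
static int find_pcie_dev_iter(struct pci_dev *dev, void *data)
{
	struct pci_dev **found = data;

	if (pci_pcie_cap(dev)) {
		*found = dev;
		return 1;	/* stop the walk */
	}
	return 0;		/* keep walking */
}

static struct pci_dev *find_first_pcie_dev(struct pci_bus *bus)
{
	struct pci_dev *found = NULL;

	pci_walk_bus(bus, find_pcie_dev_iter, &found);
	return found;
}
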
static int report_error_detected(struct pci_dev *dev, void *data)
@@ -403,43 +372,77 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
return result_data.result;
}
-struct find_aer_service_data {
- struct pcie_port_service_driver *aer_driver;
- int is_downstream;
-};
-
-static int find_aer_service_iter(struct device *device, void *data)
+/**
+ * aer_do_secondary_bus_reset - perform secondary bus reset
+ * @dev: pointer to bridge's pci_dev data structure
+ *
+ * Invoked when performing link reset at Root Port or Downstream Port.
+ */
+void aer_do_secondary_bus_reset(struct pci_dev *dev)
{
- struct device_driver *driver;
- struct pcie_port_service_driver *service_driver;
- struct find_aer_service_data *result;
+ u16 p2p_ctrl;
+
+ /* Assert Secondary Bus Reset */
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
+ p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
+
+ /*
+ * Keep the hot reset asserted for 2ms so the reset message has time
+ * to propagate to all downstream ports.
+ */
+ msleep(2);
+
+ /* De-assert Secondary Bus Reset */
+ p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
+
+ /*
+ * System software must wait for at least 100ms from the end
+ * of a reset of one or more devices before it is permitted
+ * to issue Configuration Requests to those devices.
+ */
+ msleep(200);
+}
- result = (struct find_aer_service_data *) data;
+/**
+ * default_downstream_reset_link - default reset function for Downstream Port
+ * @dev: pointer to downstream port's pci_dev data structure
+ *
+ * Invoked when performing a link reset at a Downstream Port that has no AER driver.
+ */
+static pci_ers_result_t default_downstream_reset_link(struct pci_dev *dev)
+{
+ aer_do_secondary_bus_reset(dev);
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "Downstream Port link has been reset\n");
+ return PCI_ERS_RESULT_RECOVERED;
+}
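
default_downstream_reset_link() above is only the fallback used when no port service driver provides a reset_link method. For comparison, a sketch of how an AER-aware port service driver could supply its own reset_link callback; the driver name and its registration are hypothetical, only the struct pcie_port_service_driver fields that the reset_link lookup below relies on are assumed:

/* Hypothetical sketch: a port service driver supplying its own
 * reset_link method.  find_aer_service() would pick it up when it is
 * bound to the upstream port and prefer it over
 * default_downstream_reset_link(). */
static pci_ers_result_t my_reset_link(struct pci_dev *dev)
{
	aer_do_secondary_bus_reset(dev);
	return PCI_ERS_RESULT_RECOVERED;
}

static struct pcie_port_service_driver my_aer_service = {
	.name		= "my_aer",		/* made-up name */
	.port_type	= PCI_EXP_TYPE_DOWNSTREAM,
	.service	= PCIE_PORT_SERVICE_AER,
	.reset_link	= my_reset_link,
};
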
- if (device->bus == &pcie_port_bus_type) {
- struct pcie_device *pcie = to_pcie_device(device);
+static int find_aer_service_iter(struct device *device, void *data)
+{
+ struct pcie_port_service_driver *service_driver, **drv;
- if (pcie->port->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
- result->is_downstream = 1;
+ drv = (struct pcie_port_service_driver **) data;
- driver = device->driver;
- if (driver) {
- service_driver = to_service_driver(driver);
- if (service_driver->service == PCIE_PORT_SERVICE_AER) {
- result->aer_driver = service_driver;
- return 1;
- }
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ service_driver = to_service_driver(device->driver);
+ if (service_driver->service == PCIE_PORT_SERVICE_AER) {
+ *drv = service_driver;
+ return 1;
}
}
return 0;
}
-static void find_aer_service(struct pci_dev *dev,
- struct find_aer_service_data *data)
+static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
{
- int retval;
- retval = device_for_each_child(&dev->dev, data, find_aer_service_iter);
+ struct pcie_port_service_driver *drv = NULL;
+
+ device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
+
+ return drv;
}
static pci_ers_result_t reset_link(struct pcie_device *aerdev,
@@ -447,38 +450,34 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
{
struct pci_dev *udev;
pci_ers_result_t status;
- struct find_aer_service_data data;
+ struct pcie_port_service_driver *driver;
- if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)
+ if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
+ /* Reset this port for all subordinates */
udev = dev;
- else
+ } else {
+ /* Reset the upstream component (likely downstream port) */
udev = dev->bus->self;
+ }
- data.is_downstream = 0;
- data.aer_driver = NULL;
- find_aer_service(udev, &data);
+ /* Use the component's own AER driver first */
+ driver = find_aer_service(udev);
- /*
- * Use the aer driver of the error agent firstly.
- * If it hasn't the aer driver, use the root port's
- */
- if (!data.aer_driver || !data.aer_driver->reset_link) {
- if (data.is_downstream &&
- aerdev->device.driver &&
- to_service_driver(aerdev->device.driver)->reset_link) {
- data.aer_driver =
- to_service_driver(aerdev->device.driver);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev, "no link-reset "
- "support\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
+ if (driver && driver->reset_link) {
+ status = driver->reset_link(udev);
+ } else if (udev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
+ status = default_downstream_reset_link(udev);
+ } else {
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "no link-reset support at upstream device %s\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
}
- status = data.aer_driver->reset_link(udev);
if (status != PCI_ERS_RESULT_RECOVERED) {
- dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream "
- "device %s failed\n", pci_name(udev));
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "link reset at upstream device %s failed\n",
+ pci_name(udev));
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -495,8 +494,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
* error detected message to all downstream drivers within a hierarchy in
* question and return the returned code.
*/
-static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
- struct pci_dev *dev,
+static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
int severity)
{
pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
@@ -514,10 +512,8 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
if (severity == AER_FATAL) {
result = reset_link(aerdev, dev);
- if (result != PCI_ERS_RESULT_RECOVERED) {
- /* TODO: Should panic here? */
- return result;
- }
+ if (result != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
}
if (status == PCI_ERS_RESULT_CAN_RECOVER)
@@ -538,13 +534,22 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
report_slot_reset);
}
- if (status == PCI_ERS_RESULT_RECOVERED)
- broadcast_error_message(dev,
+ if (status != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
+
+ broadcast_error_message(dev,
state,
"resume",
report_resume);
- return status;
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "AER driver successfully recovered\n");
+ return;
+
+failed:
+ /* TODO: Should kernel panic here? */
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "AER driver didn't recover\n");
}
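
do_recovery() drives the error_detected/mmio_enabled/slot_reset/resume callbacks of the drivers bound to devices below the failing link. A sketch of the receiving side, i.e. the struct pci_error_handlers an endpoint driver would register; the driver and its callback bodies are hypothetical, the callback names and result codes are the standard ones:

/* Hypothetical endpoint driver error handlers, invoked by the
 * broadcast_error_message() calls made from do_recovery() above. */
static pci_ers_result_t mydrv_error_detected(struct pci_dev *pdev,
					     enum pci_channel_state state)
{
	/* Stop I/O; ask for a slot reset unless the link is dead */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
{
	/* Re-initialize the device after the link/secondary bus reset */
	return PCI_ERS_RESULT_RECOVERED;
}

static void mydrv_resume(struct pci_dev *pdev)
{
	/* Restart I/O */
}

static struct pci_error_handlers mydrv_err_handler = {
	.error_detected	= mydrv_error_detected,
	.slot_reset	= mydrv_slot_reset,
	.resume		= mydrv_resume,
};
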
/**
@@ -559,7 +564,6 @@ static void handle_error_source(struct pcie_device *aerdev,
struct pci_dev *dev,
struct aer_err_info *info)
{
- pci_ers_result_t status = 0;
int pos;
if (info->severity == AER_CORRECTABLE) {
@@ -571,114 +575,8 @@ static void handle_error_source(struct pcie_device *aerdev,
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
- } else {
- status = do_recovery(aerdev, dev, info->severity);
- if (status == PCI_ERS_RESULT_RECOVERED) {
- dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
- "successfully recovered\n");
- } else {
- /* TODO: Should kernel panic here? */
- dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't "
- "recover\n");
- }
- }
-}
-
-/**
- * aer_enable_rootport - enable Root Port's interrupts when receiving messages
- * @rpc: pointer to a Root Port data structure
- *
- * Invoked when PCIe bus loads AER service driver.
- */
-void aer_enable_rootport(struct aer_rpc *rpc)
-{
- struct pci_dev *pdev = rpc->rpd->port;
- int pos, aer_pos;
- u16 reg16;
- u32 reg32;
-
- pos = pci_pcie_cap(pdev);
- /* Clear PCIe Capability's Device Status */
- pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
- pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
-
- /* Disable system error generation in response to error messages */
- pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16);
- reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
- pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);
-
- aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- /* Clear error status */
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
-
- /*
- * Enable error reporting for the root port device and downstream port
- * devices.
- */
- set_downstream_devices_error_reporting(pdev, true);
-
- /* Enable Root Port's interrupt in response to error messages */
- pci_write_config_dword(pdev,
- aer_pos + PCI_ERR_ROOT_COMMAND,
- ROOT_PORT_INTR_ON_MESG_MASK);
-}
-
-/**
- * disable_root_aer - disable Root Port's interrupts when receiving messages
- * @rpc: pointer to a Root Port data structure
- *
- * Invoked when PCIe bus unloads AER service driver.
- */
-static void disable_root_aer(struct aer_rpc *rpc)
-{
- struct pci_dev *pdev = rpc->rpd->port;
- u32 reg32;
- int pos;
-
- /*
- * Disable error reporting for the root port device and downstream port
- * devices.
- */
- set_downstream_devices_error_reporting(pdev, false);
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- /* Disable Root's interrupt in response to error messages */
- pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);
-
- /* Clear Root's error status reg */
- pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
-}
-
-/**
- * get_e_source - retrieve an error source
- * @rpc: pointer to the root port which holds an error
- *
- * Invoked by DPC handler to consume an error.
- */
-static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
-{
- struct aer_err_source *e_source;
- unsigned long flags;
-
- /* Lock access to Root error producer/consumer index */
- spin_lock_irqsave(&rpc->e_lock, flags);
- if (rpc->prod_idx == rpc->cons_idx) {
- spin_unlock_irqrestore(&rpc->e_lock, flags);
- return NULL;
- }
- e_source = &rpc->e_sources[rpc->cons_idx];
- rpc->cons_idx++;
- if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
- rpc->cons_idx = 0;
- spin_unlock_irqrestore(&rpc->e_lock, flags);
-
- return e_source;
+ } else
+ do_recovery(aerdev, dev, info->severity);
}
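
handle_error_source() clears a correctable error by writing the logged status bits back to PCI_ERR_COR_STATUS; the AER status registers are write-one-to-clear. A short generic sketch of the same pattern applied to the uncorrectable status register (illustration only, not part of this patch):

/* Sketch: AER status registers are "write 1 to clear", so reading the
 * register and writing the value back clears exactly the bits that
 * were set. */
static void clear_uncorrectable_status(struct pci_dev *dev)
{
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	u32 status;

	if (!pos)
		return;
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
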
/**
@@ -687,11 +585,14 @@ static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
* @info: pointer to structure to store the error record
*
* Return 1 on success, 0 on error.
+ *
+ * Note that @info is reused for every error device, so clear its fields properly.
*/
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
int pos, temp;
+ /* Must reset in this function */
info->status = 0;
info->tlp_header_valid = 0;
@@ -744,12 +645,6 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
{
int i;
- if (!e_info->dev[0]) {
- dev_printk(KERN_DEBUG, &p_device->port->dev,
- "can't find device of ID%04x\n",
- e_info->id);
- }
-
/* Report all errors before handling them, so that records are not lost to resets etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (get_device_error_info(e_info->dev[i], e_info))
@@ -770,11 +665,10 @@ static void aer_isr_one_error(struct pcie_device *p_device,
struct aer_err_source *e_src)
{
struct aer_err_info *e_info;
- int i;
/* struct aer_err_info might be big, so we allocate it with slab */
e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
- if (e_info == NULL) {
+ if (!e_info) {
dev_printk(KERN_DEBUG, &p_device->port->dev,
"Can't allocate mem when processing AER errors\n");
return;
@@ -784,37 +678,72 @@ static void aer_isr_one_error(struct pcie_device *p_device,
* There is a possibility that both correctable error and
* uncorrectable error being logged. Report correctable error first.
*/
- for (i = 1; i & ROOT_ERR_STATUS_MASKS ; i <<= 2) {
- if (i > 4)
- break;
- if (!(e_src->status & i))
- continue;
-
- memset(e_info, 0, sizeof(struct aer_err_info));
-
- /* Init comprehensive error information */
- if (i & PCI_ERR_ROOT_COR_RCV) {
- e_info->id = ERR_COR_ID(e_src->id);
- e_info->severity = AER_CORRECTABLE;
- } else {
- e_info->id = ERR_UNCOR_ID(e_src->id);
- e_info->severity = ((e_src->status >> 6) & 1);
- }
- if (e_src->status &
- (PCI_ERR_ROOT_MULTI_COR_RCV |
- PCI_ERR_ROOT_MULTI_UNCOR_RCV))
+ if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
+ e_info->id = ERR_COR_ID(e_src->id);
+ e_info->severity = AER_CORRECTABLE;
+
+ if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
e_info->multi_error_valid = 1;
+ else
+ e_info->multi_error_valid = 0;
+
+ aer_print_port_info(p_device->port, e_info);
+
+ if (find_source_device(p_device->port, e_info))
+ aer_process_err_devices(p_device, e_info);
+ }
+
+ if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
+ e_info->id = ERR_UNCOR_ID(e_src->id);
+
+ if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
+ e_info->severity = AER_FATAL;
+ else
+ e_info->severity = AER_NONFATAL;
+
+ if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
+ e_info->multi_error_valid = 1;
+ else
+ e_info->multi_error_valid = 0;
aer_print_port_info(p_device->port, e_info);
- find_source_device(p_device->port, e_info);
- aer_process_err_devices(p_device, e_info);
+ if (find_source_device(p_device->port, e_info))
+ aer_process_err_devices(p_device, e_info);
}
kfree(e_info);
}
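
Both branches above derive e_info->id from the Root Error Source Identification register: the correctable requester ID sits in the low 16 bits and the uncorrectable one in the high 16 bits, which is what the ERR_COR_ID()/ERR_UNCOR_ID() helpers from aerdrv.h are assumed to extract. A small sketch of decoding such an ID into bus/device/function, using the PCI_BUS() macro defined earlier in this file (hypothetical helper):

/* Sketch: decode a 16-bit requester ID (bus in the high byte, devfn in
 * the low byte) for logging purposes. */
static void print_error_source(struct pci_dev *port, u16 id)
{
	dev_printk(KERN_DEBUG, &port->dev,
		   "error source %02x:%02x.%d\n",
		   PCI_BUS(id), PCI_SLOT(id & 0xff), PCI_FUNC(id & 0xff));
}
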
/**
+ * get_e_source - retrieve an error source
+ * @rpc: pointer to the root port which holds an error
+ * @e_src: pointer to store retrieved error source
+ *
+ * Return 1 if an error source is retrieved, otherwise 0.
+ *
+ * Invoked by DPC handler to consume an error.
+ */
+static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ /* Lock access to Root error producer/consumer index */
+ spin_lock_irqsave(&rpc->e_lock, flags);
+ if (rpc->prod_idx != rpc->cons_idx) {
+ *e_src = rpc->e_sources[rpc->cons_idx];
+ rpc->cons_idx++;
+ if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
+ rpc->cons_idx = 0;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&rpc->e_lock, flags);
+
+ return ret;
+}
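
get_e_source() is the consumer side of a small producer/consumer ring protected by e_lock. The producer is the AER interrupt handler in aerdrv.c, which records the root port's error status and source ID and then schedules this worker; a simplified sketch of that producer side (assumed, the real handler also reads and clears the Root Error Status register):

/* Rough sketch of the producer side: queue one error source for the
 * worker and kick it; aer_isr() drains the ring via get_e_source(). */
static void queue_error_source(struct aer_rpc *rpc, u32 status, u32 id)
{
	unsigned long flags;

	spin_lock_irqsave(&rpc->e_lock, flags);
	rpc->e_sources[rpc->prod_idx].status = status;
	rpc->e_sources[rpc->prod_idx].id = id;
	rpc->prod_idx++;
	if (rpc->prod_idx == AER_ERROR_SOURCES_MAX)
		rpc->prod_idx = 0;
	spin_unlock_irqrestore(&rpc->e_lock, flags);

	schedule_work(&rpc->dpc_handler);
}
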
+
+/**
* aer_isr - consume errors detected by root port
* @work: definition of this work item
*
@@ -824,34 +753,17 @@ void aer_isr(struct work_struct *work)
{
struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
struct pcie_device *p_device = rpc->rpd;
- struct aer_err_source *e_src;
+ struct aer_err_source e_src;
mutex_lock(&rpc->rpc_mutex);
- e_src = get_e_source(rpc);
- while (e_src) {
- aer_isr_one_error(p_device, e_src);
- e_src = get_e_source(rpc);
- }
+ while (get_e_source(rpc, &e_src))
+ aer_isr_one_error(p_device, &e_src);
mutex_unlock(&rpc->rpc_mutex);
wake_up(&rpc->wait_release);
}
/**
- * aer_delete_rootport - disable root port aer and delete service data
- * @rpc: pointer to a root port device being deleted
- *
- * Invoked when AER service unloaded on a specific Root Port
- */
-void aer_delete_rootport(struct aer_rpc *rpc)
-{
- /* Disable root port AER itself */
- disable_root_aer(rpc);
-
- kfree(rpc);
-}
-
-/**
* aer_init - provide AER initialization
* @dev: pointer to AER pcie device
*
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 882bd8d29fe3..c82548afcd5c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -174,19 +174,14 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
pci_read_config_dword(dev, pos, &sz);
pci_write_config_dword(dev, pos, l);
- if (!sz)
- goto fail; /* BAR not implemented */
-
/*
* All bits set in sz means the device isn't working properly.
- * If it's a memory BAR or a ROM, bit 0 must be clear; if it's
- * an io BAR, bit 1 must be clear.
+ * If the BAR isn't implemented, all bits must be 0. If it's a
+ * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
+ * 1 must be clear.
*/
- if (sz == 0xffffffff) {
- dev_err(&dev->dev, "reg %x: invalid size %#x; broken device?\n",
- pos, sz);
+ if (!sz || sz == 0xffffffff)
goto fail;
- }
/*
* I don't know how l can have all bits set. Copied from old code.
@@ -249,17 +244,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
pos, res);
}
} else {
- u32 size = pci_size(l, sz, mask);
+ sz = pci_size(l, sz, mask);
- if (!size) {
- dev_err(&dev->dev, "reg %x: invalid size "
- "(l %#x sz %#x mask %#x); broken device?",
- pos, l, sz, mask);
+ if (!sz)
goto fail;
- }
res->start = l;
- res->end = l + size;
+ res->end = l + sz;
dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
}
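
The sizing probe above writes all ones to the BAR, reads back a mask, and lets pci_size() turn that mask into a length. As a worked example, assuming a well-behaved 32-bit memory BAR whose size bits form a contiguous high mask, a read-back of 0xfff00000 decodes a 1 MB window (~0xfff00000 + 1 = 0x00100000). A standalone illustration of that arithmetic (simplified relative to pci_size(), which also copes with already-programmed BARs):

#include <stdint.h>
#include <stdio.h>

/* Write all 1s to the BAR, read back, mask off the low flag bits,
 * then invert and add one to get the decoded length. */
static uint32_t bar_mem_size(uint32_t readback_after_ones)
{
	uint32_t mask = readback_after_ones & ~0xfu;	/* memory BAR: low 4 bits are flags */

	if (!mask)
		return 0;	/* BAR not implemented */
	return ~mask + 1;
}

int main(void)
{
	/* e.g. a read-back of 0xfff00000 decodes a 1 MB window */
	printf("size = 0x%x\n", (unsigned)bar_mem_size(0xfff00000u));
	return 0;
}
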
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 27c0e6eb7136..b7512cf08c58 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2127,6 +2127,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, 0x9602, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AI, 0x9602, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
@@ -2218,15 +2219,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
ht_enable_msi_mapping);
-/* The P5N32-SLI Premium motherboard from Asus has a problem with msi
+/* The P5N32-SLI motherboards from Asus have a problem with msi
* for the MCP55 NIC. It is not yet determined whether the msi problem
* also affects other devices. For now, turn off msi for this device.
*/
static void __devinit nvenet_msi_disable(struct pci_dev *dev)
{
- if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) {
+ if (dmi_name_in_vendors("P5N32-SLI PREMIUM") ||
+ dmi_name_in_vendors("P5N32-E SLI")) {
dev_info(&dev->dev,
- "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n");
+ "Disabling msi for MCP55 NIC on P5N32-SLI\n");
dev->no_msi = 1;
}
}
@@ -2552,6 +2554,19 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
#endif /* CONFIG_PCI_IOV */
+/* Allow manual resource allocation for PCI hotplug bridges via the
+ * pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For some
+ * PCI-PCI hotplug bridges, such as the PLX 6254 (formerly HINT HB6),
+ * the kernel otherwise fails to allocate resources when a hotplug
+ * device is inserted and the PCI bus is rescanned.
+ */
+static void __devinit quirk_hotplug_bridge(struct pci_dev *dev)
+{
+ dev->is_hotplug_bridge = 1;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
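
The quirk above only marks the bridge as hotplug-capable; the window sizes still come from the boot parameters named in the comment, e.g. pci=hpmemsize=64M,hpiosize=16K (example values). A similar quirk for another bridge would follow the same pattern; the vendor/device IDs below are made up:

/* Hypothetical example: mark another PCI-PCI bridge as a hotplug
 * bridge so that extra window space is reserved for it during
 * resource assignment. */
static void __devinit quirk_hotplug_bridge_example(struct pci_dev *dev)
{
	dev->is_hotplug_bridge = 1;
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0xabcd, quirk_hotplug_bridge_example);
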
+
/*
* This is a quirk for the Ricoh MMC controller found as a part of
* some mulifunction chips.
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 4fe36d2e1049..19b111383f62 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -838,65 +838,11 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
}
}
-static int __init pci_bus_get_depth(struct pci_bus *bus)
-{
- int depth = 0;
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- int ret;
- struct pci_bus *b = dev->subordinate;
- if (!b)
- continue;
-
- ret = pci_bus_get_depth(b);
- if (ret + 1 > depth)
- depth = ret + 1;
- }
-
- return depth;
-}
-static int __init pci_get_max_depth(void)
-{
- int depth = 0;
- struct pci_bus *bus;
-
- list_for_each_entry(bus, &pci_root_buses, node) {
- int ret;
-
- ret = pci_bus_get_depth(bus);
- if (ret > depth)
- depth = ret;
- }
-
- return depth;
-}
-
-/*
- * first try will not touch pci bridge res
- * second and later try will clear small leaf bridge res
- * will stop till to the max deepth if can not find good one
- */
void __init
pci_assign_unassigned_resources(void)
{
struct pci_bus *bus;
- int tried_times = 0;
- enum release_type rel_type = leaf_only;
- struct resource_list_x head, *list;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH;
- unsigned long failed_type;
- int max_depth = pci_get_max_depth();
- int pci_try_num;
- head.next = NULL;
-
- pci_try_num = max_depth + 1;
- printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
- max_depth, pci_try_num);
-
-again:
/* Depth first, calculate sizes and alignments of all
subordinate buses. */
list_for_each_entry(bus, &pci_root_buses, node) {
@@ -904,65 +850,9 @@ again:
}
/* Depth last, allocate resources and update the hardware. */
list_for_each_entry(bus, &pci_root_buses, node) {
- __pci_bus_assign_resources(bus, &head);
- }
- tried_times++;
-
- /* any device complain? */
- if (!head.next)
- goto enable_and_dump;
- failed_type = 0;
- for (list = head.next; list;) {
- failed_type |= list->flags;
- list = list->next;
- }
- /*
- * io port are tight, don't try extra
- * or if reach the limit, don't want to try more
- */
- failed_type &= type_mask;
- if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
- free_failed_list(&head);
- goto enable_and_dump;
- }
-
- printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
- tried_times + 1);
-
- /* third times and later will not check if it is leaf */
- if ((tried_times + 1) > 2)
- rel_type = whole_subtree;
-
- /*
- * Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge
- */
- for (list = head.next; list;) {
- bus = list->dev->bus;
- pci_bus_release_bridge_resources(bus, list->flags & type_mask,
- rel_type);
- list = list->next;
- }
- /* restore size and flags */
- for (list = head.next; list;) {
- struct resource *res = list->res;
-
- res->start = list->start;
- res->end = list->end;
- res->flags = list->flags;
- if (list->dev->subordinate)
- res->flags = 0;
-
- list = list->next;
- }
- free_failed_list(&head);
-
- goto again;
-
-enable_and_dump:
- /* Depth last, update the hardware. */
- list_for_each_entry(bus, &pci_root_buses, node)
+ pci_bus_assign_resources(bus);
pci_enable_bridges(bus);
+ }
/* dump the resource on buses */
list_for_each_entry(bus, &pci_root_buses, node) {
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 659eaa0fc48f..e0189cf7c558 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -97,6 +97,50 @@ static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf)
return bus_speed_read(slot->bus->cur_bus_speed, buf);
}
+static void remove_sysfs_files(struct pci_slot *slot)
+{
+ char func[10];
+ struct list_head *tmp;
+
+ list_for_each(tmp, &slot->bus->devices) {
+ struct pci_dev *dev = pci_dev_b(tmp);
+ if (PCI_SLOT(dev->devfn) != slot->number)
+ continue;
+ sysfs_remove_link(&dev->dev.kobj, "slot");
+
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ sysfs_remove_link(&slot->kobj, func);
+ }
+}
+
+static int create_sysfs_files(struct pci_slot *slot)
+{
+ int result;
+ char func[10];
+ struct list_head *tmp;
+
+ list_for_each(tmp, &slot->bus->devices) {
+ struct pci_dev *dev = pci_dev_b(tmp);
+ if (PCI_SLOT(dev->devfn) != slot->number)
+ continue;
+
+ result = sysfs_create_link(&dev->dev.kobj, &slot->kobj, "slot");
+ if (result)
+ goto fail;
+
+ snprintf(func, 10, "function%d", PCI_FUNC(dev->devfn));
+ result = sysfs_create_link(&slot->kobj, &dev->dev.kobj, func);
+ if (result)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ remove_sysfs_files(slot);
+ return result;
+}
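
create_sysfs_files() links each device in the slot to the slot's kobject and back. Assuming a slot named "5" holding a single-function device 0000:00:02.0 (hypothetical example), the result is roughly a function0 symlink under /sys/bus/pci/slots/5/ pointing at the device's sysfs directory, and a slot symlink under /sys/devices/pci0000:00/0000:00:02.0/ pointing back at the slot directory. remove_sysfs_files() tears both links down again when the slot goes away.
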
+
static void pci_slot_release(struct kobject *kobj)
{
struct pci_dev *dev;
@@ -109,6 +153,8 @@ static void pci_slot_release(struct kobject *kobj)
if (PCI_SLOT(dev->devfn) == slot->number)
dev->slot = NULL;
+ remove_sysfs_files(slot);
+
list_del(&slot->list);
kfree(slot);
@@ -300,6 +346,8 @@ placeholder:
INIT_LIST_HEAD(&slot->list);
list_add(&slot->list, &parent->slots);
+ create_sysfs_files(slot);
+
list_for_each_entry(dev, &parent->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot_nr)
dev->slot = slot;