Diffstat (limited to 'arch/powerpc/kernel/eeh.c')
-rw-r--r--  arch/powerpc/kernel/eeh.c | 366
1 file changed, 343 insertions(+), 23 deletions(-)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 86e25702aaca..59a64f8dc85f 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/pci.h>
+#include <linux/iommu.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/reboot.h>
@@ -40,6 +41,7 @@
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
+#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
@@ -108,6 +110,9 @@ struct eeh_ops *eeh_ops = NULL;
/* Lock to avoid races due to multiple reports of an error */
DEFINE_RAW_SPINLOCK(confirm_error_lock);
+/* Lock to protect the PE pass-through device counts */
+static DEFINE_MUTEX(eeh_dev_mutex);
+
/* Buffer for reporting pci register dumps. It's here in BSS, and
* not dynamically allocated, so that it ends up in the RMO where RTAS
* can access it.
@@ -137,7 +142,7 @@ static struct eeh_stats eeh_stats;
static int __init eeh_setup(char *str)
{
if (!strcmp(str, "off"))
- eeh_subsystem_flags |= EEH_FORCE_DISABLED;
+ eeh_add_flag(EEH_FORCE_DISABLED);
return 1;
}
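
With this hunk, "eeh=off" on the kernel command line goes through the new eeh_add_flag() accessor instead of OR-ing eeh_subsystem_flags directly; the observable behavior is unchanged. Illustrative boot parameter usage:

    # illustrative: disable EEH for the whole boot
    ... eeh=off
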
@@ -152,12 +157,13 @@ __setup("eeh=", eeh_setup);
* This routine captures assorted PCI configuration space data,
* and puts them into a buffer for RTAS error logging.
*/
-static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
+static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len)
{
struct device_node *dn = eeh_dev_to_of_node(edev);
u32 cfg;
int cap, i;
- int n = 0;
+ int n = 0, l = 0;
+ char buffer[128];
n += scnprintf(buf+n, len-n, "%s\n", dn->full_name);
pr_warn("EEH: of node=%s\n", dn->full_name);
@@ -202,8 +208,22 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
for (i=0; i<=8; i++) {
eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
- pr_warn("EEH: PCI-E %02x: %08x\n", i, cfg);
+
+ if ((i % 4) == 0) {
+ if (i != 0)
+ pr_warn("%s\n", buffer);
+
+ l = scnprintf(buffer, sizeof(buffer),
+ "EEH: PCI-E %02x: %08x ",
+ 4*i, cfg);
+ } else {
+ l += scnprintf(buffer+l, sizeof(buffer)-l,
+ "%08x ", cfg);
+ }
+
}
+
+ pr_warn("%s\n", buffer);
}
/* If AER capable, dump it */
@@ -212,11 +232,24 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
n += scnprintf(buf+n, len-n, "pci-e AER:\n");
pr_warn("EEH: PCI-E AER capability register set follows:\n");
- for (i=0; i<14; i++) {
+ for (i=0; i<=13; i++) {
eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
- pr_warn("EEH: PCI-E AER %02x: %08x\n", i, cfg);
+
+ if ((i % 4) == 0) {
+ if (i != 0)
+ pr_warn("%s\n", buffer);
+
+ l = scnprintf(buffer, sizeof(buffer),
+ "EEH: PCI-E AER %02x: %08x ",
+ 4*i, cfg);
+ } else {
+ l += scnprintf(buffer+l, sizeof(buffer)-l,
+ "%08x ", cfg);
+ }
}
+
+ pr_warn("%s\n", buffer);
}
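
The two rewritten dump loops above share one batching pattern: accumulate four 32-bit words per line in a stack buffer and flush it with a single pr_warn() per group, shrinking the dump from one log line per dword to one per four. Note the first column is now the byte offset into the capability (4*i), where the old code printed the raw loop index. With placeholder values (xxxxxxxx, purely illustrative), the 14-dword AER dump now reads:

    EEH: PCI-E AER 00: xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
    EEH: PCI-E AER 10: xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
    EEH: PCI-E AER 20: xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
    EEH: PCI-E AER 30: xxxxxxxx xxxxxxxx
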
return n;
@@ -247,7 +280,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
* 0xFF's is always returned from PCI config space.
*/
if (!(pe->type & EEH_PE_PHB)) {
- if (eeh_probe_mode_devtree())
+ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);
@@ -298,14 +331,14 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
unsigned long flags;
int ret;
- if (!eeh_probe_mode_dev())
+ if (!eeh_has_flag(EEH_PROBE_MODE_DEV))
return -EPERM;
/* Find the PHB PE */
phb_pe = eeh_phb_pe_get(pe->phb);
if (!phb_pe) {
- pr_warning("%s Can't find PE for PHB#%d\n",
- __func__, pe->phb->global_number);
+ pr_warn("%s Can't find PE for PHB#%d\n",
+ __func__, pe->phb->global_number);
return -EEXIST;
}
@@ -400,6 +433,14 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
if (ret > 0)
return ret;
+ /*
+ * If the PE isn't owned by us, we shouldn't check the
+ * state. Instead, let the owner handle it if the PE has
+ * been frozen.
+ */
+ if (eeh_pe_passed(pe))
+ return 0;
+
/* If we already have a pending isolation event for this
* slot, we know it's bad already, we don't need to check.
* Do this checking under a lock; as multiple PCI devices
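
For context, a hedged sketch of the usual caller side (my_dev_readl and the register layout are hypothetical; the EEH calls are the ones in this file): a driver that reads all-ones from MMIO asks eeh_dev_check_failure() whether the slot is frozen, and with the hunk above a passed-through PE now short-circuits to 0 so its owner handles recovery instead:

    #include <linux/io.h>
    #include <linux/pci.h>
    #include <asm/eeh.h>

    static u32 my_dev_readl(struct pci_dev *pdev, void __iomem *regs, int off)
    {
            u32 val = readl(regs + off);

            /* All-ones often means a frozen slot; let EEH confirm. */
            if (val == 0xffffffff) {
                    struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);

                    if (edev)
                            eeh_dev_check_failure(edev);
            }

            return val;
    }
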
@@ -746,13 +787,13 @@ void eeh_save_bars(struct eeh_dev *edev)
int __init eeh_ops_register(struct eeh_ops *ops)
{
if (!ops->name) {
- pr_warning("%s: Invalid EEH ops name for %p\n",
+ pr_warn("%s: Invalid EEH ops name for %p\n",
__func__, ops);
return -EINVAL;
}
if (eeh_ops && eeh_ops != ops) {
- pr_warning("%s: EEH ops of platform %s already existing (%s)\n",
+ pr_warn("%s: EEH ops of platform %s already existing (%s)\n",
__func__, eeh_ops->name, ops->name);
return -EEXIST;
}
@@ -772,7 +813,7 @@ int __init eeh_ops_register(struct eeh_ops *ops)
int __exit eeh_ops_unregister(const char *name)
{
if (!name || !strlen(name)) {
- pr_warning("%s: Invalid EEH ops name\n",
+ pr_warn("%s: Invalid EEH ops name\n",
__func__);
return -EINVAL;
}
@@ -788,7 +829,7 @@ int __exit eeh_ops_unregister(const char *name)
static int eeh_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
- eeh_set_enable(false);
+ eeh_clear_flag(EEH_ENABLED);
return NOTIFY_DONE;
}
@@ -837,11 +878,11 @@ int eeh_init(void)
/* call platform initialization function */
if (!eeh_ops) {
- pr_warning("%s: Platform EEH operation not found\n",
+ pr_warn("%s: Platform EEH operation not found\n",
__func__);
return -EEXIST;
} else if ((ret = eeh_ops->init())) {
- pr_warning("%s: Failed to call platform init function (%d)\n",
+ pr_warn("%s: Failed to call platform init function (%d)\n",
__func__, ret);
return ret;
}
@@ -852,13 +893,13 @@ int eeh_init(void)
return ret;
/* Enable EEH for all adapters */
- if (eeh_probe_mode_devtree()) {
+ if (eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) {
list_for_each_entry_safe(hose, tmp,
&hose_list, list_node) {
phb = hose->dn;
traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
}
- } else if (eeh_probe_mode_dev()) {
+ } else if (eeh_has_flag(EEH_PROBE_MODE_DEV)) {
list_for_each_entry_safe(hose, tmp,
&hose_list, list_node)
pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL);
@@ -882,7 +923,7 @@ int eeh_init(void)
if (eeh_enabled())
pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
else
- pr_warning("EEH: No capable adapters found\n");
+ pr_warn("EEH: No capable adapters found\n");
return ret;
}
@@ -910,7 +951,7 @@ void eeh_add_device_early(struct device_node *dn)
* would delay the probe until late stage because
* the PCI device isn't available this moment.
*/
- if (!eeh_probe_mode_devtree())
+ if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
return;
if (!of_node_to_eeh_dev(dn))
@@ -996,7 +1037,7 @@ void eeh_add_device_late(struct pci_dev *dev)
* We have to do the EEH probe here because the PCI device
* hasn't been created yet in the early stage.
*/
- if (eeh_probe_mode_dev())
+ if (eeh_has_flag(EEH_PROBE_MODE_DEV))
eeh_ops->dev_probe(dev, NULL);
eeh_addr_cache_insert_dev(dev);
@@ -1100,6 +1141,285 @@ void eeh_remove_device(struct pci_dev *dev)
edev->mode &= ~EEH_DEV_SYSFS;
}
+/**
+ * eeh_dev_open - Increase count of pass through devices for PE
+ * @pdev: PCI device
+ *
+ * Increase count of passed through devices for the indicated
+ * PE. As a result, EEH errors detected on the PE won't be
+ * reported. The PE owner will be responsible for detection
+ * and recovery.
+ */
+int eeh_dev_open(struct pci_dev *pdev)
+{
+ struct eeh_dev *edev;
+
+ mutex_lock(&eeh_dev_mutex);
+
+ /* No PCI device ? */
+ if (!pdev)
+ goto out;
+
+ /* No EEH device or PE ? */
+ edev = pci_dev_to_eeh_dev(pdev);
+ if (!edev || !edev->pe)
+ goto out;
+
+ /* Increase PE's pass through count */
+ atomic_inc(&edev->pe->pass_dev_cnt);
+ mutex_unlock(&eeh_dev_mutex);
+
+ return 0;
+out:
+ mutex_unlock(&eeh_dev_mutex);
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(eeh_dev_open);
+
+/**
+ * eeh_dev_release - Decrease count of pass through devices for PE
+ * @pdev: PCI device
+ *
+ * Decrease count of pass through devices for the indicated PE. If
+ * there are no passed through devices left in the PE, EEH errors detected
+ * on the PE will be reported and handled as usual.
+ */
+void eeh_dev_release(struct pci_dev *pdev)
+{
+ struct eeh_dev *edev;
+
+ mutex_lock(&eeh_dev_mutex);
+
+ /* No PCI device ? */
+ if (!pdev)
+ goto out;
+
+ /* No EEH device ? */
+ edev = pci_dev_to_eeh_dev(pdev);
+ if (!edev || !edev->pe || !eeh_pe_passed(edev->pe))
+ goto out;
+
+ /* Decrease PE's pass through count */
+ atomic_dec(&edev->pe->pass_dev_cnt);
+ WARN_ON(atomic_read(&edev->pe->pass_dev_cnt) < 0);
+out:
+ mutex_unlock(&eeh_dev_mutex);
+}
+EXPORT_SYMBOL(eeh_dev_release);
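
A hedged sketch of the intended consumer, a pass-through host such as VFIO (function names hypothetical): eeh_dev_open() when handing the device to a guest, eeh_dev_release() when taking it back:

    static int my_attach_to_guest(struct pci_dev *pdev)
    {
            int ret;

            /* Host-side EEH reporting is suppressed from here on. */
            ret = eeh_dev_open(pdev);
            if (ret)
                    return ret;

            /* ... give the device to the guest ... */
            return 0;
    }

    static void my_detach_from_guest(struct pci_dev *pdev)
    {
            /* ... reclaim the device ... */

            /* Normal host-side EEH detection and recovery resume. */
            eeh_dev_release(pdev);
    }
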
+
+#ifdef CONFIG_IOMMU_API
+
+static int dev_has_iommu_table(struct device *dev, void *data)
+{
+ struct pci_dev *pdev = dev ? to_pci_dev(dev) : NULL;
+ struct pci_dev **ppdev = data;
+ struct iommu_table *tbl;
+
+ if (!pdev)
+ return 0;
+
+ tbl = get_iommu_table_base(dev);
+ if (tbl && tbl->it_group) {
+ *ppdev = pdev;
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * eeh_iommu_group_to_pe - Convert IOMMU group to EEH PE
+ * @group: IOMMU group
+ *
+ * The routine is called to convert an IOMMU group to its EEH PE.
+ */
+struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group)
+{
+ struct pci_dev *pdev = NULL;
+ struct eeh_dev *edev;
+ int ret;
+
+ /* No IOMMU group ? */
+ if (!group)
+ return NULL;
+
+ ret = iommu_group_for_each_dev(group, &pdev, dev_has_iommu_table);
+ if (!ret || !pdev)
+ return NULL;
+
+ /* No EEH device or PE ? */
+ edev = pci_dev_to_eeh_dev(pdev);
+ if (!edev || !edev->pe)
+ return NULL;
+
+ return edev->pe;
+}
+EXPORT_SYMBOL_GPL(eeh_iommu_group_to_pe);
+
+#endif /* CONFIG_IOMMU_API */
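
A hedged sketch of how a caller (e.g. a VFIO container backend; my_group_state is hypothetical) would use this helper to go from an IOMMU group to the eeh_pe_* API added below:

    static int my_group_state(struct iommu_group *group)
    {
            struct eeh_pe *pe = eeh_iommu_group_to_pe(group);

            if (!pe)
                    return -ENODEV; /* no EEH-capable PCI device in the group */

            return eeh_pe_get_state(pe);
    }
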
+
+/**
+ * eeh_pe_set_option - Set options for the indicated PE
+ * @pe: EEH PE
+ * @option: requested option
+ *
+ * The routine is called to enable or disable EEH functionality
+ * on the indicated PE, or to re-enable IO or DMA on a frozen PE.
+ */
+int eeh_pe_set_option(struct eeh_pe *pe, int option)
+{
+ int ret = 0;
+
+ /* Invalid PE ? */
+ if (!pe)
+ return -ENODEV;
+
+ /*
+ * EEH functionality could possibly be disabled; just
+ * return an error in that case. EEH functionality
+ * isn't expected to be disabled on one specific PE.
+ */
+ switch (option) {
+ case EEH_OPT_ENABLE:
+ if (eeh_enabled())
+ break;
+ ret = -EIO;
+ break;
+ case EEH_OPT_DISABLE:
+ break;
+ case EEH_OPT_THAW_MMIO:
+ case EEH_OPT_THAW_DMA:
+ if (!eeh_ops || !eeh_ops->set_option) {
+ ret = -ENOENT;
+ break;
+ }
+
+ ret = eeh_ops->set_option(pe, option);
+ break;
+ default:
+ pr_debug("%s: Option %d out of range (%d, %d)\n",
+ __func__, option, EEH_OPT_DISABLE, EEH_OPT_THAW_DMA);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(eeh_pe_set_option);
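
For example (a hedged sketch; my_thaw_pe is hypothetical), an owner recovering a frozen PE would thaw MMIO first so diagnostics can touch registers, then DMA:

    static int my_thaw_pe(struct eeh_pe *pe)
    {
            int ret;

            /* Re-enable MMIO before touching device registers. */
            ret = eeh_pe_set_option(pe, EEH_OPT_THAW_MMIO);
            if (ret)
                    return ret;

            /* Then re-enable DMA so traffic can flow again. */
            return eeh_pe_set_option(pe, EEH_OPT_THAW_DMA);
    }
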
+
+/**
+ * eeh_pe_get_state - Retrieve PE's state
+ * @pe: EEH PE
+ *
+ * Retrieve the PE's state, which includes 3 aspects: enabled
+ * DMA, enabled IO and asserted reset.
+ */
+int eeh_pe_get_state(struct eeh_pe *pe)
+{
+ int result, ret = 0;
+ bool rst_active, dma_en, mmio_en;
+
+ /* Existing PE ? */
+ if (!pe)
+ return -ENODEV;
+
+ if (!eeh_ops || !eeh_ops->get_state)
+ return -ENOENT;
+
+ result = eeh_ops->get_state(pe, NULL);
+ rst_active = !!(result & EEH_STATE_RESET_ACTIVE);
+ dma_en = !!(result & EEH_STATE_DMA_ENABLED);
+ mmio_en = !!(result & EEH_STATE_MMIO_ENABLED);
+
+ if (rst_active)
+ ret = EEH_PE_STATE_RESET;
+ else if (dma_en && mmio_en)
+ ret = EEH_PE_STATE_NORMAL;
+ else if (!dma_en && !mmio_en)
+ ret = EEH_PE_STATE_STOPPED_IO_DMA;
+ else if (!dma_en && mmio_en)
+ ret = EEH_PE_STATE_STOPPED_DMA;
+ else
+ ret = EEH_PE_STATE_UNAVAIL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(eeh_pe_get_state);
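
The return value folds the three hardware bits into a single state; a hedged sketch of decoding it (my_pe_state_name is hypothetical):

    static const char *my_pe_state_name(struct eeh_pe *pe)
    {
            switch (eeh_pe_get_state(pe)) {
            case EEH_PE_STATE_NORMAL:         return "normal";
            case EEH_PE_STATE_RESET:          return "reset asserted";
            case EEH_PE_STATE_STOPPED_IO_DMA: return "frozen: IO and DMA stopped";
            case EEH_PE_STATE_STOPPED_DMA:    return "frozen: DMA stopped";
            case EEH_PE_STATE_UNAVAIL:        return "unavailable";
            default:                          return "error querying state";
            }
    }
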
+
+/**
+ * eeh_pe_reset - Issue PE reset according to specified type
+ * @pe: EEH PE
+ * @option: reset type
+ *
+ * The routine is called to reset the specified PE with the
+ * indicated type, either fundamental reset or hot reset.
+ * PE reset is the most important step in error recovery.
+ */
+int eeh_pe_reset(struct eeh_pe *pe, int option)
+{
+ int ret = 0;
+
+ /* Invalid PE ? */
+ if (!pe)
+ return -ENODEV;
+
+ if (!eeh_ops || !eeh_ops->set_option || !eeh_ops->reset)
+ return -ENOENT;
+
+ switch (option) {
+ case EEH_RESET_DEACTIVATE:
+ ret = eeh_ops->reset(pe, option);
+ if (ret)
+ break;
+
+ /*
+ * The PE is still in the frozen state and we need to clear
+ * that. It's best to thaw only after the reset has been
+ * deasserted, so that no IO slips through while reset is
+ * still active and freezes the PE again.
+ */
+ ret = eeh_ops->set_option(pe, EEH_OPT_THAW_MMIO);
+ if (!ret)
+ ret = eeh_ops->set_option(pe, EEH_OPT_THAW_DMA);
+ if (!ret)
+ eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
+ break;
+ case EEH_RESET_HOT:
+ case EEH_RESET_FUNDAMENTAL:
+ ret = eeh_ops->reset(pe, option);
+ break;
+ default:
+ pr_debug("%s: Unsupported option %d\n",
+ __func__, option);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(eeh_pe_reset);
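
A hedged sketch of the expected reset cycle (my_pe_hot_reset is hypothetical; the 250 ms hold time is illustrative, nothing in this code mandates it):

    #include <linux/delay.h>

    static int my_pe_hot_reset(struct eeh_pe *pe)
    {
            int ret;

            ret = eeh_pe_reset(pe, EEH_RESET_HOT);
            if (ret)
                    return ret;

            msleep(250); /* hold the reset; duration illustrative */

            /* Deassert; per the code above this also thaws MMIO/DMA
             * and clears EEH_PE_ISOLATED on success.
             */
            return eeh_pe_reset(pe, EEH_RESET_DEACTIVATE);
    }
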
+
+/**
+ * eeh_pe_configure - Configure PCI bridges after PE reset
+ * @pe: EEH PE
+ *
+ * The routine is called to restore the PCI config space for
+ * the PCI devices, especially the PCI bridges, affected by the
+ * PE reset issued previously.
+ */
+int eeh_pe_configure(struct eeh_pe *pe)
+{
+ int ret = 0;
+
+ /* Invalid PE ? */
+ if (!pe)
+ return -ENODEV;
+
+ /* Restore config space for the affected devices */
+ eeh_pe_restore_bars(pe);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(eeh_pe_configure);
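
Taken together, the new exports let a PE owner drive recovery end to end; a hedged sketch reusing my_pe_hot_reset from the previous example:

    static int my_pe_recover(struct eeh_pe *pe)
    {
            int ret = my_pe_hot_reset(pe); /* sketch above */

            if (ret)
                    return ret;

            /* Restore BARs and bridge windows lost across the reset. */
            return eeh_pe_configure(pe);
    }
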
+
static int proc_eeh_show(struct seq_file *m, void *v)
{
if (!eeh_enabled()) {
@@ -1143,9 +1463,9 @@ static const struct file_operations proc_eeh_operations = {
static int eeh_enable_dbgfs_set(void *data, u64 val)
{
if (val)
- eeh_subsystem_flags &= ~EEH_FORCE_DISABLED;
+ eeh_clear_flag(EEH_FORCE_DISABLED);
else
- eeh_subsystem_flags |= EEH_FORCE_DISABLED;
+ eeh_add_flag(EEH_FORCE_DISABLED);
/* Notify the backend */
if (eeh_ops->post_init)