author      Linus Torvalds <torvalds@linux-foundation.org>   2015-02-10 03:22:04 +0100
committer   Linus Torvalds <torvalds@linux-foundation.org>   2015-02-10 03:22:04 +0100
commit      e07e0d4cb0c4bfe822ec8491cc06269096a38bea (patch)
tree        856bbf6c33d4de660d0b1e2c85019d0d3555123f
parent      Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
parent      Merge tag 'please-pull-einj-mmcfg' of git://git.kernel.org/pub/scm/linux/kern... (diff)
Merge branch 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 RAS update from Ingo Molnar:
 "The changes in this cycle were:

   - allow mmcfg access to APEI error injection handlers

   - improve MCE error messages

   - smaller cleanups"

* 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, mce: Fix sparse errors
  x86, mce: Improve timeout error messages
  ACPI, EINJ: Enhance error injection tolerance level
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce.c   | 23
-rw-r--r--   arch/x86/pci/mmconfig-shared.c     | 28
-rw-r--r--   drivers/acpi/apei/apei-base.c      | 32
3 files changed, 67 insertions, 16 deletions
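
The mmconfig/APEI part of this pull wires up a new function pointer, arch_apei_filter_addr, so APEI can subtract the architecture's memory-mapped config (MMCFG) regions from the ranges it reserves, which in turn lets EINJ inject errors into MMCFG space. A minimal sketch of how an architecture would plug into this hook follows; the struct and list names (my_cfg_region, my_cfg_regions) are made up for illustration and are not part of the patch:

#include <linux/init.h>
#include <linux/list.h>
#include <linux/types.h>

/* Declared by APEI (drivers/acpi/apei/apei-base.c) in the patch below. */
extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
						void *data), void *data);

/* Hypothetical per-arch region bookkeeping, for illustration only. */
struct my_cfg_region {
	struct list_head list;
	__u64 start;
	__u64 size;
};
static LIST_HEAD(my_cfg_regions);

/* Walk every config region and hand it to the APEI callback; a non-zero
 * return stops the walk, mirroring pci_mmcfg_for_each_region() below. */
static int my_arch_for_each_cfg_region(int (*func)(__u64 start, __u64 size,
						   void *data), void *data)
{
	struct my_cfg_region *r;
	int rc;

	list_for_each_entry(r, &my_cfg_regions, list) {
		rc = func(r->start, r->size, data);
		if (rc)
			return rc;
	}
	return 0;
}

/* Done once during early init, before APEI requests its resources. */
static void __init my_arch_register_apei_filter(void)
{
	arch_apei_filter_addr = my_arch_for_each_cfg_region;
}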
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index d23179900755..cdfed7953963 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -116,7 +116,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
*/
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
@@ -312,7 +312,7 @@ static void wait_for_panic(void)
panic("Panicing machine check CPU died");
}
-static void mce_panic(char *msg, struct mce *final, char *exp)
+static void mce_panic(const char *msg, struct mce *final, char *exp)
{
int i, apei_err = 0;
@@ -530,7 +530,7 @@ static void mce_schedule_work(void)
schedule_work(this_cpu_ptr(&mce_work));
}
-DEFINE_PER_CPU(struct irq_work, mce_irq_work);
+static DEFINE_PER_CPU(struct irq_work, mce_irq_work);
static void mce_irq_work_cb(struct irq_work *entry)
{
@@ -736,7 +736,7 @@ static atomic_t mce_callin;
/*
* Check if a timeout waiting for other CPUs happened.
*/
-static int mce_timed_out(u64 *t)
+static int mce_timed_out(u64 *t, const char *msg)
{
/*
* The others already did panic for some reason.
@@ -751,8 +751,7 @@ static int mce_timed_out(u64 *t)
goto out;
if ((s64)*t < SPINUNIT) {
if (mca_cfg.tolerant <= 1)
- mce_panic("Timeout synchronizing machine check over CPUs",
- NULL, NULL);
+ mce_panic(msg, NULL, NULL);
cpu_missing = 1;
return 1;
}
@@ -868,7 +867,8 @@ static int mce_start(int *no_way_out)
* Wait for everyone.
*/
while (atomic_read(&mce_callin) != cpus) {
- if (mce_timed_out(&timeout)) {
+ if (mce_timed_out(&timeout,
+ "Timeout: Not all CPUs entered broadcast exception handler")) {
atomic_set(&global_nwo, 0);
return -1;
}
@@ -893,7 +893,8 @@ static int mce_start(int *no_way_out)
* only seen by one CPU before cleared, avoiding duplicates.
*/
while (atomic_read(&mce_executing) < order) {
- if (mce_timed_out(&timeout)) {
+ if (mce_timed_out(&timeout,
+ "Timeout: Subject CPUs unable to finish machine check processing")) {
atomic_set(&global_nwo, 0);
return -1;
}
@@ -937,7 +938,8 @@ static int mce_end(int order)
* loops.
*/
while (atomic_read(&mce_executing) <= cpus) {
- if (mce_timed_out(&timeout))
+ if (mce_timed_out(&timeout,
+ "Timeout: Monarch CPU unable to finish machine check processing"))
goto reset;
ndelay(SPINUNIT);
}
@@ -950,7 +952,8 @@ static int mce_end(int order)
* Subject: Wait for Monarch to finish.
*/
while (atomic_read(&mce_executing) != 0) {
- if (mce_timed_out(&timeout))
+ if (mce_timed_out(&timeout,
+ "Timeout: Monarch CPU did not finish machine check processing"))
goto reset;
ndelay(SPINUNIT);
}
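
The first hunk above makes x86_mce_decoder_chain static, which is safe because consumers such as EDAC decoders never touch the chain head directly; they register through the exported mce_register_decode_chain()/mce_unregister_decode_chain() helpers. A rough sketch of that usage, not taken from this patch (the decoder and its output line are invented for illustration):

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/mce.h>

/* Illustrative decoder: print a one-line summary for each reported MCE. */
static int my_mce_decode(struct notifier_block *nb, unsigned long val,
			 void *data)
{
	struct mce *m = data;

	if (!m)
		return NOTIFY_DONE;

	pr_info("MCE on CPU %u: bank %d, status 0x%llx\n",
		m->extcpu, m->bank, m->status);
	return NOTIFY_OK;
}

static struct notifier_block my_mce_decoder_nb = {
	.notifier_call	= my_mce_decode,
};

static int __init my_decoder_init(void)
{
	mce_register_decode_chain(&my_mce_decoder_nb);
	return 0;
}

static void __exit my_decoder_exit(void)
{
	mce_unregister_decode_chain(&my_mce_decoder_nb);
}

module_init(my_decoder_init);
module_exit(my_decoder_exit);
MODULE_LICENSE("GPL");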
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 326198a4434e..676e5e04e4d4 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -610,6 +610,32 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
return 0;
}
+#ifdef CONFIG_ACPI_APEI
+extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
+ void *data), void *data);
+
+static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
+ void *data), void *data)
+{
+ struct pci_mmcfg_region *cfg;
+ int rc;
+
+ if (list_empty(&pci_mmcfg_list))
+ return 0;
+
+ list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ rc = func(cfg->res.start, resource_size(&cfg->res), data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+#define set_apei_filter() (arch_apei_filter_addr = pci_mmcfg_for_each_region)
+#else
+#define set_apei_filter()
+#endif
+
static void __init __pci_mmcfg_init(int early)
{
pci_mmcfg_reject_broken(early);
@@ -644,6 +670,8 @@ void __init pci_mmcfg_early_init(void)
else
acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
__pci_mmcfg_init(1);
+
+ set_apei_filter();
}
}
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 2cd7bdd6c8b3..a85ac07f3da3 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -449,7 +449,7 @@ int apei_resources_sub(struct apei_resources *resources1,
}
EXPORT_SYMBOL_GPL(apei_resources_sub);
-static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
+static int apei_get_res_callback(__u64 start, __u64 size, void *data)
{
struct apei_resources *resources = data;
return apei_res_add(&resources->iomem, start, size);
@@ -457,7 +457,15 @@ static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
static int apei_get_nvs_resources(struct apei_resources *resources)
{
- return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
+ return acpi_nvs_for_each_region(apei_get_res_callback, resources);
+}
+
+int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
+ void *data), void *data);
+static int apei_get_arch_resources(struct apei_resources *resources)
+
+{
+ return arch_apei_filter_addr(apei_get_res_callback, resources);
}
/*
@@ -470,7 +478,7 @@ int apei_resources_request(struct apei_resources *resources,
{
struct apei_res *res, *res_bak = NULL;
struct resource *r;
- struct apei_resources nvs_resources;
+ struct apei_resources nvs_resources, arch_res;
int rc;
rc = apei_resources_sub(resources, &apei_resources_all);
@@ -485,10 +493,20 @@ int apei_resources_request(struct apei_resources *resources,
apei_resources_init(&nvs_resources);
rc = apei_get_nvs_resources(&nvs_resources);
if (rc)
- goto res_fini;
+ goto nvs_res_fini;
rc = apei_resources_sub(resources, &nvs_resources);
if (rc)
- goto res_fini;
+ goto nvs_res_fini;
+
+ if (arch_apei_filter_addr) {
+ apei_resources_init(&arch_res);
+ rc = apei_get_arch_resources(&arch_res);
+ if (rc)
+ goto arch_res_fini;
+ rc = apei_resources_sub(resources, &arch_res);
+ if (rc)
+ goto arch_res_fini;
+ }
rc = -EINVAL;
list_for_each_entry(res, &resources->iomem, list) {
@@ -536,7 +554,9 @@ err_unmap_iomem:
break;
release_mem_region(res->start, res->end - res->start);
}
-res_fini:
+arch_res_fini:
+ apei_resources_fini(&arch_res);
+nvs_res_fini:
apei_resources_fini(&nvs_resources);
return rc;
}