author     Thomas Gleixner <tglx@linutronix.de>    2008-05-08 09:18:43 +0200
committer  Ingo Molnar <mingo@elte.hu>             2008-05-08 15:43:51 +0200
commit     8d4a4300854f3971502e81dacd930704cb88f606 (patch)
tree       d091b49851e60af1530dd3d7cd54057f98d48ffb /arch/x86
parent     x86: geode: define geode_has_vsa2() even if CONFIG_MGEODE_LX is not set (diff)
x86: cleanup PAT cpu validation
Move the scattered checks for PAT support to a single function. It is moved to addon_cpuid_features.c as this file is shared between 32 and 64 bit.

Remove the manipulation of the PAT feature bit and just disable PAT in the PAT layer, based on the PAT bit provided by the CPU and the current CPU version/model white list.

Change the boot CPU check so it works on Voyager somewhere in the future as well :) Also panic when a secondary CPU has PAT disabled but the primary one has already switched to PAT. We have no way to undo that.

The white list is kept for now to ensure that we can rely on known-to-work CPU types and concentrate on the software-induced problems instead of fighting CPU errata and subtle wreckage caused by not yet verified CPUs. Once the PAT code has stabilized enough, we can remove the white list and open the can of worms.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
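For readers skimming the patch, the following is a condensed, standalone sketch of the control flow this change introduces: a single validate_pat_support() whitelist check that calls pat_disable() with a reason string. It is not the kernel code itself; struct fields, vendor constants, and the has_pat flag are simplified stand-ins for the real cpuinfo_x86, X86_VENDOR_* and cpu_has_pat definitions, and the whitelist values are copied from the diff below.

/*
 * Standalone illustration of the consolidated PAT validation flow.
 * The real code lives in arch/x86/kernel/cpu/addon_cpuid_features.c
 * and arch/x86/mm/pat.c; everything kernel-specific is stubbed here.
 */
#include <stdio.h>

#define X86_VENDOR_INTEL 0   /* stand-in vendor IDs for illustration */
#define X86_VENDOR_AMD   2

struct cpuinfo_x86 {
	int x86_vendor;
	int x86;        /* CPU family */
	int x86_model;
	int has_pat;    /* stand-in for the CPUID PAT feature bit */
};

static int pat_wc_enabled = 1;

/* Mirrors pat_disable(): record the reason and turn PAT off. */
static void pat_disable(const char *reason)
{
	pat_wc_enabled = 0;
	printf("%s\n", reason);
}

/* Mirrors validate_pat_support(): whitelist of known-good CPUs. */
static void validate_pat_support(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		if (c->x86 >= 0xf && c->x86 <= 0x11)
			return;
		break;
	case X86_VENDOR_INTEL:
		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
			return;
		break;
	}

	pat_disable(c->has_pat ?
		    "PAT disabled. Not yet verified on this CPU type." :
		    "PAT not supported by CPU.");
}

int main(void)
{
	/* Core 2 class CPU: family 6, model 15 -> on the whitelist. */
	struct cpuinfo_x86 core2 = { X86_VENDOR_INTEL, 6, 15, 1 };
	/* Older family 6, model 8 -> PAT disabled despite the CPUID bit. */
	struct cpuinfo_x86 p3    = { X86_VENDOR_INTEL, 6, 8, 1 };

	validate_pat_support(&core2);
	printf("core2: pat_wc_enabled=%d\n", pat_wc_enabled);

	pat_wc_enabled = 1;
	validate_pat_support(&p3);
	printf("p3:    pat_wc_enabled=%d\n", pat_wc_enabled);
	return 0;
}

In the real patch, pat_init() then refuses to do anything when pat_wc_enabled is clear, and BUG()s if a secondary CPU lacks the PAT feature after the boot CPU has already programmed the PAT MSR, since that cannot be undone.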
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c   21
-rw-r--r--  arch/x86/kernel/cpu/common.c                 27
-rw-r--r--  arch/x86/kernel/setup_64.c                     9
-rw-r--r--  arch/x86/mm/pat.c                             50
4 files changed, 47 insertions, 60 deletions
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 238468ae1993..c2e1ce33c7cb 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
+#include <asm/pat.h>
#include <asm/processor.h>
struct cpuid_bit {
@@ -48,3 +49,23 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
set_cpu_cap(c, cb->feature);
}
}
+
+#ifdef CONFIG_X86_PAT
+void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
+{
+ switch (c->x86_vendor) {
+ case X86_VENDOR_AMD:
+ if (c->x86 >= 0xf && c->x86 <= 0x11)
+ return;
+ break;
+ case X86_VENDOR_INTEL:
+ if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+ return;
+ break;
+ }
+
+ pat_disable(cpu_has_pat ?
+ "PAT disabled. Not yet verified on this CPU type." :
+ "PAT not supported by CPU.");
+}
+#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 35b4f6a9c8ef..d0463a946247 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -12,6 +12,7 @@
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
+#include <asm/pat.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
@@ -308,19 +309,6 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
}
- clear_cpu_cap(c, X86_FEATURE_PAT);
-
- switch (c->x86_vendor) {
- case X86_VENDOR_AMD:
- if (c->x86 >= 0xf && c->x86 <= 0x11)
- set_cpu_cap(c, X86_FEATURE_PAT);
- break;
- case X86_VENDOR_INTEL:
- if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
- set_cpu_cap(c, X86_FEATURE_PAT);
- break;
- }
-
}
/*
@@ -409,18 +397,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
init_scattered_cpuid_features(c);
}
- clear_cpu_cap(c, X86_FEATURE_PAT);
-
- switch (c->x86_vendor) {
- case X86_VENDOR_AMD:
- if (c->x86 >= 0xf && c->x86 <= 0x11)
- set_cpu_cap(c, X86_FEATURE_PAT);
- break;
- case X86_VENDOR_INTEL:
- if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
- set_cpu_cap(c, X86_FEATURE_PAT);
- break;
- }
}
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
@@ -651,6 +627,7 @@ void __init early_cpu_init(void)
cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
early_cpu_detect();
+ validate_pat_support(&boot_cpu_data);
}
/* Make sure %fs is initialized properly in idle threads */
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 22c14e21c97c..80d80fab7006 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -70,6 +70,7 @@
#include <asm/ds.h>
#include <asm/topology.h>
#include <asm/trampoline.h>
+#include <asm/pat.h>
#include <mach_apic.h>
#ifdef CONFIG_PARAVIRT
@@ -1063,25 +1064,19 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
if (c->extended_cpuid_level >= 0x80000007)
c->x86_power = cpuid_edx(0x80000007);
-
- clear_cpu_cap(c, X86_FEATURE_PAT);
-
switch (c->x86_vendor) {
case X86_VENDOR_AMD:
early_init_amd(c);
- if (c->x86 >= 0xf && c->x86 <= 0x11)
- set_cpu_cap(c, X86_FEATURE_PAT);
break;
case X86_VENDOR_INTEL:
early_init_intel(c);
- if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
- set_cpu_cap(c, X86_FEATURE_PAT);
break;
case X86_VENDOR_CENTAUR:
early_init_centaur(c);
break;
}
+ validate_pat_support(c);
}
/*
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 277446cd30b6..60adbe22efa0 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -25,31 +25,24 @@
#include <asm/mtrr.h>
#include <asm/io.h>
-int pat_wc_enabled = 1;
+#ifdef CONFIG_X86_PAT
+int __read_mostly pat_wc_enabled = 1;
-static u64 __read_mostly boot_pat_state;
-
-static int nopat(char *str)
+void __init pat_disable(char *reason)
{
pat_wc_enabled = 0;
- printk(KERN_INFO "x86: PAT support disabled.\n");
-
- return 0;
+ printk(KERN_INFO "%s\n", reason);
}
-early_param("nopat", nopat);
-static int pat_known_cpu(void)
+static int nopat(char *str)
{
- if (!pat_wc_enabled)
- return 0;
-
- if (cpu_has_pat)
- return 1;
-
- pat_wc_enabled = 0;
- printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
+ pat_disable("PAT support disabled.");
return 0;
}
+early_param("nopat", nopat);
+#endif
+
+static u64 __read_mostly boot_pat_state;
enum {
PAT_UC = 0, /* uncached */
@@ -66,17 +59,19 @@ void pat_init(void)
{
u64 pat;
-#ifndef CONFIG_X86_PAT
- nopat(NULL);
-#endif
-
- /* Boot CPU enables PAT based on CPU feature */
- if (!smp_processor_id() && !pat_known_cpu())
+ if (!pat_wc_enabled)
return;
- /* APs enable PAT iff boot CPU has enabled it before */
- if (smp_processor_id() && !pat_wc_enabled)
- return;
+ /* Paranoia check. */
+ if (!cpu_has_pat) {
+ printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
+ /*
+ * Panic if this happens on the secondary CPU, and we
+ * switched to PAT on the boot CPU. We have no way to
+ * undo PAT.
+ */
+ BUG_ON(boot_pat_state);
+ }
/* Set PWT to Write-Combining. All other bits stay the same */
/*
@@ -95,9 +90,8 @@ void pat_init(void)
PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);
/* Boot CPU check */
- if (!smp_processor_id()) {
+ if (!boot_pat_state)
rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
- }
wrmsrl(MSR_IA32_CR_PAT, pat);
printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",