Diffstat (limited to 'arch/i386/kernel/cpu')
-rw-r--r--   arch/i386/kernel/cpu/common.c                      |  16
-rw-r--r--   arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c        |  57
-rw-r--r--   arch/i386/kernel/cpu/cpufreq/longhaul.c            |  16
-rw-r--r--   arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c  |  20
-rw-r--r--   arch/i386/kernel/cpu/cpufreq/speedstep-smi.c       |   2
-rw-r--r--   arch/i386/kernel/cpu/cyrix.c                       |   6
-rw-r--r--   arch/i386/kernel/cpu/intel.c                       |   9
-rw-r--r--   arch/i386/kernel/cpu/intel_cacheinfo.c             |   9
-rw-r--r--   arch/i386/kernel/cpu/mtrr/main.c                   |   2
9 files changed, 72 insertions, 65 deletions
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 4553ffd94b1f..46ce9b248f55 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -613,8 +613,8 @@ void __devinit cpu_init(void)
 	memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu),
 	       GDT_ENTRY_TLS_ENTRIES * 8);
 
-	__asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
-	__asm__ __volatile__("lidt %0" : : "m" (idt_descr));
+	load_gdt(&cpu_gdt_descr[cpu]);
+	load_idt(&idt_descr);
 
 	/*
 	 * Delete NT
@@ -642,12 +642,12 @@ void __devinit cpu_init(void)
 	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
 
 	/* Clear all 6 debug registers: */
-
-#define CD(register) set_debugreg(0, register)
-
-	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
-
-#undef CD
+	set_debugreg(0, 0);
+	set_debugreg(0, 1);
+	set_debugreg(0, 2);
+	set_debugreg(0, 3);
+	set_debugreg(0, 6);
+	set_debugreg(0, 7);
 
 	/*
 	 * Force FPU initialization:
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 60a9e54dd20e..822c8ce9d1f1 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -31,6 +31,7 @@
 #include <linux/cpufreq.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/compiler.h>
 #include <asm/io.h>
 #include <asm/delay.h>
 #include <asm/uaccess.h>
@@ -57,6 +58,8 @@ static struct cpufreq_acpi_io	*acpi_io_data[NR_CPUS];
 
 static struct cpufreq_driver acpi_cpufreq_driver;
 
+static unsigned int acpi_pstate_strict;
+
 static int
 acpi_processor_write_port(
 	u16	port,
@@ -163,34 +166,44 @@ acpi_processor_set_performance (
 	}
 
 	/*
-	 * Then we read the 'status_register' and compare the value with the
-	 * target state's 'status' to make sure the transition was successful.
-	 * Note that we'll poll for up to 1ms (100 cycles of 10us) before
-	 * giving up.
+	 * Assume the write went through when acpi_pstate_strict is not used.
+	 * As read status_register is an expensive operation and there
+	 * are no specific error cases where an IO port write will fail.
 	 */
-
-	port = data->acpi_data.status_register.address;
-	bit_width = data->acpi_data.status_register.bit_width;
-
-	dprintk("Looking for 0x%08x from port 0x%04x\n",
-		(u32) data->acpi_data.states[state].status, port);
-
-	for (i=0; i<100; i++) {
-		ret = acpi_processor_read_port(port, bit_width, &value);
-		if (ret) {
-			dprintk("Invalid port width 0x%04x\n", bit_width);
-			retval = ret;
-			goto migrate_end;
+	if (acpi_pstate_strict) {
+		/* Then we read the 'status_register' and compare the value
+		 * with the target state's 'status' to make sure the
+		 * transition was successful.
+		 * Note that we'll poll for up to 1ms (100 cycles of 10us)
+		 * before giving up.
+		 */
+
+		port = data->acpi_data.status_register.address;
+		bit_width = data->acpi_data.status_register.bit_width;
+
+		dprintk("Looking for 0x%08x from port 0x%04x\n",
+			(u32) data->acpi_data.states[state].status, port);
+
+		for (i=0; i<100; i++) {
+			ret = acpi_processor_read_port(port, bit_width, &value);
+			if (ret) {
+				dprintk("Invalid port width 0x%04x\n", bit_width);
+				retval = ret;
+				goto migrate_end;
+			}
+			if (value == (u32) data->acpi_data.states[state].status)
+				break;
+			udelay(10);
 		}
-		if (value == (u32) data->acpi_data.states[state].status)
-			break;
-		udelay(10);
+	} else {
+		i = 0;
+		value = (u32) data->acpi_data.states[state].status;
 	}
 
 	/* notify cpufreq */
 	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
 
-	if (value != (u32) data->acpi_data.states[state].status) {
+	if (unlikely(value != (u32) data->acpi_data.states[state].status)) {
 		unsigned int tmp = cpufreq_freqs.new;
 		cpufreq_freqs.new = cpufreq_freqs.old;
 		cpufreq_freqs.old = tmp;
@@ -537,6 +550,8 @@ acpi_cpufreq_exit (void)
 	return;
 }
 
+module_param(acpi_pstate_strict, uint, 0644);
+MODULE_PARM_DESC(acpi_pstate_strict, "value 0 or non-zero. non-zero -> strict ACPI checks are performed during frequency changes.");
 
 late_initcall(acpi_cpufreq_init);
 module_exit(acpi_cpufreq_exit);
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 04e3563da4fe..8ef38544453c 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -64,8 +64,6 @@ static int dont_scale_voltage;
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg)
 
-#define __hlt() __asm__ __volatile__("hlt": : :"memory")
-
 /* Clock ratios multiplied by 10 */
 static int clock_ratio[32];
 static int eblcr_table[32];
@@ -168,11 +166,9 @@ static void do_powersaver(union msr_longhaul *longhaul,
 	outb(0xFE,0x21);	/* TMR0 only */
 	outb(0xFF,0x80);	/* delay */
 
-	local_irq_enable();
-
-	__hlt();
+	safe_halt();
 	wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-	__hlt();
+	halt();
 
 	local_irq_disable();
 
@@ -251,9 +247,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 		bcr2.bits.CLOCKMUL = clock_ratio_index;
 		local_irq_disable();
 		wrmsrl (MSR_VIA_BCR2, bcr2.val);
-		local_irq_enable();
-
-		__hlt();
+		safe_halt();
 
 		/* Disable software clock multiplier */
 		rdmsrl (MSR_VIA_BCR2, bcr2.val);
@@ -473,11 +467,11 @@ static void __init longhaul_setup_voltagescaling(void)
 	}
 
 	if (vrmrev==0) {
-		dprintk ("VRM 8.5 \n");
+		dprintk ("VRM 8.5\n");
 		memcpy (voltage_table, vrm85scales, sizeof(voltage_table));
 		numvscales = (voltage_table[maxvid]-voltage_table[minvid])/25;
 	} else {
-		dprintk ("Mobile VRM \n");
+		dprintk ("Mobile VRM\n");
 		memcpy (voltage_table, mobilevrmscales, sizeof(voltage_table));
 		numvscales = (voltage_table[maxvid]-voltage_table[minvid])/5;
 	}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 327a55d4d1c6..c397b6220430 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -259,7 +259,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 
 	if (model->op_points == NULL) {
 		/* Matched a non-match */
-		dprintk(KERN_INFO PFX "no table support for CPU model \"%s\": \n",
+		dprintk(KERN_INFO PFX "no table support for CPU model \"%s\"\n",
 		       cpu->x86_model_id);
 #ifndef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
 		dprintk(KERN_INFO PFX "try compiling with CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI enabled\n");
@@ -402,7 +402,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 
 	for (i=0; i<p.state_count; i++) {
 		if (p.states[i].control != p.states[i].status) {
-			dprintk("Different control (%x) and status values (%x)\n",
+			dprintk("Different control (%llu) and status values (%llu)\n",
 				p.states[i].control, p.states[i].status);
 			result = -EINVAL;
 			goto err_unreg;
@@ -415,7 +415,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 		}
 
 		if (p.states[i].core_frequency > p.states[0].core_frequency) {
-			dprintk("P%u has larger frequency (%u) than P0 (%u), skipping\n", i,
+			dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i,
 				p.states[i].core_frequency, p.states[0].core_frequency);
 			p.states[i].core_frequency = 0;
 			continue;
@@ -498,13 +498,6 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 	if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
 		return -ENODEV;
 
-	for (i = 0; i < N_IDS; i++)
-		if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
-			break;
-
-	if (i != N_IDS)
-		centrino_cpu[policy->cpu] = &cpu_ids[i];
-
 	if (is_const_loops_cpu(policy->cpu)) {
 		centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
 	}
@@ -513,6 +506,13 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
 	if (policy->cpu != 0)
 		return -ENODEV;
 
+	for (i = 0; i < N_IDS; i++)
+		if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
+			break;
+
+	if (i != N_IDS)
+		centrino_cpu[policy->cpu] = &cpu_ids[i];
+
 	if (!centrino_cpu[policy->cpu]) {
 		dprintk(KERN_INFO PFX "found unsupported CPU with "
 				"Enhanced SpeedStep: send /proc/cpuinfo to "
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index b25fb6b635ae..2718fb6f6aba 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -99,7 +99,7 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
 	u32 function = GET_SPEEDSTEP_FREQS;
 
 	if (!(ist_info.event & 0xFFFF)) {
-		dprintk("bug #1422 -- can't read freqs from BIOS\n", result);
+		dprintk("bug #1422 -- can't read freqs from BIOS\n");
 		return -ENODEV;
 	}
 
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index ba4b01138c8f..ff87cc22b323 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -132,11 +132,7 @@ static void __init set_cx86_memwb(void)
 	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
 	/* set 'Not Write-through' */
 	cr0 = 0x20000000;
-	__asm__("movl %%cr0,%%eax\n\t"
-		"orl %0,%%eax\n\t"
-		"movl %%eax,%%cr0\n"
-		: : "r" (cr0)
-		:"ax");
+	write_cr0(read_cr0() | cr0);
 	/* CCR2 bit 2: lock NW bit and set WT1 */
 	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
 }
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index a2c33c1a46c5..43601de0f633 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -82,16 +82,13 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
  */
 static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
 {
-	unsigned int eax;
+	unsigned int eax, ebx, ecx, edx;
 
 	if (c->cpuid_level < 4)
 		return 1;
 
-	__asm__("cpuid"
-		: "=a" (eax)
-		: "0" (4), "c" (0)
-		: "bx", "dx");
-
+	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
+	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 	if (eax & 0x1f)
 		return ((eax >> 26) + 1);
 	else
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 6c55b50cf048..9e0d5f83cb9f 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -305,6 +305,9 @@ static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info	*this_leaf;
 	unsigned long num_threads_sharing;
+#ifdef CONFIG_X86_HT
+	struct cpuinfo_x86 *c = cpu_data + cpu;
+#endif
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
@@ -314,10 +317,12 @@ static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 #ifdef CONFIG_X86_HT
 	else if (num_threads_sharing == smp_num_siblings)
 		this_leaf->shared_cpu_map = cpu_sibling_map[cpu];
-#endif
+	else if (num_threads_sharing == (c->x86_num_cores * smp_num_siblings))
+		this_leaf->shared_cpu_map = cpu_core_map[cpu];
 	else
-		printk(KERN_INFO "Number of CPUs sharing cache didn't match "
+		printk(KERN_DEBUG "Number of CPUs sharing cache didn't match "
 				"any known set of CPUs\n");
+#endif
 }
 #else
 static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 764cac64e211..dd4ebd6af7e4 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -561,7 +561,7 @@ struct mtrr_value {
 
 static struct mtrr_value * mtrr_state;
 
-static int mtrr_save(struct sys_device * sysdev, u32 state)
+static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
 {
 	int i;
 	int size = num_var_ranges * sizeof(struct mtrr_value);