From 9963d1aad40946b1b6d34f9bee8d8a1b9032ae22 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 21 Nov 2008 21:07:16 +1030 Subject: [CPUFREQ] clean up speedstep-centrino and reduce cpumask_t usage Impact: cleanup 1) The #ifdef CONFIG_HOTPLUG_CPU seems unnecessary these days. 2) The loop can simply skip over offline cpus, rather than creating a tmp mask. 3) set_mask is set to either a single cpu or all online cpus in a policy. Since it's just used for set_cpus_allowed(), any offline cpus in a policy don't matter, so we can just use cpumask_of_cpu() or the policy->cpus. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 51 +++++++++++------------- 1 file changed, 24 insertions(+), 27 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 3b5f06423e77..f0ea6fa2f53c 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -459,9 +459,7 @@ static int centrino_verify (struct cpufreq_policy *policy) * Sets a new CPUFreq policy. */ struct allmasks { - cpumask_t online_policy_cpus; cpumask_t saved_mask; - cpumask_t set_mask; cpumask_t covered_cpus; }; @@ -475,9 +473,7 @@ static int centrino_target (struct cpufreq_policy *policy, int retval = 0; unsigned int j, k, first_cpu, tmp; CPUMASK_ALLOC(allmasks); - CPUMASK_PTR(online_policy_cpus, allmasks); CPUMASK_PTR(saved_mask, allmasks); - CPUMASK_PTR(set_mask, allmasks); CPUMASK_PTR(covered_cpus, allmasks); if (unlikely(allmasks == NULL)) @@ -497,30 +493,28 @@ static int centrino_target (struct cpufreq_policy *policy, goto out; } -#ifdef CONFIG_HOTPLUG_CPU - /* cpufreq holds the hotplug lock, so we are safe from here on */ - cpus_and(*online_policy_cpus, cpu_online_map, policy->cpus); -#else - *online_policy_cpus = policy->cpus; -#endif - *saved_mask = current->cpus_allowed; first_cpu = 1; cpus_clear(*covered_cpus); - for_each_cpu_mask_nr(j, *online_policy_cpus) { + for_each_cpu_mask_nr(j, policy->cpus) { + const cpumask_t *mask; + + /* cpufreq holds the hotplug lock, so we are safe here */ + if (!cpu_online(j)) + continue; + /* * Support for SMP systems. 
* Make sure we are running on CPU that wants to change freq */ - cpus_clear(*set_mask); if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) - cpus_or(*set_mask, *set_mask, *online_policy_cpus); + mask = &policy->cpus; else - cpu_set(j, *set_mask); + mask = &cpumask_of_cpu(j); - set_cpus_allowed_ptr(current, set_mask); + set_cpus_allowed_ptr(current, mask); preempt_disable(); - if (unlikely(!cpu_isset(smp_processor_id(), *set_mask))) { + if (unlikely(!cpu_isset(smp_processor_id(), *mask))) { dprintk("couldn't limit to CPUs in this domain\n"); retval = -EAGAIN; if (first_cpu) { @@ -548,7 +542,9 @@ static int centrino_target (struct cpufreq_policy *policy, dprintk("target=%dkHz old=%d new=%d msr=%04x\n", target_freq, freqs.old, freqs.new, msr); - for_each_cpu_mask_nr(k, *online_policy_cpus) { + for_each_cpu_mask_nr(k, policy->cpus) { + if (!cpu_online(k)) + continue; freqs.cpu = k; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -571,7 +567,9 @@ static int centrino_target (struct cpufreq_policy *policy, preempt_enable(); } - for_each_cpu_mask_nr(k, *online_policy_cpus) { + for_each_cpu_mask_nr(k, policy->cpus) { + if (!cpu_online(k)) + continue; freqs.cpu = k; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -584,18 +582,17 @@ static int centrino_target (struct cpufreq_policy *policy, * Best effort undo.. */ - if (!cpus_empty(*covered_cpus)) - for_each_cpu_mask_nr(j, *covered_cpus) { - set_cpus_allowed_ptr(current, - &cpumask_of_cpu(j)); - wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); - } + for_each_cpu_mask_nr(j, *covered_cpus) { + set_cpus_allowed_ptr(current, &cpumask_of_cpu(j)); + wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); + } tmp = freqs.new; freqs.new = freqs.old; freqs.old = tmp; - for_each_cpu_mask_nr(j, *online_policy_cpus) { - freqs.cpu = j; + for_each_cpu_mask_nr(j, policy->cpus) { + if (!cpu_online(j)) + continue; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } -- cgit v1.2.3 From 10db2e5cbda5b4e13d2e2f134b963bee2e129999 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Fri, 17 Oct 2008 22:52:04 +0200 Subject: [CPUFREQ] p4-clockmod: reduce noise On those CPUs which are SpeedStep (EST) capable, we do not care at all if p4-clockmod does not work, since a technically superior CPU frequency management technology is to be used. Signed-off-by: Dominik Brodowski Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index b8e05ee4f736..ba3a94a997c3 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -171,7 +171,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) } if (c->x86 != 0xF) { - printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to \n"); + if (!cpu_has(c, X86_FEATURE_EST)) + printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. " + "Please send an e-mail to \n"); return 0; } -- cgit v1.2.3 From e088e4c9cdb618675874becb91b2fd581ee707e6 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Tue, 25 Nov 2008 13:29:47 -0500 Subject: [CPUFREQ] Disable sysfs ui for p4-clockmod. p4-clockmod has a long history of abuse. It pretends to be a CPU frequency scaling driver, even though it doesn't actually change the CPU frequency, but instead just modulates the frequency with wait-states. 
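As a rough sketch of the mechanism being described (not code from this patch): the "modulation" is done through the on-demand clock-modulation MSR, with the bit layout taken from the Intel SDM (duty cycle in bits 3:1, enable in bit 4); the helper name below is invented for illustration.

#include <asm/msr.h>

/* Sketch only: throttle 'cpu' to roughly newstate/8 duty cycle. */
static void sketch_set_duty_cycle(unsigned int cpu, unsigned int newstate)
{
	u32 l, h;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
	l &= ~(0xf << 1);			/* clear old duty cycle and enable bit */
	l |= (newstate << 1) | (1 << 4);	/* set duty cycle, enable throttling */
	wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
}

The core clock itself never changes; execution is simply gated, which is the point the next paragraph makes.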
The biggest misconception is that when running at the lower 'frequency' p4-clockmod is saving power. This isn't the case, as workloads running slower take longer to complete, preventing the CPU from entering deep C states. However p4-clockmod does have a purpose. It can prevent overheating. Having it hooked up to the cpufreq interfaces is the wrong way to achieve cooling however. It should instead be hooked up to ACPI. This diff introduces a means for a cpufreq driver to register with the cpufreq core, but not present a sysfs interface. Signed-off-by: Matthew Garrett Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 1 + drivers/cpufreq/cpufreq.c | 51 ++++++++++++++++++++----------- include/linux/cpufreq.h | 1 + 3 files changed, 35 insertions(+), 18 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index ba3a94a997c3..0c43b2240517 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -276,6 +276,7 @@ static struct cpufreq_driver p4clockmod_driver = { .name = "p4-clockmod", .owner = THIS_MODULE, .attr = p4clockmod_attr, + .hide_interface = 1, }; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 31d6f535a79d..9044b911d8ae 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -754,6 +754,11 @@ static struct kobj_type ktype_cpufreq = { .release = cpufreq_sysfs_release, }; +static struct kobj_type ktype_empty_cpufreq = { + .sysfs_ops = &sysfs_ops, + .release = cpufreq_sysfs_release, +}; + /** * cpufreq_add_dev - add a CPU device @@ -876,26 +881,36 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) memcpy(&new_policy, policy, sizeof(struct cpufreq_policy)); /* prepare interface data */ - ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj, - "cpufreq"); - if (ret) - goto err_out_driver_exit; - - /* set up files for this cpu device */ - drv_attr = cpufreq_driver->attr; - while ((drv_attr) && (*drv_attr)) { - ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); + if (!cpufreq_driver->hide_interface) { + ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, + &sys_dev->kobj, "cpufreq"); if (ret) goto err_out_driver_exit; - drv_attr++; - } - if (cpufreq_driver->get) { - ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); - if (ret) - goto err_out_driver_exit; - } - if (cpufreq_driver->target) { - ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); + + /* set up files for this cpu device */ + drv_attr = cpufreq_driver->attr; + while ((drv_attr) && (*drv_attr)) { + ret = sysfs_create_file(&policy->kobj, + &((*drv_attr)->attr)); + if (ret) + goto err_out_driver_exit; + drv_attr++; + } + if (cpufreq_driver->get) { + ret = sysfs_create_file(&policy->kobj, + &cpuinfo_cur_freq.attr); + if (ret) + goto err_out_driver_exit; + } + if (cpufreq_driver->target) { + ret = sysfs_create_file(&policy->kobj, + &scaling_cur_freq.attr); + if (ret) + goto err_out_driver_exit; + } + } else { + ret = kobject_init_and_add(&policy->kobj, &ktype_empty_cpufreq, + &sys_dev->kobj, "cpufreq"); if (ret) goto err_out_driver_exit; } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 1ee608fd7b77..484b3abf61bb 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -234,6 +234,7 @@ struct cpufreq_driver { int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); int (*resume) (struct cpufreq_policy *policy); struct freq_attr 
**attr; + bool hide_interface; }; /* flags */ -- cgit v1.2.3 From c60e19eb21d9a0fb0d78969884f32d88354abca9 Mon Sep 17 00:00:00 2001 From: Herton Ronaldo Krzesinski Date: Sat, 15 Nov 2008 17:02:46 -0200 Subject: [CPUFREQ] add to speedstep-lib additional fsb values for core processors Add additional fsb values to pentium_core_get_frequency, from the latest edition (September 2008) of Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3B: System Programming Guide, Part 2. Values added are to detect 800, 1067 and 1333 FSB types. Signed-off-by: Herton Ronaldo Krzesinski Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index 98d4fdb7dc04..cdac7d62369b 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c @@ -139,6 +139,15 @@ static unsigned int pentium_core_get_frequency(void) case 3: fsb = 166667; break; + case 2: + fsb = 200000; + break; + case 0: + fsb = 266667; + break; + case 4: + fsb = 333333; + break; default: printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value"); } -- cgit v1.2.3 From 8529154ec3f3ac20344c65b7a040c604c7af7651 Mon Sep 17 00:00:00 2001 From: Herton Ronaldo Krzesinski Date: Sat, 15 Nov 2008 17:02:46 -0200 Subject: [CPUFREQ] Add Celeron Core support to p4-clockmod. Add Celeron Core support to p4-clockmod. Signed-off-by: Herton Ronaldo Krzesinski Signed-off-by: Dave Jones --- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index 0c43b2240517..beea4466b063 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -160,6 +160,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) switch (c->x86_model) { case 0x0E: /* Core */ case 0x0F: /* Core Duo */ + case 0x16: /* Celeron Core */ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS; return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE); case 0x0D: /* Pentium M (Dothan) */ -- cgit v1.2.3 From 98a79d6a50181ca1ecf7400eda01d5dc1bc0dbf0 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 13 Dec 2008 21:19:41 +1030 Subject: cpumask: centralize cpu_online_map and cpu_possible_map Impact: cleanup Each SMP arch defines these themselves. Move them to a central location. Twists: 1) Some archs (m32r, parisc, s390) set possible_map to all 1, so we add a CONFIG_INIT_ALL_POSSIBLE for this rather than break them. 2) mips and sparc32 '#define cpu_possible_map phys_cpu_present_map'. Those archs simply have phys_cpu_present_map replaced everywhere. 3) Alpha defined cpu_possible_map to cpu_present_map; this is tricky so I just manipulate them both in sync. 4) IA64, cris and m32r have gratuitous 'extern cpumask_t cpu_possible_map' declarations.
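The net effect, condensed from the kernel/cpu.c hunk at the end of this patch (nothing new, just the central definitions pulled out for readability):

/* kernel/cpu.c after this patch: one definition shared by every arch */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

#ifdef CONFIG_INIT_ALL_POSSIBLE
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
#else
cpumask_t cpu_possible_map __read_mostly;
#endif
EXPORT_SYMBOL(cpu_possible_map);

Archs that relied on an all-ones possible_map simply select the new Kconfig symbol.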
Signed-off-by: Rusty Russell Reviewed-by: Grant Grundler Tested-by: Tony Luck Acked-by: Ingo Molnar Cc: Mike Travis Cc: ink@jurassic.park.msu.ru Cc: rmk@arm.linux.org.uk Cc: starvik@axis.com Cc: tony.luck@intel.com Cc: takata@linux-m32r.org Cc: ralf@linux-mips.org Cc: grundler@parisc-linux.org Cc: paulus@samba.org Cc: schwidefsky@de.ibm.com Cc: lethal@linux-sh.org Cc: wli@holomorphy.com Cc: davem@davemloft.net Cc: jdike@addtoit.com Cc: mingo@redhat.com --- arch/alpha/include/asm/smp.h | 1 - arch/alpha/kernel/process.c | 2 ++ arch/alpha/kernel/smp.c | 7 ++----- arch/arm/kernel/smp.c | 10 ---------- arch/cris/arch-v32/kernel/smp.c | 4 ---- arch/cris/include/asm/smp.h | 1 - arch/ia64/include/asm/smp.h | 1 - arch/ia64/kernel/smpboot.c | 6 ------ arch/m32r/Kconfig | 1 + arch/m32r/kernel/smpboot.c | 6 ------ arch/mips/include/asm/smp.h | 3 --- arch/mips/kernel/smp-cmp.c | 2 +- arch/mips/kernel/smp-mt.c | 2 +- arch/mips/kernel/smp.c | 7 +------ arch/mips/kernel/smtc.c | 6 +++--- arch/mips/pmc-sierra/yosemite/smp.c | 6 +++--- arch/mips/sgi-ip27/ip27-smp.c | 2 +- arch/mips/sibyte/bcm1480/smp.c | 8 ++++---- arch/mips/sibyte/sb1250/smp.c | 8 ++++---- arch/parisc/Kconfig | 1 + arch/parisc/kernel/smp.c | 15 --------------- arch/powerpc/kernel/smp.c | 4 ---- arch/s390/Kconfig | 1 + arch/s390/kernel/smp.c | 6 ------ arch/sh/kernel/smp.c | 6 ------ arch/sparc/include/asm/smp_32.h | 2 -- arch/sparc/kernel/smp.c | 6 ++---- arch/sparc/kernel/sparc_ksyms.c | 4 ---- arch/sparc64/kernel/smp.c | 4 ---- arch/um/kernel/smp.c | 7 ------- arch/x86/kernel/smpboot.c | 6 ------ arch/x86/mach-voyager/voyager_smp.c | 7 ------- include/asm-m32r/smp.h | 2 -- init/Kconfig | 9 +++++++++ kernel/cpu.c | 11 ++++++----- 35 files changed, 42 insertions(+), 132 deletions(-) (limited to 'arch/x86') diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h index 544c69af8168..547e90951cec 100644 --- a/arch/alpha/include/asm/smp.h +++ b/arch/alpha/include/asm/smp.h @@ -45,7 +45,6 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS]; #define raw_smp_processor_id() (current_thread_info()->cpu) extern int smp_num_cpus; -#define cpu_possible_map cpu_present_map extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi(cpumask_t mask); diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 351407e07e71..f238370c907d 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -94,6 +94,7 @@ common_shutdown_1(void *generic_ptr) flags |= 0x00040000UL; /* "remain halted" */ *pflags = flags; cpu_clear(cpuid, cpu_present_map); + cpu_clear(cpuid, cpu_possible_map); halt(); } #endif @@ -120,6 +121,7 @@ common_shutdown_1(void *generic_ptr) #ifdef CONFIG_SMP /* Wait for the secondaries to halt. */ cpu_clear(boot_cpuid, cpu_present_map); + cpu_clear(boot_cpuid, cpu_possible_map); while (cpus_weight(cpu_present_map)) barrier(); #endif diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index cf7da10097bb..d953e510f68d 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -70,11 +70,6 @@ enum ipi_message_type { /* Set to a secondary's cpuid when it comes online. */ static int smp_secondary_alive __devinitdata = 0; -/* Which cpus ids came online. */ -cpumask_t cpu_online_map; - -EXPORT_SYMBOL(cpu_online_map); - int smp_num_probed; /* Internal processor count */ int smp_num_cpus = 1; /* Number that came online. 
*/ EXPORT_SYMBOL(smp_num_cpus); @@ -440,6 +435,7 @@ setup_smp(void) ((char *)cpubase + i*hwrpb->processor_size); if ((cpu->flags & 0x1cc) == 0x1cc) { smp_num_probed++; + cpu_set(i, cpu_possible_map); cpu_set(i, cpu_present_map); cpu->pal_revision = boot_cpu_palrev; } @@ -473,6 +469,7 @@ smp_prepare_cpus(unsigned int max_cpus) /* Nothing to do on a UP box, or when told not to. */ if (smp_num_probed == 1 || max_cpus == 0) { + cpu_possible_map = cpumask_of_cpu(boot_cpuid); cpu_present_map = cpumask_of_cpu(boot_cpuid); printk(KERN_INFO "SMP mode deactivated.\n"); return; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index e42a749a56dd..bd905c0a7365 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -33,16 +33,6 @@ #include #include -/* - * bitmask of present and online CPUs. - * The present bitmask indicates that the CPU is physically present. - * The online bitmask indicates that the CPU is up and running. - */ -cpumask_t cpu_possible_map; -EXPORT_SYMBOL(cpu_possible_map); -cpumask_t cpu_online_map; -EXPORT_SYMBOL(cpu_online_map); - /* * as from 2.5, kernels no longer have an init_tasks structure * so we need some other way of telling a new secondary core diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index 52e16c6436f9..9dac17334640 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c @@ -29,11 +29,7 @@ spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED}; /* CPU masks */ -cpumask_t cpu_online_map = CPU_MASK_NONE; -EXPORT_SYMBOL(cpu_online_map); cpumask_t phys_cpu_present_map = CPU_MASK_NONE; -cpumask_t cpu_possible_map; -EXPORT_SYMBOL(cpu_possible_map); EXPORT_SYMBOL(phys_cpu_present_map); /* Variables used during SMP boot */ diff --git a/arch/cris/include/asm/smp.h b/arch/cris/include/asm/smp.h index dba33aba3e95..c615a06dd757 100644 --- a/arch/cris/include/asm/smp.h +++ b/arch/cris/include/asm/smp.h @@ -4,7 +4,6 @@ #include extern cpumask_t phys_cpu_present_map; -extern cpumask_t cpu_possible_map; #define raw_smp_processor_id() (current_thread_info()->cpu) diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h index 12d96e0cd513..21c402365d0e 100644 --- a/arch/ia64/include/asm/smp.h +++ b/arch/ia64/include/asm/smp.h @@ -57,7 +57,6 @@ extern struct smp_boot_data { extern char no_int_routing __devinitdata; -extern cpumask_t cpu_online_map; extern cpumask_t cpu_core_map[NR_CPUS]; DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); extern int smp_num_siblings; diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 1dcbb85fc4ee..4ede6e571c38 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -131,12 +131,6 @@ struct task_struct *task_for_booting_cpu; */ DEFINE_PER_CPU(int, cpu_state); -/* Bitmasks of currently online, and possible CPUs */ -cpumask_t cpu_online_map; -EXPORT_SYMBOL(cpu_online_map); -cpumask_t cpu_possible_map = CPU_MASK_NONE; -EXPORT_SYMBOL(cpu_possible_map); - cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; EXPORT_SYMBOL(cpu_core_map); DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index dbaed4a63815..17a6dab09319 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -10,6 +10,7 @@ config M32R default y select HAVE_IDE select HAVE_OPROFILE + select INIT_ALL_POSSIBLE config SBUS bool diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c index 39cb6da72dcb..0f06b3722e96 100644 --- a/arch/m32r/kernel/smpboot.c +++ 
b/arch/m32r/kernel/smpboot.c @@ -73,17 +73,11 @@ static unsigned int bsp_phys_id = -1; /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map; -/* Bitmask of currently online CPUs */ -cpumask_t cpu_online_map; -EXPORT_SYMBOL(cpu_online_map); - cpumask_t cpu_bootout_map; cpumask_t cpu_bootin_map; static cpumask_t cpu_callin_map; cpumask_t cpu_callout_map; EXPORT_SYMBOL(cpu_callout_map); -cpumask_t cpu_possible_map = CPU_MASK_ALL; -EXPORT_SYMBOL(cpu_possible_map); /* Per CPU bogomips and other parameters */ struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned; diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 0ff5b523ea77..86557b5d1b3f 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -38,9 +38,6 @@ extern int __cpu_logical_map[NR_CPUS]; #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ #define SMP_CALL_FUNCTION 0x2 -extern cpumask_t phys_cpu_present_map; -#define cpu_possible_map phys_cpu_present_map - extern void asmlinkage smp_bootstrap(void); /* diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index ca476c4f62a5..6789c1a12120 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -226,7 +226,7 @@ void __init cmp_smp_setup(void) for (i = 1; i < NR_CPUS; i++) { if (amon_cpu_avail(i)) { - cpu_set(i, phys_cpu_present_map); + cpu_set(i, cpu_possible_map); __cpu_number_map[i] = ++ncpu; __cpu_logical_map[ncpu] = i; } diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 87a1816c1f45..6f7ee5ac46ee 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, write_vpe_c0_vpeconf0(tmp); /* Record this as available CPU */ - cpu_set(tc, phys_cpu_present_map); + cpu_set(tc, cpu_possible_map); __cpu_number_map[tc] = ++ncpu; __cpu_logical_map[ncpu] = tc; } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 8bf88faf5afd..3da94704f816 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -44,15 +44,10 @@ #include #endif /* CONFIG_MIPS_MT_SMTC */ -cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */ volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ -cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */ int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ -EXPORT_SYMBOL(phys_cpu_present_map); -EXPORT_SYMBOL(cpu_online_map); - extern void cpu_idle(void); /* Number of TCs (or siblings in Intel speak) per CPU core */ @@ -195,7 +190,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* preload SMP state for boot cpu */ void __devinit smp_prepare_boot_cpu(void) { - cpu_set(0, phys_cpu_present_map); + cpu_set(0, cpu_possible_map); cpu_set(0, cpu_online_map); cpu_set(0, cpu_callin_map); } diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 897fb2b4751c..b6cca01ff82b 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -290,7 +290,7 @@ static void smtc_configure_tlb(void) * possibly leave some TCs/VPEs as "slave" processors. * * Use c0_MVPConf0 to find out how many TCs are available, setting up - * phys_cpu_present_map and the logical/physical mappings. + * cpu_possible_map and the logical/physical mappings. 
*/ int __init smtc_build_cpu_map(int start_cpu_slot) @@ -304,7 +304,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot) */ ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; for (i=start_cpu_slot; i> MVPCONF0_PTC_SHIFT) + 1)) { - cpu_clear(tc, phys_cpu_present_map); + cpu_clear(tc, cpu_possible_map); cpu_clear(tc, cpu_present_map); tc++; } diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c index 3a7df647ca77..f78c29b68d77 100644 --- a/arch/mips/pmc-sierra/yosemite/smp.c +++ b/arch/mips/pmc-sierra/yosemite/smp.c @@ -141,7 +141,7 @@ static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle) } /* - * Detect available CPUs, populate phys_cpu_present_map before smp_init + * Detect available CPUs, populate cpu_possible_map before smp_init * * We don't want to start the secondary CPU yet nor do we have a nice probing * feature in PMON so we just assume presence of the secondary core. @@ -150,10 +150,10 @@ static void __init yos_smp_setup(void) { int i; - cpus_clear(phys_cpu_present_map); + cpus_clear(cpu_possible_map); for (i = 0; i < 2; i++) { - cpu_set(i, phys_cpu_present_map); + cpu_set(i, cpu_possible_map); __cpu_number_map[i] = i; __cpu_logical_map[i] = i; } diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index ba5cdebeaf0d..5b47d6b65275 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest) /* Only let it join in if it's marked enabled */ if ((acpu->cpu_info.flags & KLINFO_ENABLE) && (tot_cpus_found != NR_CPUS)) { - cpu_set(cpuid, phys_cpu_present_map); + cpu_set(cpuid, cpu_possible_map); alloc_cpupda(cpuid, tot_cpus_found); cpus_found++; tot_cpus_found++; diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index bd9eeb43ed0e..dddfda8e8294 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -136,7 +136,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle) /* * Use CFE to find out how many CPUs are available, setting up - * phys_cpu_present_map and the logical/physical mappings. + * cpu_possible_map and the logical/physical mappings. * XXXKW will the boot CPU ever not be physical 0? * * Common setup before any secondaries are started @@ -145,14 +145,14 @@ static void __init bcm1480_smp_setup(void) { int i, num; - cpus_clear(phys_cpu_present_map); - cpu_set(0, phys_cpu_present_map); + cpus_clear(cpu_possible_map); + cpu_set(0, cpu_possible_map); __cpu_number_map[0] = 0; __cpu_logical_map[0] = 0; for (i = 1, num = 0; i < NR_CPUS; i++) { if (cfe_cpu_stop(i) == 0) { - cpu_set(i, phys_cpu_present_map); + cpu_set(i, cpu_possible_map); __cpu_number_map[i] = ++num; __cpu_logical_map[num] = i; } diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index 0734b933e969..5950a288a7da 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -124,7 +124,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle) /* * Use CFE to find out how many CPUs are available, setting up - * phys_cpu_present_map and the logical/physical mappings. + * cpu_possible_map and the logical/physical mappings. * XXXKW will the boot CPU ever not be physical 0? 
* * Common setup before any secondaries are started @@ -133,14 +133,14 @@ static void __init sb1250_smp_setup(void) { int i, num; - cpus_clear(phys_cpu_present_map); - cpu_set(0, phys_cpu_present_map); + cpus_clear(cpu_possible_map); + cpu_set(0, cpu_possible_map); __cpu_number_map[0] = 0; __cpu_logical_map[0] = 0; for (i = 1, num = 0; i < NR_CPUS; i++) { if (cfe_cpu_stop(i) == 0) { - cpu_set(i, phys_cpu_present_map); + cpu_set(i, cpu_possible_map); __cpu_number_map[i] = ++num; __cpu_logical_map[num] = i; } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 644a70b1b04e..aacf11d33723 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -11,6 +11,7 @@ config PARISC select HAVE_OPROFILE select RTC_CLASS select RTC_DRV_PARISC + select INIT_ALL_POSSIBLE help The PA-RISC microprocessor is designed by Hewlett-Packard and used in many of their workstations & servers (HP9000 700 and 800 series, diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index d47f3975c9c6..80bc000523fa 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -67,21 +67,6 @@ static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is boo static int parisc_max_cpus __read_mostly = 1; -/* online cpus are ones that we've managed to bring up completely - * possible cpus are all valid cpu - * present cpus are all detected cpu - * - * On startup we bring up the "possible" cpus. Since we discover - * CPUs later, we add them as hotplug, so the possible cpu mask is - * empty in the beginning. - */ - -cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */ -cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */ - -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(cpu_possible_map); - DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; enum ipi_message_type { diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index ff9f7010097d..d1165566f064 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -60,13 +60,9 @@ int smp_hw_index[NR_CPUS]; struct thread_info *secondary_ti; -cpumask_t cpu_possible_map = CPU_MASK_NONE; -cpumask_t cpu_online_map = CPU_MASK_NONE; DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(cpu_possible_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 8116a3328a19..b4aa5869c7f9 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -75,6 +75,7 @@ config S390 select HAVE_KRETPROBES select HAVE_KVM if 64BIT select HAVE_ARCH_TRACEHOOK + select INIT_ALL_POSSIBLE source "init/Kconfig" diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index b5595688a477..f03914b8ed2f 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -52,12 +52,6 @@ struct _lowcore *lowcore_ptr[NR_CPUS]; EXPORT_SYMBOL(lowcore_ptr); -cpumask_t cpu_online_map = CPU_MASK_NONE; -EXPORT_SYMBOL(cpu_online_map); - -cpumask_t cpu_possible_map = CPU_MASK_ALL; -EXPORT_SYMBOL(cpu_possible_map); - static struct task_struct *current_set[NR_CPUS]; static u8 smp_cpu_type; diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 3c5ad1660bbc..593937d0c495 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -31,12 +31,6 @@ int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ -cpumask_t 
cpu_possible_map; -EXPORT_SYMBOL(cpu_possible_map); - -cpumask_t cpu_online_map; -EXPORT_SYMBOL(cpu_online_map); - static inline void __init smp_store_cpu_info(unsigned int cpu) { struct sh_cpuinfo *c = cpu_data + cpu; diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index a8180e546a48..8408d9d2a662 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h @@ -29,8 +29,6 @@ */ extern unsigned char boot_cpu_id; -extern cpumask_t phys_cpu_present_map; -#define cpu_possible_map phys_cpu_present_map typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c index e396c1f17a92..1e5ac4e282e1 100644 --- a/arch/sparc/kernel/smp.c +++ b/arch/sparc/kernel/smp.c @@ -39,8 +39,6 @@ volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,}; unsigned char boot_cpu_id = 0; unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */ -cpumask_t cpu_online_map = CPU_MASK_NONE; -cpumask_t phys_cpu_present_map = CPU_MASK_NONE; cpumask_t smp_commenced_mask = CPU_MASK_NONE; /* The only guaranteed locking primitive available on all Sparc @@ -334,7 +332,7 @@ void __init smp_setup_cpu_possible_map(void) instance = 0; while (!cpu_find_by_instance(instance, NULL, &mid)) { if (mid < NR_CPUS) { - cpu_set(mid, phys_cpu_present_map); + cpu_set(mid, cpu_possible_map); cpu_set(mid, cpu_present_map); } instance++; @@ -354,7 +352,7 @@ void __init smp_prepare_boot_cpu(void) current_thread_info()->cpu = cpuid; cpu_set(cpuid, cpu_online_map); - cpu_set(cpuid, phys_cpu_present_map); + cpu_set(cpuid, cpu_possible_map); } int __cpuinit __cpu_up(unsigned int cpu) diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index b0dfff848653..32d11a5fe3a8 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -113,10 +113,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data); #ifdef CONFIG_SMP /* IRQ implementation. */ EXPORT_SYMBOL(synchronize_irq); - -/* CPU online map and active count. */ -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(phys_cpu_present_map); #endif EXPORT_SYMBOL(__udelay); diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index f500b0618bb0..a97b8822c22c 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -49,14 +49,10 @@ int sparc64_multi_core __read_mostly; -cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE; -cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; cpumask_t cpu_core_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; -EXPORT_SYMBOL(cpu_possible_map); -EXPORT_SYMBOL(cpu_online_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_SYMBOL(cpu_core_map); diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index 045772142844..98351c78bc81 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c @@ -25,13 +25,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); #include "irq_user.h" #include "os.h" -/* CPU online map, set by smp_boot_cpus */ -cpumask_t cpu_online_map = CPU_MASK_NONE; -cpumask_t cpu_possible_map = CPU_MASK_NONE; - -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(cpu_possible_map); - /* Per CPU bogomips and other parameters * The only piece used here is the ipi pipe, which is set before SMP is * started and never changed. 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 7b1093397319..468c2f9d47ae 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -101,14 +101,8 @@ EXPORT_SYMBOL(smp_num_siblings); /* Last level cache ID of each logical CPU */ DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; -/* bitmap of online cpus */ -cpumask_t cpu_online_map __read_mostly; -EXPORT_SYMBOL(cpu_online_map); - cpumask_t cpu_callin_map; cpumask_t cpu_callout_map; -cpumask_t cpu_possible_map; -EXPORT_SYMBOL(cpu_possible_map); /* representing HT siblings of each logical CPU */ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 52145007bd7e..9c990185e9f2 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -63,11 +63,6 @@ static int voyager_extended_cpus = 1; /* Used for the invalidate map that's also checked in the spinlock */ static volatile unsigned long smp_invalidate_needed; -/* Bitmask of currently online CPUs - used by setup.c for - /proc/cpuinfo, visible externally but still physical */ -cpumask_t cpu_online_map = CPU_MASK_NONE; -EXPORT_SYMBOL(cpu_online_map); - /* Bitmask of CPUs present in the system - exported by i386_syms.c, used * by scheduler but indexed physically */ cpumask_t phys_cpu_present_map = CPU_MASK_NONE; @@ -218,8 +213,6 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE; /* This is for the new dynamic CPU boot code */ cpumask_t cpu_callin_map = CPU_MASK_NONE; cpumask_t cpu_callout_map = CPU_MASK_NONE; -cpumask_t cpu_possible_map = CPU_MASK_NONE; -EXPORT_SYMBOL(cpu_possible_map); /* The per processor IRQ masks (these are usually kept in sync) */ static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h index c5dd66916692..b96a6d2ffbc3 100644 --- a/include/asm-m32r/smp.h +++ b/include/asm-m32r/smp.h @@ -63,8 +63,6 @@ extern volatile int cpu_2_physid[NR_CPUS]; #define raw_smp_processor_id() (current_thread_info()->cpu) extern cpumask_t cpu_callout_map; -extern cpumask_t cpu_possible_map; -extern cpumask_t cpu_present_map; static __inline__ int hard_smp_processor_id(void) { diff --git a/init/Kconfig b/init/Kconfig index f763762d544a..7656623f5006 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -916,6 +916,15 @@ config KMOD endif # MODULES +config INIT_ALL_POSSIBLE + bool + help + Back when each arch used to define their own cpu_online_map and + cpu_possible_map, some of them chose to initialize cpu_possible_map + with all 1s, and others with all 0s. When they were centralised, + it was better to provide this option than to break all the archs + and have several arch maintainers persuing me down dark alleys. + config STOP_MACHINE bool default y diff --git a/kernel/cpu.c b/kernel/cpu.c index 8ea32e8d68b0..bae131a1211b 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -24,19 +24,20 @@ cpumask_t cpu_present_map __read_mostly; EXPORT_SYMBOL(cpu_present_map); -#ifndef CONFIG_SMP - /* * Represents all cpu's that are currently online. 
*/ -cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL; +cpumask_t cpu_online_map __read_mostly; EXPORT_SYMBOL(cpu_online_map); +#ifdef CONFIG_INIT_ALL_POSSIBLE cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; +#else +cpumask_t cpu_possible_map __read_mostly; +#endif EXPORT_SYMBOL(cpu_possible_map); -#else /* CONFIG_SMP */ - +#ifdef CONFIG_SMP /* Serializes the updates to cpu_online_map, cpu_present_map */ static DEFINE_MUTEX(cpu_add_remove_lock); -- cgit v1.2.3 From 29c0177e6a4ac094302bed54a1d4bbb6b740a9ef Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 13 Dec 2008 21:20:25 +1030 Subject: cpumask: change cpumask_scnprintf, cpumask_parse_user, cpulist_parse, and cpulist_scnprintf to take pointers. Impact: change calling convention of existing cpumask APIs Most cpumask functions started with cpus_: these have been replaced by cpumask_ ones which take struct cpumask pointers as expected. These four functions don't have good replacement names; fortunately they're rarely used, so we just change them over. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Acked-by: Ingo Molnar Cc: paulus@samba.org Cc: mingo@redhat.com Cc: tony.luck@intel.com Cc: ralf@linux-mips.org Cc: Greg Kroah-Hartman Cc: cl@linux-foundation.org Cc: srostedt@redhat.com --- arch/ia64/kernel/topology.c | 2 +- arch/mips/kernel/smp-cmp.c | 4 +- arch/powerpc/platforms/pseries/xics.c | 2 +- arch/x86/kernel/cpu/intel_cacheinfo.c | 4 +- arch/x86/kernel/setup_percpu.c | 2 +- drivers/base/cpu.c | 2 +- drivers/base/node.c | 4 +- drivers/base/topology.c | 4 +- drivers/pci/pci-sysfs.c | 4 +- drivers/pci/probe.c | 4 +- include/linux/cpumask.h | 87 +++++++++++++++++++++++------------ kernel/cpuset.c | 4 +- kernel/irq/proc.c | 4 +- kernel/profile.c | 4 +- kernel/sched.c | 4 +- kernel/sched_stats.h | 2 +- kernel/taskstats.c | 2 +- kernel/trace/trace.c | 4 +- mm/slub.c | 2 +- 19 files changed, 86 insertions(+), 59 deletions(-) (limited to 'arch/x86') diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index c75b914f2d6b..a8d61a3e9a94 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -219,7 +219,7 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf) cpumask_t shared_cpu_map; cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map); - len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map); + len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map); len += sprintf(buf+len, "\n"); return len; } diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 6789c1a12120..f27beca4b26d 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -51,10 +51,10 @@ static int __init allowcpus(char *str) int len; cpus_clear(cpu_allow_map); - if (cpulist_parse(str, cpu_allow_map) == 0) { + if (cpulist_parse(str, &cpu_allow_map) == 0) { cpu_set(0, cpu_allow_map); cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map); - len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map); + len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map); buf[len] = '\0'; pr_debug("Allowable CPUs: %s\n", buf); return 1; diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index e1904774a70f..64d24310ce7e 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -358,7 +358,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) irq_server = get_irq_server(virq, 1); if (irq_server == -1) { char cpulist[128]; - 
cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); + cpumask_scnprintf(cpulist, sizeof(cpulist), &cpumask); printk(KERN_WARNING "%s: No online cpus in the mask %s for irq %d\n", __func__, cpulist, virq); diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 3f46afbb1cf1..43ea612d3e9d 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -626,8 +626,8 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, cpumask_t *mask = &this_leaf->shared_cpu_map; n = type? - cpulist_scnprintf(buf, len-2, *mask): - cpumask_scnprintf(buf, len-2, *mask); + cpulist_scnprintf(buf, len-2, mask) : + cpumask_scnprintf(buf, len-2, mask); buf[n++] = '\n'; buf[n] = '\0'; } diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index ae0c0d3bb770..1c2084291f97 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -282,7 +282,7 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable) else cpu_clear(cpu, *mask); - cpulist_scnprintf(buf, sizeof(buf), *mask); + cpulist_scnprintf(buf, sizeof(buf), mask); printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf); } diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 64f5d54f7edc..4259072f5bd0 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -109,7 +109,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL); */ static ssize_t print_cpus_map(char *buf, cpumask_t *map) { - int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *map); + int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map); buf[n++] = '\n'; buf[n] = '\0'; diff --git a/drivers/base/node.c b/drivers/base/node.c index f5207090885a..91636cd8b6c9 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -30,8 +30,8 @@ static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf) BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); len = type? - cpulist_scnprintf(buf, PAGE_SIZE-2, *mask): - cpumask_scnprintf(buf, PAGE_SIZE-2, *mask); + cpulist_scnprintf(buf, PAGE_SIZE-2, mask) : + cpumask_scnprintf(buf, PAGE_SIZE-2, mask); buf[len++] = '\n'; buf[len] = '\0'; return len; diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 199cd97e32e6..a8bc1cbcfa7c 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -49,8 +49,8 @@ static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf) if (len > 1) { n = type? 
- cpulist_scnprintf(buf, len-2, *mask): - cpumask_scnprintf(buf, len-2, *mask); + cpulist_scnprintf(buf, len-2, mask) : + cpumask_scnprintf(buf, len-2, mask); buf[n++] = '\n'; buf[n] = '\0'; } diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5d72866897a8..c88485860a0a 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -74,7 +74,7 @@ static ssize_t local_cpus_show(struct device *dev, int len; mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); - len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); + len = cpumask_scnprintf(buf, PAGE_SIZE-2, &mask); buf[len++] = '\n'; buf[len] = '\0'; return len; @@ -88,7 +88,7 @@ static ssize_t local_cpulist_show(struct device *dev, int len; mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); - len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask); + len = cpulist_scnprintf(buf, PAGE_SIZE-2, &mask); buf[len++] = '\n'; buf[len] = '\0'; return len; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 003a9b3c293f..5b3f5937ecf5 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -55,8 +55,8 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev, cpumask = pcibus_to_cpumask(to_pci_bus(dev)); ret = type? - cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask): - cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask); + cpulist_scnprintf(buf, PAGE_SIZE-2, &cpumask) : + cpumask_scnprintf(buf, PAGE_SIZE-2, &cpumask); buf[ret++] = '\n'; buf[ret] = '\0'; return ret; diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 21e1dd43e52a..94a2ab88ae85 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -339,36 +339,6 @@ extern cpumask_t cpu_mask_all; #endif #define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) -#define cpumask_scnprintf(buf, len, src) \ - __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) -static inline int __cpumask_scnprintf(char *buf, int len, - const cpumask_t *srcp, int nbits) -{ - return bitmap_scnprintf(buf, len, srcp->bits, nbits); -} - -#define cpumask_parse_user(ubuf, ulen, dst) \ - __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS) -static inline int __cpumask_parse_user(const char __user *buf, int len, - cpumask_t *dstp, int nbits) -{ - return bitmap_parse_user(buf, len, dstp->bits, nbits); -} - -#define cpulist_scnprintf(buf, len, src) \ - __cpulist_scnprintf((buf), (len), &(src), NR_CPUS) -static inline int __cpulist_scnprintf(char *buf, int len, - const cpumask_t *srcp, int nbits) -{ - return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); -} - -#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS) -static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits) -{ - return bitmap_parselist(buf, dstp->bits, nbits); -} - #define cpu_remap(oldbit, old, new) \ __cpu_remap((oldbit), &(old), &(new), NR_CPUS) static inline int __cpu_remap(int oldbit, @@ -945,6 +915,63 @@ static inline void cpumask_copy(struct cpumask *dstp, */ #define cpumask_of(cpu) (get_cpu_mask(cpu)) +/** + * cpumask_scnprintf - print a cpumask into a string as comma-separated hex + * @buf: the buffer to sprintf into + * @len: the length of the buffer + * @srcp: the cpumask to print + * + * If len is zero, returns zero. Otherwise returns the length of the + * (nul-terminated) @buf string. 
+ */ +static inline int cpumask_scnprintf(char *buf, int len, + const struct cpumask *srcp) +{ + return bitmap_scnprintf(buf, len, srcp->bits, nr_cpumask_bits); +} + +/** + * cpumask_parse_user - extract a cpumask from a user string + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. + */ +static inline int cpumask_parse_user(const char __user *buf, int len, + struct cpumask *dstp) +{ + return bitmap_parse_user(buf, len, dstp->bits, nr_cpumask_bits); +} + +/** + * cpulist_scnprintf - print a cpumask into a string as comma-separated list + * @buf: the buffer to sprintf into + * @len: the length of the buffer + * @srcp: the cpumask to print + * + * If len is zero, returns zero. Otherwise returns the length of the + * (nul-terminated) @buf string. + */ +static inline int cpulist_scnprintf(char *buf, int len, + const struct cpumask *srcp) +{ + return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpumask_bits); +} + +/** + * cpulist_parse_user - extract a cpumask from a user string of ranges + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. + */ +static inline int cpulist_parse(const char *buf, struct cpumask *dstp) +{ + return bitmap_parselist(buf, dstp->bits, nr_cpumask_bits); +} + /** * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * * @bitmap: the bitmap diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 96c0ba13b8cd..39c1a4c1c5a9 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -896,7 +896,7 @@ static int update_cpumask(struct cpuset *cs, const char *buf) if (!*buf) { cpus_clear(trialcs.cpus_allowed); } else { - retval = cpulist_parse(buf, trialcs.cpus_allowed); + retval = cpulist_parse(buf, &trialcs.cpus_allowed); if (retval < 0) return retval; @@ -1482,7 +1482,7 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) mask = cs->cpus_allowed; mutex_unlock(&callback_mutex); - return cpulist_scnprintf(page, PAGE_SIZE, mask); + return cpulist_scnprintf(page, PAGE_SIZE, &mask); } static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index d257e7d6a8a4..f293349d49d0 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -47,7 +47,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, irq_balancing_disabled(irq)) return -EIO; - err = cpumask_parse_user(buffer, count, new_value); + err = cpumask_parse_user(buffer, count, &new_value); if (err) return err; @@ -95,7 +95,7 @@ static ssize_t default_affinity_write(struct file *file, cpumask_t new_value; int err; - err = cpumask_parse_user(buffer, count, new_value); + err = cpumask_parse_user(buffer, count, &new_value); if (err) return err; diff --git a/kernel/profile.c b/kernel/profile.c index dc41827fbfee..7d620dfdde59 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -442,7 +442,7 @@ void profile_tick(int type) static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { - int len = cpumask_scnprintf(page, count, *(cpumask_t *)data); + int len = cpumask_scnprintf(page, count, (cpumask_t *)data); if (count - len < 2) return -EINVAL; len += sprintf(page + len, "\n"); @@ -456,7 +456,7 @@ static int prof_cpu_mask_write_proc(struct file *file, unsigned long full_count = count, err; cpumask_t new_value; - err = cpumask_parse_user(buffer, count, new_value); + err = cpumask_parse_user(buffer, count, &new_value); if 
(err) return err; diff --git a/kernel/sched.c b/kernel/sched.c index e4bb1dd7b308..d2d16d1273b1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6666,7 +6666,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, struct sched_group *group = sd->groups; char str[256]; - cpulist_scnprintf(str, sizeof(str), sd->span); + cpulist_scnprintf(str, sizeof(str), &sd->span); cpus_clear(*groupmask); printk(KERN_DEBUG "%*s domain %d: ", level, "", level); @@ -6720,7 +6720,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, cpus_or(*groupmask, *groupmask, group->cpumask); - cpulist_scnprintf(str, sizeof(str), group->cpumask); + cpulist_scnprintf(str, sizeof(str), &group->cpumask); printk(KERN_CONT " %s", str); group = group->next; diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 7dbf72a2b02c..6beff1e4eeae 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -42,7 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v) for_each_domain(cpu, sd) { enum cpu_idle_type itype; - cpumask_scnprintf(mask_str, mask_len, sd->span); + cpumask_scnprintf(mask_str, mask_len, &sd->span); seq_printf(seq, "domain%d %s", dcount++, mask_str); for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; itype++) { diff --git a/kernel/taskstats.c b/kernel/taskstats.c index bd6be76303cf..6d7dc4ec4aa5 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -352,7 +352,7 @@ static int parse(struct nlattr *na, cpumask_t *mask) if (!data) return -ENOMEM; nla_strlcpy(data, na, len); - ret = cpulist_parse(data, *mask); + ret = cpulist_parse(data, mask); kfree(data); return ret; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d86e3252f300..d2e75479dc50 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2126,7 +2126,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf, mutex_lock(&tracing_cpumask_update_lock); - len = cpumask_scnprintf(mask_str, count, tracing_cpumask); + len = cpumask_scnprintf(mask_str, count, &tracing_cpumask); if (count - len < 2) { count = -EINVAL; goto out_err; @@ -2147,7 +2147,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, int err, cpu; mutex_lock(&tracing_cpumask_update_lock); - err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); + err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new); if (err) goto err_unlock; diff --git a/mm/slub.c b/mm/slub.c index a2cd47d89e0a..8e516e29f989 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3626,7 +3626,7 @@ static int list_locations(struct kmem_cache *s, char *buf, len < PAGE_SIZE - 60) { len += sprintf(buf + len, " cpus="); len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, - l->cpus); + &l->cpus); } if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && -- cgit v1.2.3 From 0de26520c7cabf36e1de090ea8092f011a6106ce Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 13 Dec 2008 21:20:26 +1030 Subject: cpumask: make irq_set_affinity() take a const struct cpumask Impact: change existing irq_chip API Not much point with gentle transition here: the struct irq_chip's setaffinity method signature needs to change. Fortunately, not widely used code, but hits a few architectures. Note: In irq_select_affinity() I save a temporary in by mangling irq_desc[irq].affinity directly. Ingo, does this break anything? 
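The conversion pattern repeated across the hunks below, shown once on a hypothetical driver (foo_set_affinity is not a real function):

/* Before: the irq_chip callback took a cpumask_t by value. */
static void foo_set_affinity(unsigned int irq, cpumask_t mask)
{
	unsigned int cpu = first_cpu(mask);
	/* ... retarget the interrupt to 'cpu' in hardware ... */
}

/* After: it takes a const struct cpumask pointer. */
static void foo_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	unsigned int cpu = cpumask_first(mask);
	/* ... retarget the interrupt to 'cpu' in hardware ... */
}

Callers correspondingly pass cpumask_of(cpu) where they previously passed cpumask_of_cpu(cpu), as the clockevent hunks below show.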
(Folded in fix from KOSAKI Motohiro) Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Reviewed-by: Grant Grundler Acked-by: Ingo Molnar Cc: ralf@linux-mips.org Cc: grundler@parisc-linux.org Cc: jeremy@xensource.com Cc: KOSAKI Motohiro --- arch/alpha/kernel/irq.c | 2 +- arch/alpha/kernel/sys_dp264.c | 8 ++-- arch/alpha/kernel/sys_titan.c | 4 +- arch/arm/common/gic.c | 4 +- arch/arm/kernel/irq.c | 2 +- arch/arm/oprofile/op_model_mpcore.c | 4 +- arch/cris/arch-v32/kernel/irq.c | 4 +- arch/ia64/hp/sim/hpsim_irq.c | 2 +- arch/ia64/kernel/iosapic.c | 12 +++--- arch/ia64/kernel/irq.c | 9 ++-- arch/ia64/kernel/msi_ia64.c | 12 +++--- arch/ia64/kernel/smpboot.c | 4 +- arch/ia64/sn/kernel/irq.c | 6 +-- arch/ia64/sn/kernel/msi_sn.c | 7 +-- arch/mips/include/asm/irq.h | 3 +- arch/mips/kernel/cevt-bcm1480.c | 2 +- arch/mips/kernel/cevt-sb1250.c | 2 +- arch/mips/kernel/irq-gic.c | 6 +-- arch/mips/mti-malta/malta-smtc.c | 6 +-- arch/mips/sibyte/bcm1480/irq.c | 8 ++-- arch/mips/sibyte/sb1250/irq.c | 8 ++-- arch/parisc/kernel/irq.c | 6 +-- arch/powerpc/kernel/irq.c | 2 +- arch/powerpc/platforms/pseries/xics.c | 6 +-- arch/powerpc/sysdev/mpic.c | 4 +- arch/powerpc/sysdev/mpic.h | 2 +- arch/sparc64/kernel/irq.c | 11 +++-- arch/sparc64/kernel/of_device.c | 2 +- arch/sparc64/kernel/pci_msi.c | 2 +- arch/x86/kernel/hpet.c | 4 +- arch/x86/kernel/io_apic.c | 81 +++++++++++++++++------------------ arch/x86/kernel/irq_32.c | 2 +- arch/x86/kernel/irq_64.c | 2 +- drivers/parisc/iosapic.c | 7 +-- drivers/xen/events.c | 6 +-- include/linux/interrupt.h | 4 +- include/linux/irq.h | 3 +- kernel/irq/chip.c | 2 +- kernel/irq/manage.c | 22 +++++----- kernel/irq/migration.c | 14 +++--- kernel/irq/proc.c | 29 ++++++++----- kernel/time/tick-common.c | 6 +-- 42 files changed, 171 insertions(+), 161 deletions(-) (limited to 'arch/x86') diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index c626a821cdcb..d0f1620007f7 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq) last_cpu = cpu; irq_desc[irq].affinity = cpumask_of_cpu(cpu); - irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu)); + irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu)); return 0; } #endif /* CONFIG_SMP */ diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index c71b0fd7a61f..ab44c164d9d4 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c @@ -177,19 +177,19 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) } static void -dp264_set_affinity(unsigned int irq, cpumask_t affinity) +dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) { spin_lock(&dp264_irq_lock); - cpu_set_irq_affinity(irq, affinity); + cpu_set_irq_affinity(irq, *affinity); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); } static void -clipper_set_affinity(unsigned int irq, cpumask_t affinity) +clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) { spin_lock(&dp264_irq_lock); - cpu_set_irq_affinity(irq - 16, affinity); + cpu_set_irq_affinity(irq - 16, *affinity); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); } diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 52c91ccc1648..27f840a4ad3d 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -158,10 +158,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) } static void -titan_set_irq_affinity(unsigned int irq, cpumask_t affinity) 
+titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) { spin_lock(&titan_irq_lock); - titan_cpu_set_irq_affinity(irq - 16, affinity); + titan_cpu_set_irq_affinity(irq - 16, *affinity); titan_update_irq_hw(titan_cached_irq_mask); spin_unlock(&titan_irq_lock); } diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 7fc9860a97d7..c6884ba1d5ed 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -109,11 +109,11 @@ static void gic_unmask_irq(unsigned int irq) } #ifdef CONFIG_SMP -static void gic_set_cpu(unsigned int irq, cpumask_t mask_val) +static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val) { void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3); unsigned int shift = (irq % 4) * 8; - unsigned int cpu = first_cpu(mask_val); + unsigned int cpu = cpumask_first(mask_val); u32 val; spin_lock(&irq_controller_lock); diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 2f3eb795fa6e..7141cee1fab7 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -174,7 +174,7 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu); spin_lock_irq(&desc->lock); - desc->chip->set_affinity(irq, cpumask_of_cpu(cpu)); + desc->chip->set_affinity(irq, cpumask_of(cpu)); spin_unlock_irq(&desc->lock); } diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c index 4de366e8b4c5..6d6bd5899240 100644 --- a/arch/arm/oprofile/op_model_mpcore.c +++ b/arch/arm/oprofile/op_model_mpcore.c @@ -260,10 +260,10 @@ static void em_stop(void) static void em_route_irq(int irq, unsigned int cpu) { struct irq_desc *desc = irq_desc + irq; - cpumask_t mask = cpumask_of_cpu(cpu); + const struct cpumask *mask = cpumask_of(cpu); spin_lock_irq(&desc->lock); - desc->affinity = mask; + desc->affinity = *mask; desc->chip->set_affinity(irq, mask); spin_unlock_irq(&desc->lock); } diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c index 173c141ac9ba..295131fee710 100644 --- a/arch/cris/arch-v32/kernel/irq.c +++ b/arch/cris/arch-v32/kernel/irq.c @@ -325,11 +325,11 @@ static void end_crisv32_irq(unsigned int irq) { } -void set_affinity_crisv32_irq(unsigned int irq, cpumask_t dest) +void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest) { unsigned long flags; spin_lock_irqsave(&irq_lock, flags); - irq_allocations[irq - FIRST_IRQ].mask = dest; + irq_allocations[irq - FIRST_IRQ].mask = *dest; spin_unlock_irqrestore(&irq_lock, flags); } diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c index c2f58ff364e7..cc0a3182db3c 100644 --- a/arch/ia64/hp/sim/hpsim_irq.c +++ b/arch/ia64/hp/sim/hpsim_irq.c @@ -22,7 +22,7 @@ hpsim_irq_noop (unsigned int irq) } static void -hpsim_set_affinity_noop (unsigned int a, cpumask_t b) +hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b) { } diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 5c4674ae8aea..c8adecd5b416 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -330,25 +330,25 @@ unmask_irq (unsigned int irq) static void -iosapic_set_affinity (unsigned int irq, cpumask_t mask) +iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) { #ifdef CONFIG_SMP u32 high32, low32; - int dest, rte_index; + int cpu, dest, rte_index; int redir = (irq & IA64_IRQ_REDIRECTED) ? 
1 : 0; struct iosapic_rte_info *rte; struct iosapic *iosapic; irq &= (~IA64_IRQ_REDIRECTED); - cpus_and(mask, mask, cpu_online_map); - if (cpus_empty(mask)) + cpu = cpumask_first_and(cpu_online_mask, mask); + if (cpu >= nr_cpu_ids) return; - if (irq_prepare_move(irq, first_cpu(mask))) + if (irq_prepare_move(irq, cpu)) return; - dest = cpu_physical_id(first_cpu(mask)); + dest = cpu_physical_id(cpu); if (!iosapic_intr_info[irq].count) return; /* not an IOSAPIC interrupt */ diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 7fd18f54c056..0b6db53fedcf 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -133,7 +133,6 @@ unsigned int vectors_in_migration[NR_IRQS]; */ static void migrate_irqs(void) { - cpumask_t mask; irq_desc_t *desc; int irq, new_cpu; @@ -152,15 +151,14 @@ static void migrate_irqs(void) if (desc->status == IRQ_PER_CPU) continue; - cpus_and(mask, irq_desc[irq].affinity, cpu_online_map); - if (any_online_cpu(mask) == NR_CPUS) { + if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask) + >= nr_cpu_ids) { /* * Save it for phase 2 processing */ vectors_in_migration[irq] = irq; new_cpu = any_online_cpu(cpu_online_map); - mask = cpumask_of_cpu(new_cpu); /* * Al three are essential, currently WARN_ON.. maybe panic? @@ -168,7 +166,8 @@ static void migrate_irqs(void) if (desc->chip && desc->chip->disable && desc->chip->enable && desc->chip->set_affinity) { desc->chip->disable(irq); - desc->chip->set_affinity(irq, mask); + desc->chip->set_affinity(irq, + cpumask_of(new_cpu)); desc->chip->enable(irq); } else { WARN_ON((!(desc->chip) || !(desc->chip->disable) || diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 702a09c13238..890339339035 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c @@ -49,11 +49,12 @@ static struct irq_chip ia64_msi_chip; #ifdef CONFIG_SMP -static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) +static void ia64_set_msi_irq_affinity(unsigned int irq, + const cpumask_t *cpu_mask) { struct msi_msg msg; u32 addr, data; - int cpu = first_cpu(cpu_mask); + int cpu = first_cpu(*cpu_mask); if (!cpu_online(cpu)) return; @@ -166,12 +167,11 @@ void arch_teardown_msi_irq(unsigned int irq) #ifdef CONFIG_DMAR #ifdef CONFIG_SMP -static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) +static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_cfg *cfg = irq_cfg + irq; struct msi_msg msg; - int cpu = first_cpu(mask); - + int cpu = cpumask_first(mask); if (!cpu_online(cpu)) return; @@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); dmar_msi_write(irq, &msg); - irq_desc[irq].affinity = mask; + irq_desc[irq].affinity = *mask; } #endif /* CONFIG_SMP */ diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 4ede6e571c38..11463994a7d5 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -682,7 +682,7 @@ int migrate_platform_irqs(unsigned int cpu) { int new_cpei_cpu; irq_desc_t *desc = NULL; - cpumask_t mask; + const struct cpumask *mask; int retval = 0; /* @@ -695,7 +695,7 @@ int migrate_platform_irqs(unsigned int cpu) * Now re-target the CPEI to a different processor */ new_cpei_cpu = any_online_cpu(cpu_online_map); - mask = cpumask_of_cpu(new_cpei_cpu); + mask = cpumask_of(new_cpei_cpu); set_cpei_target_cpu(new_cpei_cpu); desc = irq_desc + ia64_cpe_irq; /* diff --git a/arch/ia64/sn/kernel/irq.c 
b/arch/ia64/sn/kernel/irq.c index 0c66dbdd1d72..66fd705e82c0 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -227,14 +227,14 @@ finish_up: return new_irq_info; } -static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) +static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) { struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; nasid_t nasid; int slice; - nasid = cpuid_to_nasid(first_cpu(mask)); - slice = cpuid_to_slice(first_cpu(mask)); + nasid = cpuid_to_nasid(cpumask_first(mask)); + slice = cpuid_to_slice(cpumask_first(mask)); list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, sn_irq_lh[irq], list) diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index 83f190ffe350..ca553b0429ce 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c @@ -151,7 +151,8 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) } #ifdef CONFIG_SMP -static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) +static void sn_set_msi_irq_affinity(unsigned int irq, + const struct cpumask *cpu_mask) { struct msi_msg msg; int slice; @@ -164,7 +165,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) struct sn_pcibus_provider *provider; unsigned int cpu; - cpu = first_cpu(cpu_mask); + cpu = cpumask_first(cpu_mask); sn_irq_info = sn_msi_info[irq].sn_irq_info; if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) return; @@ -204,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); write_msi_msg(irq, &msg); - irq_desc[irq].affinity = cpu_mask; + irq_desc[irq].affinity = *cpu_mask; } #endif /* CONFIG_SMP */ diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index a58f0eecc68f..abc62aa744ac 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -49,7 +49,8 @@ static inline void smtc_im_ack_irq(unsigned int irq) #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF #include -extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity); +extern void plat_set_irq_affinity(unsigned int irq, + const struct cpumask *affinity); extern void smtc_forward_irq(unsigned int irq); /* diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index 0a57f86945f1..d7e21bc8cd21 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c @@ -148,6 +148,6 @@ void __cpuinit sb1480_clockevent_init(void) action->name = name; action->dev_id = cd; - irq_set_affinity(irq, cpumask_of_cpu(cpu)); + irq_set_affinity(irq, cpumask_of(cpu)); setup_irq(irq, action); } diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c index 63ac3ad462bc..0f188cd46e03 100644 --- a/arch/mips/kernel/cevt-sb1250.c +++ b/arch/mips/kernel/cevt-sb1250.c @@ -147,6 +147,6 @@ void __cpuinit sb1250_clockevent_init(void) action->name = name; action->dev_id = cd; - irq_set_affinity(irq, cpumask_of_cpu(cpu)); + irq_set_affinity(irq, cpumask_of(cpu)); setup_irq(irq, action); } diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index f0a4bb19e096..494a49a317e9 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq) static DEFINE_SPINLOCK(gic_lock); -static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) +static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) { cpumask_t tmp = CPU_MASK_NONE; unsigned long 
flags; @@ -164,7 +164,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) pr_debug(KERN_DEBUG "%s called\n", __func__); irq -= _irqbase; - cpus_and(tmp, cpumask, cpu_online_map); + cpumask_and(&tmp, cpumask, cpu_online_mask); if (cpus_empty(tmp)) return; @@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); } - irq_desc[irq].affinity = cpumask; + irq_desc[irq].affinity = *cpumask; spin_unlock_irqrestore(&gic_lock, flags); } diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index f84a46a8ae6e..aabd7274507b 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c @@ -114,9 +114,9 @@ struct plat_smp_ops msmtc_smp_ops = { */ -void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity) +void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) { - cpumask_t tmask = affinity; + cpumask_t tmask = *affinity; int cpu = 0; void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); @@ -139,7 +139,7 @@ void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity) * be made to forward to an offline "CPU". */ - for_each_cpu_mask(cpu, affinity) { + for_each_cpu(cpu, affinity) { if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) cpu_clear(cpu, tmask); } diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c index a35818ed4263..12b465d404df 100644 --- a/arch/mips/sibyte/bcm1480/irq.c +++ b/arch/mips/sibyte/bcm1480/irq.c @@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq); static void disable_bcm1480_irq(unsigned int irq); static void ack_bcm1480_irq(unsigned int irq); #ifdef CONFIG_SMP -static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask); +static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask); #endif #ifdef CONFIG_PCI @@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq) } #ifdef CONFIG_SMP -static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) +static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) { int i = 0, old_cpu, cpu, int_on, k; u64 cur_ints; @@ -117,11 +117,11 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) unsigned long flags; unsigned int irq_dirty; - if (cpus_weight(mask) != 1) { + if (cpumask_weight(mask) != 1) { printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); return; } - i = first_cpu(mask); + i = cpumask_first(mask); /* Convert logical CPU to physical CPU */ cpu = cpu_logical_map(i); diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index a5158483986e..808ac2959b8c 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c @@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq); static void disable_sb1250_irq(unsigned int irq); static void ack_sb1250_irq(unsigned int irq); #ifdef CONFIG_SMP -static void sb1250_set_affinity(unsigned int irq, cpumask_t mask); +static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask); #endif #ifdef CONFIG_SIBYTE_HAS_LDT @@ -103,16 +103,16 @@ void sb1250_unmask_irq(int cpu, int irq) } #ifdef CONFIG_SMP -static void sb1250_set_affinity(unsigned int irq, cpumask_t mask) +static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) { int i = 0, old_cpu, cpu, int_on; u64 cur_ints; struct irq_desc *desc = irq_desc + irq; unsigned long flags; - i = first_cpu(mask); + i = cpumask_first(mask); - if (cpus_weight(mask) > 
1) { + if (cpumask_weight(mask) > 1) { printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); return; } diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 23ef950df008..4cea935e2f99 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -131,12 +131,12 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest) return 0; } -static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest) +static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) { - if (cpu_check_affinity(irq, &dest)) + if (cpu_check_affinity(irq, dest)) return; - irq_desc[irq].affinity = dest; + irq_desc[irq].affinity = *dest; } #endif diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index ac222d0ab12e..23b8b5e36f98 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -237,7 +237,7 @@ void fixup_irqs(cpumask_t map) mask = map; } if (irq_desc[irq].chip->set_affinity) - irq_desc[irq].chip->set_affinity(irq, mask); + irq_desc[irq].chip->set_affinity(irq, &mask); else if (irq_desc[irq].action && !(warned++)) printk("Cannot set affinity for irq %i\n", irq); } diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 64d24310ce7e..424b335a71c8 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -332,7 +332,7 @@ static void xics_eoi_lpar(unsigned int virq) lpar_xirr_info_set((0xff << 24) | irq); } -static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) +static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) { unsigned int irq; int status; @@ -358,7 +358,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) irq_server = get_irq_server(virq, 1); if (irq_server == -1) { char cpulist[128]; - cpumask_scnprintf(cpulist, sizeof(cpulist), &cpumask); + cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); printk(KERN_WARNING "%s: No online cpus in the mask %s for irq %d\n", __func__, cpulist, virq); @@ -845,7 +845,7 @@ void xics_migrate_irqs_away(void) /* Reset affinity to all cpus */ irq_desc[virq].affinity = CPU_MASK_ALL; - desc->chip->set_affinity(virq, CPU_MASK_ALL); + desc->chip->set_affinity(virq, cpu_all_mask); unlock: spin_unlock_irqrestore(&desc->lock, flags); } diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 1890fb085cde..5d7f9f0c93c3 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -817,7 +817,7 @@ static void mpic_end_ipi(unsigned int irq) #endif /* CONFIG_SMP */ -void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) +void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) { struct mpic *mpic = mpic_from_irq(irq); unsigned int src = mpic_irq_to_hw(irq); @@ -829,7 +829,7 @@ void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) } else { cpumask_t tmp; - cpus_and(tmp, cpumask, cpu_online_map); + cpumask_and(&tmp, cpumask, cpu_online_mask); mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), mpic_physmask(cpus_addr(tmp)[0])); diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h index 6209c62a426d..3cef2af10f42 100644 --- a/arch/powerpc/sysdev/mpic.h +++ b/arch/powerpc/sysdev/mpic.h @@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic) extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); extern void mpic_set_vector(unsigned int virq, unsigned int vector); -extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask); +extern void 
mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask); #endif /* _POWERPC_SYSDEV_MPIC_H */ diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 52fc836f464d..4aaf18e83c8c 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -312,7 +312,8 @@ static void sun4u_irq_enable(unsigned int virt_irq) } } -static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask) +static void sun4u_set_affinity(unsigned int virt_irq, + const struct cpumask *mask) { sun4u_irq_enable(virt_irq); } @@ -362,7 +363,8 @@ static void sun4v_irq_enable(unsigned int virt_irq) ino, err); } -static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask) +static void sun4v_set_affinity(unsigned int virt_irq, + const struct cpumask *mask) { unsigned int ino = virt_irq_table[virt_irq].dev_ino; unsigned long cpuid = irq_choose_cpu(virt_irq); @@ -429,7 +431,8 @@ static void sun4v_virq_enable(unsigned int virt_irq) dev_handle, dev_ino, err); } -static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask) +static void sun4v_virt_set_affinity(unsigned int virt_irq, + const struct cpumask *mask) { unsigned long cpuid, dev_handle, dev_ino; int err; @@ -788,7 +791,7 @@ void fixup_irqs(void) !(irq_desc[irq].status & IRQ_PER_CPU)) { if (irq_desc[irq].chip->set_affinity) irq_desc[irq].chip->set_affinity(irq, - irq_desc[irq].affinity); + &irq_desc[irq].affinity); } spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c index 0f616ae3246c..df2efb7fc14c 100644 --- a/arch/sparc64/kernel/of_device.c +++ b/arch/sparc64/kernel/of_device.c @@ -780,7 +780,7 @@ out: if (nid != -1) { cpumask_t numa_mask = node_to_cpumask(nid); - irq_set_affinity(irq, numa_mask); + irq_set_affinity(irq, &numa_mask); } return irq; diff --git a/arch/sparc64/kernel/pci_msi.c b/arch/sparc64/kernel/pci_msi.c index 2e680f34f727..0d0cd815e83e 100644 --- a/arch/sparc64/kernel/pci_msi.c +++ b/arch/sparc64/kernel/pci_msi.c @@ -288,7 +288,7 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm, if (nid != -1) { cpumask_t numa_mask = node_to_cpumask(nid); - irq_set_affinity(irq, numa_mask); + irq_set_affinity(irq, &numa_mask); } err = request_irq(irq, sparc64_msiq_interrupt, 0, "MSIQ", diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 067d8de913f6..940f25851e1e 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -301,7 +301,7 @@ static void hpet_set_mode(enum clock_event_mode mode, struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); hpet_setup_msi_irq(hdev->irq); disable_irq(hdev->irq); - irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu)); + irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); enable_irq(hdev->irq); } break; @@ -449,7 +449,7 @@ static int hpet_setup_irq(struct hpet_dev *dev) return -1; disable_irq(dev->irq); - irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu)); + irq_set_affinity(dev->irq, cpumask_of(dev->cpu)); enable_irq(dev->irq); printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 9043251210fb..1184210e6d0c 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -361,7 +361,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector) static int assign_irq_vector(int irq, cpumask_t mask); -static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) +static void set_ioapic_affinity_irq(unsigned int irq, + const struct cpumask *mask) { struct 
irq_cfg *cfg; unsigned long flags; @@ -369,15 +370,14 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) cpumask_t tmp; struct irq_desc *desc; - cpus_and(tmp, mask, cpu_online_map); - if (cpus_empty(tmp)) + if (!cpumask_intersects(mask, cpu_online_mask)) return; cfg = irq_cfg(irq); - if (assign_irq_vector(irq, mask)) + if (assign_irq_vector(irq, *mask)) return; - cpus_and(tmp, cfg->domain, mask); + cpumask_and(&tmp, &cfg->domain, mask); dest = cpu_mask_to_apicid(tmp); /* * Only the high 8 bits are valid. @@ -387,7 +387,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) desc = irq_to_desc(irq); spin_lock_irqsave(&ioapic_lock, flags); __target_IO_APIC_irq(irq, dest, cfg->vector); - desc->affinity = mask; + cpumask_copy(&desc->affinity, mask); spin_unlock_irqrestore(&ioapic_lock, flags); } #endif /* CONFIG_SMP */ @@ -2189,7 +2189,7 @@ static void ir_irq_migration(struct work_struct *work) continue; } - desc->chip->set_affinity(irq, desc->pending_mask); + desc->chip->set_affinity(irq, &desc->pending_mask); spin_unlock_irqrestore(&desc->lock, flags); } } @@ -2198,18 +2198,19 @@ static void ir_irq_migration(struct work_struct *work) /* * Migrates the IRQ destination in the process context. */ -static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) +static void set_ir_ioapic_affinity_irq(unsigned int irq, + const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); if (desc->status & IRQ_LEVEL) { desc->status |= IRQ_MOVE_PENDING; - desc->pending_mask = mask; + cpumask_copy(&desc->pending_mask, mask); migrate_irq_remapped_level(irq); return; } - migrate_ioapic_irq(irq, mask); + migrate_ioapic_irq(irq, *mask); } #endif @@ -3027,7 +3028,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms } #ifdef CONFIG_SMP -static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) +static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_cfg *cfg; struct msi_msg msg; @@ -3035,15 +3036,14 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) cpumask_t tmp; struct irq_desc *desc; - cpus_and(tmp, mask, cpu_online_map); - if (cpus_empty(tmp)) + if (!cpumask_intersects(mask, cpu_online_mask)) return; - if (assign_irq_vector(irq, mask)) + if (assign_irq_vector(irq, *mask)) return; cfg = irq_cfg(irq); - cpus_and(tmp, cfg->domain, mask); + cpumask_and(&tmp, &cfg->domain, mask); dest = cpu_mask_to_apicid(tmp); read_msi_msg(irq, &msg); @@ -3055,7 +3055,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) write_msi_msg(irq, &msg); desc = irq_to_desc(irq); - desc->affinity = mask; + cpumask_copy(&desc->affinity, mask); } #ifdef CONFIG_INTR_REMAP @@ -3063,7 +3063,8 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) * Migrate the MSI irq to another cpumask. This migration is * done in the process context using interrupt-remapping hardware. 
*/ -static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) +static void ir_set_msi_irq_affinity(unsigned int irq, + const struct cpumask *mask) { struct irq_cfg *cfg; unsigned int dest; @@ -3071,18 +3072,17 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) struct irte irte; struct irq_desc *desc; - cpus_and(tmp, mask, cpu_online_map); - if (cpus_empty(tmp)) + if (!cpumask_intersects(mask, cpu_online_mask)) return; if (get_irte(irq, &irte)) return; - if (assign_irq_vector(irq, mask)) + if (assign_irq_vector(irq, *mask)) return; cfg = irq_cfg(irq); - cpus_and(tmp, cfg->domain, mask); + cpumask_and(&tmp, &cfg->domain, mask); dest = cpu_mask_to_apicid(tmp); irte.vector = cfg->vector; @@ -3106,7 +3106,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) } desc = irq_to_desc(irq); - desc->affinity = mask; + cpumask_copy(&desc->affinity, mask); } #endif #endif /* CONFIG_SMP */ @@ -3308,7 +3308,7 @@ void arch_teardown_msi_irq(unsigned int irq) #ifdef CONFIG_DMAR #ifdef CONFIG_SMP -static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) +static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_cfg *cfg; struct msi_msg msg; @@ -3316,15 +3316,14 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) cpumask_t tmp; struct irq_desc *desc; - cpus_and(tmp, mask, cpu_online_map); - if (cpus_empty(tmp)) + if (!cpumask_intersects(mask, cpu_online_mask)) return; - if (assign_irq_vector(irq, mask)) + if (assign_irq_vector(irq, *mask)) return; cfg = irq_cfg(irq); - cpus_and(tmp, cfg->domain, mask); + cpumask_and(&tmp, &cfg->domain, mask); dest = cpu_mask_to_apicid(tmp); dmar_msi_read(irq, &msg); @@ -3336,7 +3335,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) dmar_msi_write(irq, &msg); desc = irq_to_desc(irq); - desc->affinity = mask; + cpumask_copy(&desc->affinity, mask); } #endif /* CONFIG_SMP */ @@ -3369,7 +3368,7 @@ int arch_setup_dmar_msi(unsigned int irq) #ifdef CONFIG_HPET_TIMER #ifdef CONFIG_SMP -static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) +static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_cfg *cfg; struct irq_desc *desc; @@ -3377,15 +3376,14 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) unsigned int dest; cpumask_t tmp; - cpus_and(tmp, mask, cpu_online_map); - if (cpus_empty(tmp)) + if (!cpumask_intersects(mask, cpu_online_mask)) return; - if (assign_irq_vector(irq, mask)) + if (assign_irq_vector(irq, *mask)) return; cfg = irq_cfg(irq); - cpus_and(tmp, cfg->domain, mask); + cpumask_and(&tmp, &cfg->domain, mask); dest = cpu_mask_to_apicid(tmp); hpet_msi_read(irq, &msg); @@ -3397,7 +3395,7 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) hpet_msi_write(irq, &msg); desc = irq_to_desc(irq); - desc->affinity = mask; + cpumask_copy(&desc->affinity, mask); } #endif /* CONFIG_SMP */ @@ -3451,27 +3449,26 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) write_ht_irq_msg(irq, &msg); } -static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) +static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_cfg *cfg; unsigned int dest; cpumask_t tmp; struct irq_desc *desc; - cpus_and(tmp, mask, cpu_online_map); - if (cpus_empty(tmp)) + if (!cpumask_intersects(mask, cpu_online_mask)) return; - if (assign_irq_vector(irq, mask)) + if (assign_irq_vector(irq, *mask)) return; cfg = irq_cfg(irq); - 
cpus_and(tmp, cfg->domain, mask); + cpumask_and(&tmp, &cfg->domain, mask); dest = cpu_mask_to_apicid(tmp); target_ht_irq(irq, dest, cfg->vector); desc = irq_to_desc(irq); - desc->affinity = mask; + cpumask_copy(&desc->affinity, mask); } #endif @@ -3794,10 +3791,10 @@ void __init setup_ioapic_dest(void) #ifdef CONFIG_INTR_REMAP if (intr_remapping_enabled) - set_ir_ioapic_affinity_irq(irq, mask); + set_ir_ioapic_affinity_irq(irq, &mask); else #endif - set_ioapic_affinity_irq(irq, mask); + set_ioapic_affinity_irq(irq, &mask); } } diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index a51382672de0..87870a49be4e 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -251,7 +251,7 @@ void fixup_irqs(cpumask_t map) mask = map; } if (desc->chip->set_affinity) - desc->chip->set_affinity(irq, mask); + desc->chip->set_affinity(irq, &mask); else if (desc->action && !(warned++)) printk("Cannot set affinity for irq %i\n", irq); } diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 60eb84eb77a0..7d37f847544d 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -116,7 +116,7 @@ void fixup_irqs(cpumask_t map) desc->chip->mask(irq); if (desc->chip->set_affinity) - desc->chip->set_affinity(irq, mask); + desc->chip->set_affinity(irq, &mask); else if (!(warned++)) set_affinity = 0; diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 7beffcab2745..9dedbbd218c3 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -704,16 +704,17 @@ static unsigned int iosapic_startup_irq(unsigned int irq) } #ifdef CONFIG_SMP -static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest) +static void iosapic_set_affinity_irq(unsigned int irq, + const struct cpumask *dest) { struct vector_info *vi = iosapic_get_vector(irq); u32 d0, d1, dummy_d0; unsigned long flags; - if (cpu_check_affinity(irq, &dest)) + if (cpu_check_affinity(irq, dest)) return; - vi->txn_addr = txn_affinity_addr(irq, first_cpu(dest)); + vi->txn_addr = txn_affinity_addr(irq, cpumask_first(dest)); spin_lock_irqsave(&iosapic_lock, flags); /* d1 contains the destination CPU, so only want to set that diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 1e3b934a4cf7..eba5ec5b020e 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -579,7 +579,7 @@ void rebind_evtchn_irq(int evtchn, int irq) spin_unlock(&irq_mapping_update_lock); /* new event channels are always bound to cpu 0 */ - irq_set_affinity(irq, cpumask_of_cpu(0)); + irq_set_affinity(irq, cpumask_of(0)); /* Unmask the event channel. 
*/ enable_irq(irq); @@ -608,9 +608,9 @@ static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) } -static void set_affinity_irq(unsigned irq, cpumask_t dest) +static void set_affinity_irq(unsigned irq, const struct cpumask *dest) { - unsigned tcpu = first_cpu(dest); + unsigned tcpu = cpumask_first(dest); rebind_irq_to_cpu(irq, tcpu); } diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f58a0cf8929a..48e63934fabe 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -109,13 +109,13 @@ extern void enable_irq(unsigned int irq); extern cpumask_t irq_default_affinity; -extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); +extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); #else /* CONFIG_SMP */ -static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) +static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) { return -EINVAL; } diff --git a/include/linux/irq.h b/include/linux/irq.h index 3dddfa703ebd..ab70fd604d3a 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -113,7 +113,8 @@ struct irq_chip { void (*eoi)(unsigned int irq); void (*end)(unsigned int irq); - void (*set_affinity)(unsigned int irq, cpumask_t dest); + void (*set_affinity)(unsigned int irq, + const struct cpumask *dest); int (*retrigger)(unsigned int irq); int (*set_type)(unsigned int irq, unsigned int flow_type); int (*set_wake)(unsigned int irq, unsigned int on); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 10b5092e9bfe..58d8e31daa49 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -45,7 +45,7 @@ void dynamic_irq_init(unsigned int irq) desc->irq_count = 0; desc->irqs_unhandled = 0; #ifdef CONFIG_SMP - cpus_setall(desc->affinity); + cpumask_setall(&desc->affinity); #endif spin_unlock_irqrestore(&desc->lock, flags); } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 801addda3c43..10ad2f87ed9a 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -79,7 +79,7 @@ int irq_can_set_affinity(unsigned int irq) * @cpumask: cpumask * */ -int irq_set_affinity(unsigned int irq, cpumask_t cpumask) +int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; @@ -91,14 +91,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask) #ifdef CONFIG_GENERIC_PENDING_IRQ if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { - desc->affinity = cpumask; + cpumask_copy(&desc->affinity, cpumask); desc->chip->set_affinity(irq, cpumask); } else { desc->status |= IRQ_MOVE_PENDING; - desc->pending_mask = cpumask; + cpumask_copy(&desc->pending_mask, cpumask); } #else - desc->affinity = cpumask; + cpumask_copy(&desc->affinity, cpumask); desc->chip->set_affinity(irq, cpumask); #endif desc->status |= IRQ_AFFINITY_SET; @@ -112,26 +112,24 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask) */ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) { - cpumask_t mask; - if (!irq_can_set_affinity(irq)) return 0; - cpus_and(mask, cpu_online_map, irq_default_affinity); - /* * Preserve an userspace affinity setup, but make sure that * one of the targets is online. 
*/ if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { - if (cpus_intersects(desc->affinity, cpu_online_map)) - mask = desc->affinity; + if (cpumask_any_and(&desc->affinity, cpu_online_mask) + < nr_cpu_ids) + goto set_affinity; else desc->status &= ~IRQ_AFFINITY_SET; } - desc->affinity = mask; - desc->chip->set_affinity(irq, mask); + cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity); +set_affinity: + desc->chip->set_affinity(irq, &desc->affinity); return 0; } diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 9db681d95814..bd72329e630c 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -4,7 +4,6 @@ void move_masked_irq(int irq) { struct irq_desc *desc = irq_to_desc(irq); - cpumask_t tmp; if (likely(!(desc->status & IRQ_MOVE_PENDING))) return; @@ -19,7 +18,7 @@ void move_masked_irq(int irq) desc->status &= ~IRQ_MOVE_PENDING; - if (unlikely(cpus_empty(desc->pending_mask))) + if (unlikely(cpumask_empty(&desc->pending_mask))) return; if (!desc->chip->set_affinity) @@ -27,8 +26,6 @@ void move_masked_irq(int irq) assert_spin_locked(&desc->lock); - cpus_and(tmp, desc->pending_mask, cpu_online_map); - /* * If there was a valid mask to work with, please * do the disable, re-program, enable sequence. @@ -41,10 +38,13 @@ void move_masked_irq(int irq) * For correct operation this depends on the caller * masking the irqs. */ - if (likely(!cpus_empty(tmp))) { - desc->chip->set_affinity(irq,tmp); + if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask) + < nr_cpu_ids)) { + cpumask_and(&desc->affinity, + &desc->pending_mask, cpu_online_mask); + desc->chip->set_affinity(irq, &desc->affinity); } - cpus_clear(desc->pending_mask); + cpumask_clear(&desc->pending_mask); } void move_native_irq(int irq) diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index f293349d49d0..8e91c9762520 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -40,33 +40,42 @@ static ssize_t irq_affinity_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; - cpumask_t new_value; + cpumask_var_t new_value; int err; if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || irq_balancing_disabled(irq)) return -EIO; - err = cpumask_parse_user(buffer, count, &new_value); + if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) + return -ENOMEM; + + err = cpumask_parse_user(buffer, count, new_value); if (err) - return err; + goto free_cpumask; - if (!is_affinity_mask_valid(new_value)) - return -EINVAL; + if (!is_affinity_mask_valid(*new_value)) { + err = -EINVAL; + goto free_cpumask; + } /* * Do not allow disabling IRQs completely - it's a too easy * way to make the system unusable accidentally :-) At least * one online CPU still has to be targeted. */ - if (!cpus_intersects(new_value, cpu_online_map)) + if (!cpumask_intersects(new_value, cpu_online_mask)) { /* Special case for empty set - allow the architecture code to set default SMP affinity. */ - return irq_select_affinity_usr(irq) ? -EINVAL : count; - - irq_set_affinity(irq, new_value); + err = irq_select_affinity_usr(irq) ? 
-EINVAL : count; + } else { + irq_set_affinity(irq, new_value); + err = count; + } - return count; +free_cpumask: + free_cpumask_var(new_value); + return err; } static int irq_affinity_proc_open(struct inode *inode, struct file *file) diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index df12434b43ca..ab65d217583f 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -136,7 +136,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) */ static void tick_setup_device(struct tick_device *td, struct clock_event_device *newdev, int cpu, - const cpumask_t *cpumask) + const struct cpumask *cpumask) { ktime_t next_event; void (*handler)(struct clock_event_device *) = NULL; @@ -171,8 +171,8 @@ static void tick_setup_device(struct tick_device *td, * When the device is not per cpu, pin the interrupt to the * current cpu: */ - if (!cpus_equal(newdev->cpumask, *cpumask)) - irq_set_affinity(newdev->irq, *cpumask); + if (!cpumask_equal(&newdev->cpumask, cpumask)) + irq_set_affinity(newdev->irq, cpumask); /* * When global broadcasting is active, check if the current -- cgit v1.2.3 From 320ab2b0b1e08e3805a3e1084a2f0eb1938d5d67 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 13 Dec 2008 21:20:26 +1030 Subject: cpumask: convert struct clock_event_device to cpumask pointers. Impact: change calling convention of existing clock_event APIs struct clock_event_timer's cpumask field gets changed to take pointer, as does the ->broadcast function. Another single-patch change. For safety, we BUG_ON() in clockevents_register_device() if it's not set. Signed-off-by: Rusty Russell Cc: Ingo Molnar --- arch/arm/mach-at91/at91rm9200_time.c | 3 +-- arch/arm/mach-at91/at91sam926x_time.c | 2 +- arch/arm/mach-davinci/time.c | 2 +- arch/arm/mach-imx/time.c | 2 +- arch/arm/mach-ixp4xx/common.c | 2 +- arch/arm/mach-msm/timer.c | 2 +- arch/arm/mach-ns9xxx/time-ns9360.c | 2 +- arch/arm/mach-omap1/time.c | 2 +- arch/arm/mach-omap1/timer32k.c | 2 +- arch/arm/mach-omap2/timer-gp.c | 2 +- arch/arm/mach-pxa/time.c | 2 +- arch/arm/mach-realview/core.c | 2 +- arch/arm/mach-realview/localtimer.c | 4 ++-- arch/arm/mach-sa1100/time.c | 2 +- arch/arm/mach-versatile/core.c | 2 +- arch/arm/plat-mxc/time.c | 2 +- arch/arm/plat-orion/time.c | 2 +- arch/avr32/kernel/time.c | 2 +- arch/blackfin/kernel/time-ts.c | 2 +- arch/m68knommu/platform/coldfire/pit.c | 2 +- arch/mips/jazz/irq.c | 2 +- arch/mips/kernel/cevt-bcm1480.c | 2 +- arch/mips/kernel/cevt-ds1287.c | 2 +- arch/mips/kernel/cevt-gt641xx.c | 2 +- arch/mips/kernel/cevt-r4k.c | 2 +- arch/mips/kernel/cevt-sb1250.c | 2 +- arch/mips/kernel/cevt-smtc.c | 2 +- arch/mips/kernel/cevt-txx9.c | 2 +- arch/mips/kernel/i8253.c | 2 +- arch/mips/nxp/pnx8550/common/time.c | 1 + arch/mips/sgi-ip27/ip27-timer.c | 2 +- arch/mips/sni/time.c | 2 +- arch/powerpc/kernel/time.c | 2 +- arch/s390/kernel/time.c | 2 +- arch/sh/include/asm/smp.h | 2 +- arch/sh/kernel/smp.c | 4 ++-- arch/sh/kernel/timers/timer-broadcast.c | 2 +- arch/sh/kernel/timers/timer-tmu.c | 2 +- arch/sparc64/kernel/time.c | 2 +- arch/um/kernel/time.c | 2 +- arch/x86/kernel/apic.c | 8 ++++---- arch/x86/kernel/hpet.c | 4 ++-- arch/x86/kernel/i8253.c | 2 +- arch/x86/kernel/mfgpt_32.c | 2 +- arch/x86/kernel/vmiclock_32.c | 2 +- arch/x86/lguest/boot.c | 2 +- arch/x86/xen/time.c | 2 +- drivers/clocksource/tcb_clksrc.c | 2 +- include/linux/clockchips.h | 4 ++-- kernel/time/clockevents.c | 2 ++ kernel/time/tick-broadcast.c | 2 +- kernel/time/tick-common.c | 8 ++++---- 52 files changed, 63 
insertions(+), 61 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index a72e798a2a40..72f51d39202c 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -169,7 +169,6 @@ static struct clock_event_device clkevt = { .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .shift = 32, .rating = 150, - .cpumask = CPU_MASK_CPU0, .set_next_event = clkevt32k_next_event, .set_mode = clkevt32k_mode, }; @@ -197,7 +196,7 @@ void __init at91rm9200_timer_init(void) clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift); clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt); clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1; - clkevt.cpumask = cpumask_of_cpu(0); + clkevt.cpumask = cpumask_of(0); clockevents_register_device(&clkevt); /* register clocksource */ diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c index 122fd77ed580..b63e1d5f1bad 100644 --- a/arch/arm/mach-at91/at91sam926x_time.c +++ b/arch/arm/mach-at91/at91sam926x_time.c @@ -91,7 +91,6 @@ static struct clock_event_device pit_clkevt = { .features = CLOCK_EVT_FEAT_PERIODIC, .shift = 32, .rating = 100, - .cpumask = CPU_MASK_CPU0, .set_mode = pit_clkevt_mode, }; @@ -173,6 +172,7 @@ static void __init at91sam926x_pit_init(void) /* Set up and register clockevents */ pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift); + pit_clkevt.cpumask = cpumask_of(0); clockevents_register_device(&pit_clkevt); } diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c index 3b9a296b5c4b..f8bcd29d17a6 100644 --- a/arch/arm/mach-davinci/time.c +++ b/arch/arm/mach-davinci/time.c @@ -322,7 +322,7 @@ static void __init davinci_timer_init(void) clockevent_davinci.min_delta_ns = clockevent_delta2ns(1, &clockevent_davinci); - clockevent_davinci.cpumask = cpumask_of_cpu(0); + clockevent_davinci.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_davinci); } diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c index a11765f5f23b..aff0ebcfa847 100644 --- a/arch/arm/mach-imx/time.c +++ b/arch/arm/mach-imx/time.c @@ -184,7 +184,7 @@ static int __init imx_clockevent_init(unsigned long rate) clockevent_imx.min_delta_ns = clockevent_delta2ns(0xf, &clockevent_imx); - clockevent_imx.cpumask = cpumask_of_cpu(0); + clockevent_imx.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_imx); diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 7766f469456b..f4656d2ac8a8 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -487,7 +487,7 @@ static int __init ixp4xx_clockevent_init(void) clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx); clockevent_ixp4xx.min_delta_ns = clockevent_delta2ns(0xf, &clockevent_ixp4xx); - clockevent_ixp4xx.cpumask = cpumask_of_cpu(0); + clockevent_ixp4xx.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_ixp4xx); return 0; diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 345a14cb73c3..444d9c0f5ca6 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -182,7 +182,7 @@ static void __init msm_timer_init(void) clockevent_delta2ns(0xf0000000 >> clock->shift, ce); /* 4 gets rounded down to 3 */ ce->min_delta_ns = clockevent_delta2ns(4, ce); - ce->cpumask = cpumask_of_cpu(0); + ce->cpumask = cpumask_of(0); cs->mult = clocksource_hz2mult(clock->freq, cs->shift); res = 
clocksource_register(cs); diff --git a/arch/arm/mach-ns9xxx/time-ns9360.c b/arch/arm/mach-ns9xxx/time-ns9360.c index a63424d083d9..41df69721769 100644 --- a/arch/arm/mach-ns9xxx/time-ns9360.c +++ b/arch/arm/mach-ns9xxx/time-ns9360.c @@ -173,7 +173,7 @@ static void __init ns9360_timer_init(void) ns9360_clockevent_device.min_delta_ns = clockevent_delta2ns(1, &ns9360_clockevent_device); - ns9360_clockevent_device.cpumask = cpumask_of_cpu(0); + ns9360_clockevent_device.cpumask = cpumask_of(0); clockevents_register_device(&ns9360_clockevent_device); setup_irq(IRQ_NS9360_TIMER0 + TIMER_CLOCKEVENT, diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c index 2cf7e32bd293..495a32c287b4 100644 --- a/arch/arm/mach-omap1/time.c +++ b/arch/arm/mach-omap1/time.c @@ -173,7 +173,7 @@ static __init void omap_init_mpu_timer(unsigned long rate) clockevent_mpu_timer1.min_delta_ns = clockevent_delta2ns(1, &clockevent_mpu_timer1); - clockevent_mpu_timer1.cpumask = cpumask_of_cpu(0); + clockevent_mpu_timer1.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_mpu_timer1); } diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c index 705367ece174..fd3f7396e162 100644 --- a/arch/arm/mach-omap1/timer32k.c +++ b/arch/arm/mach-omap1/timer32k.c @@ -187,7 +187,7 @@ static __init void omap_init_32k_timer(void) clockevent_32k_timer.min_delta_ns = clockevent_delta2ns(1, &clockevent_32k_timer); - clockevent_32k_timer.cpumask = cpumask_of_cpu(0); + clockevent_32k_timer.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_32k_timer); } diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 589393bedade..ae6036300f60 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c @@ -120,7 +120,7 @@ static void __init omap2_gp_clockevent_init(void) clockevent_gpt.min_delta_ns = clockevent_delta2ns(1, &clockevent_gpt); - clockevent_gpt.cpumask = cpumask_of_cpu(0); + clockevent_gpt.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_gpt); } diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index f8a9a62959e5..bf3c9a4aad50 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c @@ -122,7 +122,6 @@ static struct clock_event_device ckevt_pxa_osmr0 = { .features = CLOCK_EVT_FEAT_ONESHOT, .shift = 32, .rating = 200, - .cpumask = CPU_MASK_CPU0, .set_next_event = pxa_osmr0_set_next_event, .set_mode = pxa_osmr0_set_mode, }; @@ -170,6 +169,7 @@ static void __init pxa_timer_init(void) clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0); ckevt_pxa_osmr0.min_delta_ns = clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_pxa_osmr0) + 1; + ckevt_pxa_osmr0.cpumask = cpumask_of(0); cksrc_pxa_oscr0.mult = clocksource_hz2mult(clock_tick_rate, cksrc_pxa_oscr0.shift); diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 2f04d54711e7..b07cb9b7adb1 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -511,7 +511,7 @@ static struct clock_event_device timer0_clockevent = { .set_mode = timer_set_mode, .set_next_event = timer_set_next_event, .rating = 300, - .cpumask = CPU_MASK_ALL, + .cpumask = cpu_all_mask, }; static void __init realview_clockevents_init(unsigned int timer_irq) diff --git a/arch/arm/mach-realview/localtimer.c b/arch/arm/mach-realview/localtimer.c index 44d178cd5733..504961ef343c 100644 --- a/arch/arm/mach-realview/localtimer.c +++ b/arch/arm/mach-realview/localtimer.c @@ -161,7 +161,7 @@ void __cpuinit local_timer_setup(unsigned int cpu) 
clk->set_mode = local_timer_set_mode; clk->set_next_event = local_timer_set_next_event; clk->irq = IRQ_LOCALTIMER; - clk->cpumask = cpumask_of_cpu(cpu); + clk->cpumask = cpumask_of(cpu); clk->shift = 20; clk->mult = div_sc(mpcore_timer_rate, NSEC_PER_SEC, clk->shift); clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk); @@ -199,7 +199,7 @@ void __cpuinit local_timer_setup(unsigned int cpu) clk->rating = 200; clk->set_mode = dummy_timer_set_mode; clk->broadcast = smp_timer_broadcast; - clk->cpumask = cpumask_of_cpu(cpu); + clk->cpumask = cpumask_of(cpu); clockevents_register_device(clk); } diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c index 24c0a4bae850..1cac4ac0b4b8 100644 --- a/arch/arm/mach-sa1100/time.c +++ b/arch/arm/mach-sa1100/time.c @@ -73,7 +73,6 @@ static struct clock_event_device ckevt_sa1100_osmr0 = { .features = CLOCK_EVT_FEAT_ONESHOT, .shift = 32, .rating = 200, - .cpumask = CPU_MASK_CPU0, .set_next_event = sa1100_osmr0_set_next_event, .set_mode = sa1100_osmr0_set_mode, }; @@ -110,6 +109,7 @@ static void __init sa1100_timer_init(void) clockevent_delta2ns(0x7fffffff, &ckevt_sa1100_osmr0); ckevt_sa1100_osmr0.min_delta_ns = clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_sa1100_osmr0) + 1; + ckevt_sa1100_osmr0.cpumask = cpumask_of(0); cksrc_sa1100_oscr.mult = clocksource_hz2mult(CLOCK_TICK_RATE, cksrc_sa1100_oscr.shift); diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 565e0ba0d67e..a3f1933434e2 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -965,7 +965,7 @@ static void __init versatile_timer_init(void) timer0_clockevent.min_delta_ns = clockevent_delta2ns(0xf, &timer0_clockevent); - timer0_clockevent.cpumask = cpumask_of_cpu(0); + timer0_clockevent.cpumask = cpumask_of(0); clockevents_register_device(&timer0_clockevent); } diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c index fd28f5194f71..758a1293bcfa 100644 --- a/arch/arm/plat-mxc/time.c +++ b/arch/arm/plat-mxc/time.c @@ -190,7 +190,7 @@ static int __init mxc_clockevent_init(void) clockevent_mxc.min_delta_ns = clockevent_delta2ns(0xff, &clockevent_mxc); - clockevent_mxc.cpumask = cpumask_of_cpu(0); + clockevent_mxc.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_mxc); diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c index 544d6b327f3a..6fa2923e6dca 100644 --- a/arch/arm/plat-orion/time.c +++ b/arch/arm/plat-orion/time.c @@ -149,7 +149,6 @@ static struct clock_event_device orion_clkevt = { .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, .shift = 32, .rating = 300, - .cpumask = CPU_MASK_CPU0, .set_next_event = orion_clkevt_next_event, .set_mode = orion_clkevt_mode, }; @@ -199,5 +198,6 @@ void __init orion_time_init(unsigned int irq, unsigned int tclk) orion_clkevt.mult = div_sc(tclk, NSEC_PER_SEC, orion_clkevt.shift); orion_clkevt.max_delta_ns = clockevent_delta2ns(0xfffffffe, &orion_clkevt); orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt); + orion_clkevt.cpumask = cpumask_of(0); clockevents_register_device(&orion_clkevt); } diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index 283481d74a5b..0ff46bf873b0 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c @@ -106,7 +106,6 @@ static struct clock_event_device comparator = { .features = CLOCK_EVT_FEAT_ONESHOT, .shift = 16, .rating = 50, - .cpumask = CPU_MASK_CPU0, .set_next_event = comparator_next_event, .set_mode = comparator_mode, }; @@ -134,6 +133,7 @@ void __init 
time_init(void) comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift); comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator); comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1; + comparator.cpumask = cpumask_of(0); sysreg_write(COMPARE, 0); timer_irqaction.dev_id = &comparator; diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index e887efc86c29..0ed2badfd746 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -162,7 +162,6 @@ static struct clock_event_device clockevent_bfin = { .name = "bfin_core_timer", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .shift = 32, - .cpumask = CPU_MASK_CPU0, .set_next_event = bfin_timer_set_next_event, .set_mode = bfin_timer_set_mode, }; @@ -193,6 +192,7 @@ static int __init bfin_clockevent_init(void) clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); + clockevent_bfin.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_bfin); return 0; diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c index c5b916700b22..2a12e7fa9748 100644 --- a/arch/m68knommu/platform/coldfire/pit.c +++ b/arch/m68knommu/platform/coldfire/pit.c @@ -156,7 +156,7 @@ void hw_timer_init(void) { u32 imr; - cf_pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); + cf_pit_clockevent.cpumask = cpumask_of(smp_processor_id()); cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32); cf_pit_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFF, &cf_pit_clockevent); diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c index d7f8a782aae4..03965cb1b252 100644 --- a/arch/mips/jazz/irq.c +++ b/arch/mips/jazz/irq.c @@ -146,7 +146,7 @@ void __init plat_time_init(void) BUG_ON(HZ != 100); - cd->cpumask = cpumask_of_cpu(cpu); + cd->cpumask = cpumask_of(cpu); clockevents_register_device(cd); action->dev_id = cd; setup_irq(JAZZ_TIMER_IRQ, action); diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index d7e21bc8cd21..b820661678b0 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c @@ -126,7 +126,7 @@ void __cpuinit sb1480_clockevent_init(void) cd->min_delta_ns = clockevent_delta2ns(2, cd); cd->rating = 200; cd->irq = irq; - cd->cpumask = cpumask_of_cpu(cpu); + cd->cpumask = cpumask_of(cpu); cd->set_next_event = sibyte_next_event; cd->set_mode = sibyte_set_mode; clockevents_register_device(cd); diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c index df4acb68bfb5..1ada45ea0700 100644 --- a/arch/mips/kernel/cevt-ds1287.c +++ b/arch/mips/kernel/cevt-ds1287.c @@ -88,7 +88,6 @@ static void ds1287_event_handler(struct clock_event_device *dev) static struct clock_event_device ds1287_clockevent = { .name = "ds1287", .features = CLOCK_EVT_FEAT_PERIODIC, - .cpumask = CPU_MASK_CPU0, .set_next_event = ds1287_set_next_event, .set_mode = ds1287_set_mode, .event_handler = ds1287_event_handler, @@ -122,6 +121,7 @@ int __init ds1287_clockevent_init(int irq) clockevent_set_clock(cd, 32768); cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + cd->cpumask = cpumask_of(0); clockevents_register_device(&ds1287_clockevent); diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c index 6e2f58520afb..e9b787feedcb 
100644 --- a/arch/mips/kernel/cevt-gt641xx.c +++ b/arch/mips/kernel/cevt-gt641xx.c @@ -96,7 +96,6 @@ static void gt641xx_timer0_event_handler(struct clock_event_device *dev) static struct clock_event_device gt641xx_timer0_clockevent = { .name = "gt641xx-timer0", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .cpumask = CPU_MASK_CPU0, .irq = GT641XX_TIMER0_IRQ, .set_next_event = gt641xx_timer0_set_next_event, .set_mode = gt641xx_timer0_set_mode, @@ -132,6 +131,7 @@ static int __init gt641xx_timer0_clockevent_init(void) clockevent_set_clock(cd, gt641xx_base_clock); cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + cd->cpumask = cpumask_of(0); clockevents_register_device(>641xx_timer0_clockevent); diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 4a4c59f2737a..e1ec83b68031 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -195,7 +195,7 @@ int __cpuinit mips_clockevent_init(void) cd->rating = 300; cd->irq = irq; - cd->cpumask = cpumask_of_cpu(cpu); + cd->cpumask = cpumask_of(cpu); cd->set_next_event = mips_next_event; cd->set_mode = mips_set_clock_mode; cd->event_handler = mips_event_handler; diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c index 0f188cd46e03..a2eebaafda52 100644 --- a/arch/mips/kernel/cevt-sb1250.c +++ b/arch/mips/kernel/cevt-sb1250.c @@ -125,7 +125,7 @@ void __cpuinit sb1250_clockevent_init(void) cd->min_delta_ns = clockevent_delta2ns(2, cd); cd->rating = 200; cd->irq = irq; - cd->cpumask = cpumask_of_cpu(cpu); + cd->cpumask = cpumask_of(cpu); cd->set_next_event = sibyte_next_event; cd->set_mode = sibyte_set_mode; clockevents_register_device(cd); diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c index 5162fe4b5952..6d45e24db5bf 100644 --- a/arch/mips/kernel/cevt-smtc.c +++ b/arch/mips/kernel/cevt-smtc.c @@ -292,7 +292,7 @@ int __cpuinit mips_clockevent_init(void) cd->rating = 300; cd->irq = irq; - cd->cpumask = cpumask_of_cpu(cpu); + cd->cpumask = cpumask_of(cpu); cd->set_next_event = mips_next_event; cd->set_mode = mips_set_clock_mode; cd->event_handler = mips_event_handler; diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index b5fc4eb412d2..eccf7d6096bd 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c @@ -112,7 +112,6 @@ static struct clock_event_device txx9tmr_clock_event_device = { .name = "TXx9", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .rating = 200, - .cpumask = CPU_MASK_CPU0, .set_mode = txx9tmr_set_mode, .set_next_event = txx9tmr_set_next_event, }; @@ -150,6 +149,7 @@ void __init txx9_clockevent_init(unsigned long baseaddr, int irq, clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd); cd->min_delta_ns = clockevent_delta2ns(0xf, cd); cd->irq = irq; + cd->cpumask = cpumask_of(0), clockevents_register_device(cd); setup_irq(irq, &txx9tmr_irq); printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n", diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index b6ac55162b9a..f4d187825f96 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -115,7 +115,7 @@ void __init setup_pit_timer(void) * Start pit with the boot cpu mask and make it global after the * IO_APIC has been initialized. 
*/ - cd->cpumask = cpumask_of_cpu(cpu); + cd->cpumask = cpumask_of(cpu); clockevent_set_clock(cd, CLOCK_TICK_RATE); cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd); cd->min_delta_ns = clockevent_delta2ns(0xF, cd); diff --git a/arch/mips/nxp/pnx8550/common/time.c b/arch/mips/nxp/pnx8550/common/time.c index 62f495b57f93..cf293b279098 100644 --- a/arch/mips/nxp/pnx8550/common/time.c +++ b/arch/mips/nxp/pnx8550/common/time.c @@ -102,6 +102,7 @@ __init void plat_time_init(void) unsigned int p; unsigned int pow2p; + pnx8xxx_clockevent.cpumask = cpu_none_mask; clockevents_register_device(&pnx8xxx_clockevent); clocksource_register(&pnx_clocksource); diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index 1327c2746fb7..f024057a35f8 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -134,7 +134,7 @@ void __cpuinit hub_rt_clock_event_init(void) cd->min_delta_ns = clockevent_delta2ns(0x300, cd); cd->rating = 200; cd->irq = irq; - cd->cpumask = cpumask_of_cpu(cpu); + cd->cpumask = cpumask_of(cpu); cd->set_next_event = rt_next_event; cd->set_mode = rt_set_mode; clockevents_register_device(cd); diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index 796e3ce28720..69f5f88711cc 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c @@ -80,7 +80,7 @@ static void __init sni_a20r_timer_setup(void) struct irqaction *action = &a20r_irqaction; unsigned int cpu = smp_processor_id(); - cd->cpumask = cpumask_of_cpu(cpu); + cd->cpumask = cpumask_of(cpu); clockevents_register_device(cd); action->dev_id = cd; setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index e2ee66b5831d..6f39d35d6f55 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -869,7 +869,7 @@ static void register_decrementer_clockevent(int cpu) struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; *dec = decrementer_clockevent; - dec->cpumask = cpumask_of_cpu(cpu); + dec->cpumask = cpumask_of(cpu); printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", dec->name, dec->mult, dec->shift, cpu); diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index eccefbbff887..f5bd141c8443 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -154,7 +154,7 @@ void init_cpu_timer(void) cd->min_delta_ns = 1; cd->max_delta_ns = LONG_MAX; cd->rating = 400; - cd->cpumask = cpumask_of_cpu(cpu); + cd->cpumask = cpumask_of(cpu); cd->set_next_event = s390_next_event; cd->set_mode = s390_set_mode; diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h index 85b660c17eb0..c24e9c6a1736 100644 --- a/arch/sh/include/asm/smp.h +++ b/arch/sh/include/asm/smp.h @@ -31,7 +31,7 @@ enum { }; void smp_message_recv(unsigned int msg); -void smp_timer_broadcast(cpumask_t mask); +void smp_timer_broadcast(const struct cpumask *mask); void local_timer_interrupt(void); void local_timer_setup(unsigned int cpu); diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 593937d0c495..8f4027412614 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -184,11 +184,11 @@ void arch_send_call_function_single_ipi(int cpu) plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); } -void smp_timer_broadcast(cpumask_t mask) +void smp_timer_broadcast(const struct cpumask *mask) { int cpu; - for_each_cpu_mask(cpu, mask) + for_each_cpu(cpu, mask) plat_send_ipi(cpu, SMP_MSG_TIMER); } diff --git a/arch/sh/kernel/timers/timer-broadcast.c b/arch/sh/kernel/timers/timer-broadcast.c 
index c2317635230f..96e8eaea1e62 100644 --- a/arch/sh/kernel/timers/timer-broadcast.c +++ b/arch/sh/kernel/timers/timer-broadcast.c @@ -51,7 +51,7 @@ void __cpuinit local_timer_setup(unsigned int cpu) clk->mult = 1; clk->set_mode = dummy_timer_set_mode; clk->broadcast = smp_timer_broadcast; - clk->cpumask = cpumask_of_cpu(cpu); + clk->cpumask = cpumask_of(cpu); clockevents_register_device(clk); } diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index 3c61ddd4d43e..0db3f9510336 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -263,7 +263,7 @@ static int tmu_timer_init(void) tmu0_clockevent.min_delta_ns = clockevent_delta2ns(1, &tmu0_clockevent); - tmu0_clockevent.cpumask = cpumask_of_cpu(0); + tmu0_clockevent.cpumask = cpumask_of(0); clockevents_register_device(&tmu0_clockevent); diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 141da3759091..9df8f095a8b1 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -763,7 +763,7 @@ void __devinit setup_sparc64_timer(void) sevt = &__get_cpu_var(sparc64_events); memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); - sevt->cpumask = cpumask_of_cpu(smp_processor_id()); + sevt->cpumask = cpumask_of(smp_processor_id()); clockevents_register_device(sevt); } diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 47f04f4a3464..b13a87a3ec95 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -50,7 +50,7 @@ static int itimer_next_event(unsigned long delta, static struct clock_event_device itimer_clockevent = { .name = "itimer", .rating = 250, - .cpumask = CPU_MASK_ALL, + .cpumask = cpu_all_mask, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = itimer_set_mode, .set_next_event = itimer_next_event, diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 16f94879b525..b2cef49f3085 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -141,7 +141,7 @@ static int lapic_next_event(unsigned long delta, struct clock_event_device *evt); static void lapic_timer_setup(enum clock_event_mode mode, struct clock_event_device *evt); -static void lapic_timer_broadcast(cpumask_t mask); +static void lapic_timer_broadcast(const struct cpumask *mask); static void apic_pm_activate(void); /* @@ -453,10 +453,10 @@ static void lapic_timer_setup(enum clock_event_mode mode, /* * Local APIC timer broadcast function */ -static void lapic_timer_broadcast(cpumask_t mask) +static void lapic_timer_broadcast(const struct cpumask *mask) { #ifdef CONFIG_SMP - send_IPI_mask(mask, LOCAL_TIMER_VECTOR); + send_IPI_mask(*mask, LOCAL_TIMER_VECTOR); #endif } @@ -469,7 +469,7 @@ static void __cpuinit setup_APIC_timer(void) struct clock_event_device *levt = &__get_cpu_var(lapic_events); memcpy(levt, &lapic_clockevent, sizeof(*levt)); - levt->cpumask = cpumask_of_cpu(smp_processor_id()); + levt->cpumask = cpumask_of(smp_processor_id()); clockevents_register_device(levt); } diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 940f25851e1e..e76d7e272974 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -246,7 +246,7 @@ static void hpet_legacy_clockevent_register(void) * Start hpet with the boot cpu mask and make it * global after the IO_APIC has been initialized. 
*/ - hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); + hpet_clockevent.cpumask = cpumask_of(smp_processor_id()); clockevents_register_device(&hpet_clockevent); global_clock_event = &hpet_clockevent; printk(KERN_DEBUG "hpet clockevent registered\n"); @@ -500,7 +500,7 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) /* 5 usec minimum reprogramming delta. */ evt->min_delta_ns = 5000; - evt->cpumask = cpumask_of_cpu(hdev->cpu); + evt->cpumask = cpumask_of(hdev->cpu); clockevents_register_device(evt); } diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index c1b5e3ece1f2..10f92fb532f3 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -114,7 +114,7 @@ void __init setup_pit_timer(void) * Start pit with the boot cpu mask and make it global after the * IO_APIC has been initialized. */ - pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); + pit_clockevent.cpumask = cpumask_of(smp_processor_id()); pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, pit_clockevent.shift); pit_clockevent.max_delta_ns = diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index 3b599518c322..c12314c9e86f 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c @@ -287,7 +287,7 @@ static struct clock_event_device mfgpt_clockevent = { .set_mode = mfgpt_set_mode, .set_next_event = mfgpt_next_event, .rating = 250, - .cpumask = CPU_MASK_ALL, + .cpumask = cpu_all_mask, .shift = 32 }; diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 254ee07f8635..c4c1f9e09402 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c @@ -226,7 +226,7 @@ static void __devinit vmi_time_init_clockevent(void) /* Upper bound is clockevent's use of ulong for cycle deltas. */ evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt); evt->min_delta_ns = clockevent_delta2ns(1, evt); - evt->cpumask = cpumask_of_cpu(cpu); + evt->cpumask = cpumask_of(cpu); printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n", evt->name, evt->mult, evt->shift); diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index a5d8e1ace1cf..104c8220a383 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -737,7 +737,7 @@ static void lguest_time_init(void) /* We can't set cpumask in the initializer: damn C limitations! Set it * here and register our timer device. */ - lguest_clockevent.cpumask = cpumask_of_cpu(0); + lguest_clockevent.cpumask = cpumask_of(0); clockevents_register_device(&lguest_clockevent); /* Finally, we unblock the timer interrupt. 
*/ diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index c9f7cda48ed7..65d75a6be0ba 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -437,7 +437,7 @@ void xen_setup_timer(int cpu) evt = &per_cpu(xen_clock_events, cpu); memcpy(evt, xen_clockevent, sizeof(*evt)); - evt->cpumask = cpumask_of_cpu(cpu); + evt->cpumask = cpumask_of(cpu); evt->irq = irq; setup_runstate_info(cpu); diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index f450588e5858..254f1064d973 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -154,7 +154,6 @@ static struct tc_clkevt_device clkevt = { .shift = 32, /* Should be lower than at91rm9200's system timer */ .rating = 125, - .cpumask = CPU_MASK_CPU0, .set_next_event = tc_next_event, .set_mode = tc_mode, }, @@ -195,6 +194,7 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) clkevt.clkevt.max_delta_ns = clockevent_delta2ns(0xffff, &clkevt.clkevt); clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1; + clkevt.clkevt.cpumask = cpumask_of(0); setup_irq(irq, &tc_irqaction); diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index ed3a5d473e52..cea153697ec7 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -82,13 +82,13 @@ struct clock_event_device { int shift; int rating; int irq; - cpumask_t cpumask; + const struct cpumask *cpumask; int (*set_next_event)(unsigned long evt, struct clock_event_device *); void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); void (*event_handler)(struct clock_event_device *); - void (*broadcast)(cpumask_t mask); + void (*broadcast)(const struct cpumask *mask); struct list_head list; enum clock_event_mode mode; ktime_t next_event; diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index f8d968063cea..ea2f48af83cf 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -166,6 +166,8 @@ static void clockevents_notify_released(void) void clockevents_register_device(struct clock_event_device *dev) { BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); + BUG_ON(!dev->cpumask); + /* * A nsec2cyc multiplicator of 0 is invalid and we'd crash * on it, so fix it up and emit a warning: diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index f98a1b7b16e9..9590af2327be 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -150,7 +150,7 @@ static void tick_do_broadcast(cpumask_t mask) */ cpu = first_cpu(mask); td = &per_cpu(tick_cpu_device, cpu); - td->evtdev->broadcast(mask); + td->evtdev->broadcast(&mask); } } diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index ab65d217583f..f8372be74122 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -171,7 +171,7 @@ static void tick_setup_device(struct tick_device *td, * When the device is not per cpu, pin the interrupt to the * current cpu: */ - if (!cpumask_equal(&newdev->cpumask, cpumask)) + if (!cpumask_equal(newdev->cpumask, cpumask)) irq_set_affinity(newdev->irq, cpumask); /* @@ -202,14 +202,14 @@ static int tick_check_new_device(struct clock_event_device *newdev) spin_lock_irqsave(&tick_device_lock, flags); cpu = smp_processor_id(); - if (!cpu_isset(cpu, newdev->cpumask)) + if (!cpumask_test_cpu(cpu, newdev->cpumask)) goto out_bc; td = &per_cpu(tick_cpu_device, cpu); curdev = td->evtdev; /* cpu local device ? 
*/ - if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) { + if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) { /* * If the cpu affinity of the device interrupt can not @@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) * If we have a cpu local device already, do not replace it * by a non cpu local device */ - if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu))) + if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) goto out_bc; } -- cgit v1.2.3 From c8cae544bba6aee0f5cb0756dbab1a71d2c68737 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 09:13:11 -0800 Subject: x86: fix build error with post-merge of tip/cpus4096 and rr-for-ingo/master. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ingo Molnar wrote: > allyes64 build failure: > > arch/x86/kernel/io_apic.c: In function ‘set_ir_ioapic_affinity_irq_desc’: > arch/x86/kernel/io_apic.c:2295: error: incompatible type for argument 2 of > ‘migrate_ioapic_irq_desc’ > arch/x86/kernel/io_apic.c: In function ‘ir_set_msi_irq_affinity’: > arch/x86/kernel/io_apic.c:3205: error: incompatible type for argument 2 of > ‘set_extra_move_desc’ > make[1]: *** wait: No child processes. Stop. Here's a small patch to correct the build error with the post-merge tree. Built and boot-tested. I'll will reset the follow on patches in my brand new git tree to accommodate this change. Fix two references in io_apic.c that were incorrect. Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index d7f0993b8056..3d7d0d55253f 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -2292,7 +2292,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, return; } - migrate_ioapic_irq_desc(desc, mask); + migrate_ioapic_irq_desc(desc, *mask); } static void set_ir_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) @@ -3203,7 +3203,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, if (assign_irq_vector(irq, cfg, *mask)) return; - set_extra_move_desc(desc, mask); + set_extra_move_desc(desc, *mask); cpumask_and(&tmp, &cfg->domain, mask); dest = cpu_mask_to_apicid(tmp); -- cgit v1.2.3 From 36f5101a60de8f79c0d1ca06e50660bf5129e02c Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:33:51 -0800 Subject: x86: enable MAXSMP Impact: activates new off-stack cpumask code on MAXSMP (non-default) x86 configs Set MAXSMP to enable CONFIG_CPUMASK_OFFSTACK which moves cpumask's off the stack (and in structs) when using cpumask_var_t. Signed-off-by: Mike Travis Signed-off-by: Rusty Russell Acked-by: Ingo Molnar --- arch/x86/Kconfig | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d99eeb7915c6..1fd44352f27c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -591,16 +591,17 @@ config IOMMU_HELPER config MAXSMP bool "Configure Maximum number of SMP Processors and NUMA Nodes" - depends on X86_64 && SMP && BROKEN + depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL + select CPUMASK_OFFSTACK default n help Configure maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. 
config NR_CPUS - int "Maximum number of CPUs (2-512)" if !MAXSMP - range 2 512 depends on SMP + int "Maximum number of CPUs" if SMP && !MAXSMP + range 2 512 if SMP && !MAXSMP default "4096" if MAXSMP default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000 default "8" -- cgit v1.2.3 From e7986739a76cde5079da08809d8bbc6878387ae0 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:33:52 -0800 Subject: x86 smp: modify send_IPI_mask interface to accept cpumask_t pointers Impact: cleanup, change parameter passing * Change genapic interfaces to accept cpumask_t pointers where possible. * Modify external callers to use cpumask_t pointers in function calls. * Create new send_IPI_mask_allbutself which is the same as the send_IPI_mask functions but removes smp_processor_id() from list. This removes another common need for a temporary cpumask_t variable. * Functions that used a temp cpumask_t variable for: cpumask_t allbutme = cpu_online_map; cpu_clear(smp_processor_id(), allbutme); if (!cpus_empty(allbutme)) ... become: if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) ... * Other minor code optimizations (like using cpus_clear instead of CPU_MASK_NONE, etc.) Applies to linux-2.6.tip/master. Signed-off-by: Mike Travis Signed-off-by: Rusty Russell Acked-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 14 +-- arch/x86/include/asm/bigsmp/ipi.h | 9 +- arch/x86/include/asm/es7000/apic.h | 38 +++---- arch/x86/include/asm/es7000/ipi.h | 9 +- arch/x86/include/asm/genapic_32.h | 9 +- arch/x86/include/asm/genapic_64.h | 11 +- arch/x86/include/asm/ipi.h | 21 +++- arch/x86/include/asm/mach-default/mach_apic.h | 17 ++- arch/x86/include/asm/mach-default/mach_ipi.h | 18 ++-- arch/x86/include/asm/numaq/apic.h | 6 +- arch/x86/include/asm/numaq/ipi.h | 9 +- arch/x86/include/asm/smp.h | 6 +- arch/x86/include/asm/summit/apic.h | 12 +-- arch/x86/include/asm/summit/ipi.h | 9 +- arch/x86/kernel/apic.c | 6 +- arch/x86/kernel/crash.c | 5 +- arch/x86/kernel/genapic_flat_64.c | 76 +++++++++----- arch/x86/kernel/genx2apic_cluster.c | 60 +++++++---- arch/x86/kernel/genx2apic_phys.c | 55 +++++++--- arch/x86/kernel/genx2apic_uv_x.c | 43 +++++--- arch/x86/kernel/io_apic.c | 145 +++++++++++++------------- arch/x86/kernel/ipi.c | 26 +++-- arch/x86/kernel/smp.c | 8 +- arch/x86/kernel/tlb_32.c | 2 +- arch/x86/kernel/tlb_64.c | 2 +- arch/x86/mach-generic/bigsmp.c | 5 +- arch/x86/mach-generic/es7000.c | 5 +- arch/x86/mach-generic/numaq.c | 5 +- arch/x86/mach-generic/summit.c | 5 +- arch/x86/xen/smp.c | 17 ++- 30 files changed, 380 insertions(+), 273 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index ce547f24a1cd..dc6225ca48ad 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -9,12 +9,12 @@ static inline int apic_id_registered(void) return (1); } -static inline cpumask_t target_cpus(void) +static inline const cpumask_t *target_cpus(void) { #ifdef CONFIG_SMP - return cpu_online_map; + return &cpu_online_map; #else - return cpumask_of_cpu(0); + return &cpumask_of_cpu(0); #endif } @@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid) static inline int cpu_present_to_apicid(int mps_cpu) { - if (mps_cpu < NR_CPUS) + if (mps_cpu < nr_cpu_ids) return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); return BAD_APICID; @@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[]; /* Mapping from cpu number to logical apicid */ static inline int cpu_to_logical_apicid(int cpu) { - if (cpu 
>= NR_CPUS) + if (cpu >= nr_cpu_ids) return BAD_APICID; return cpu_physical_id(cpu); } @@ -119,12 +119,12 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) } /* As we are using single CPU as destination, pick only one CPU here */ -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) +static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) { int cpu; int apicid; - cpu = first_cpu(cpumask); + cpu = first_cpu(*cpumask); apicid = cpu_to_logical_apicid(cpu); return apicid; } diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h index 9404c535b7ec..63553e9f22b2 100644 --- a/arch/x86/include/asm/bigsmp/ipi.h +++ b/arch/x86/include/asm/bigsmp/ipi.h @@ -1,9 +1,10 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H -void send_IPI_mask_sequence(cpumask_t mask, int vector); +void send_IPI_mask_sequence(const cpumask_t *mask, int vector); +void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); -static inline void send_IPI_mask(cpumask_t mask, int vector) +static inline void send_IPI_mask(const cpumask_t *mask, int vector) { send_IPI_mask_sequence(mask, vector); } @@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector) cpu_clear(smp_processor_id(), mask); if (!cpus_empty(mask)) - send_IPI_mask(mask, vector); + send_IPI_mask(&mask, vector); } static inline void send_IPI_all(int vector) { - send_IPI_mask(cpu_online_map, vector); + send_IPI_mask(&cpu_online_map, vector); } #endif /* __ASM_MACH_IPI_H */ diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index e24ef876915f..4cac0837bb40 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -9,14 +9,14 @@ static inline int apic_id_registered(void) return (1); } -static inline cpumask_t target_cpus_cluster(void) +static inline const cpumask_t *target_cpus_cluster(void) { - return CPU_MASK_ALL; + return &CPU_MASK_ALL; } -static inline cpumask_t target_cpus(void) +static inline const cpumask_t *target_cpus(void) { - return cpumask_of_cpu(smp_processor_id()); + return &cpumask_of_cpu(smp_processor_id()); } #define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) @@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS]; static inline void setup_apic_routing(void) { int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); - printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", + printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", (apic_version[apic] == 0x14) ? 
- "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]); + "Physical Cluster" : "Logical Cluster", + nr_ioapics, cpus_addr(*target_cpus())[0]); } static inline int multi_timer_check(int apic, int irq) @@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu) { if (!mps_cpu) return boot_cpu_physical_apicid; - else if (mps_cpu < NR_CPUS) + else if (mps_cpu < nr_cpu_ids) return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); else return BAD_APICID; @@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[]; static inline int cpu_to_logical_apicid(int cpu) { #ifdef CONFIG_SMP - if (cpu >= NR_CPUS) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; + if (cpu >= nr_cpu_ids) + return BAD_APICID; + return (int)cpu_2_logical_apicid[cpu]; #else return logical_smp_processor_id(); #endif @@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid) return (1); } -static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask) +static inline unsigned int +cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) { int num_bits_set; int cpus_found = 0; int cpu; int apicid; - num_bits_set = cpus_weight(cpumask); + num_bits_set = cpumask_weight(cpumask); /* Return id to all */ if (num_bits_set == NR_CPUS) return 0xFF; @@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask) * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. */ - cpu = first_cpu(cpumask); + cpu = cpumask_first(cpumask); apicid = cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, cpumask)) { + if (cpumask_test_cpu(cpu, cpumask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ @@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask) return apicid; } -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) +static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) { int num_bits_set; int cpus_found = 0; int cpu; int apicid; - num_bits_set = cpus_weight(cpumask); + num_bits_set = cpus_weight(*cpumask); /* Return id to all */ if (num_bits_set == NR_CPUS) return cpu_to_logical_apicid(0); @@ -194,10 +196,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. 
*/ - cpu = first_cpu(cpumask); + cpu = first_cpu(*cpumask); apicid = cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, cpumask)) { + if (cpu_isset(cpu, *cpumask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h index 632a955fcc0a..1a8507265f91 100644 --- a/arch/x86/include/asm/es7000/ipi.h +++ b/arch/x86/include/asm/es7000/ipi.h @@ -1,9 +1,10 @@ #ifndef __ASM_ES7000_IPI_H #define __ASM_ES7000_IPI_H -void send_IPI_mask_sequence(cpumask_t mask, int vector); +void send_IPI_mask_sequence(const cpumask_t *mask, int vector); +void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); -static inline void send_IPI_mask(cpumask_t mask, int vector) +static inline void send_IPI_mask(const cpumask_t *mask, int vector) { send_IPI_mask_sequence(mask, vector); } @@ -13,12 +14,12 @@ static inline void send_IPI_allbutself(int vector) cpumask_t mask = cpu_online_map; cpu_clear(smp_processor_id(), mask); if (!cpus_empty(mask)) - send_IPI_mask(mask, vector); + send_IPI_mask(&mask, vector); } static inline void send_IPI_all(int vector) { - send_IPI_mask(cpu_online_map, vector); + send_IPI_mask(&cpu_online_map, vector); } #endif /* __ASM_ES7000_IPI_H */ diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index 0ac17d33a8c7..b21ed21c574d 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -24,7 +24,7 @@ struct genapic { int (*probe)(void); int (*apic_id_registered)(void); - cpumask_t (*target_cpus)(void); + const cpumask_t *(*target_cpus)(void); int int_delivery_mode; int int_dest_mode; int ESR_DISABLE; @@ -57,12 +57,13 @@ struct genapic { unsigned (*get_apic_id)(unsigned long x); unsigned long apic_id_mask; - unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); - cpumask_t (*vector_allocation_domain)(int cpu); + unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask); + void (*vector_allocation_domain)(int cpu, cpumask_t *retmask); #ifdef CONFIG_SMP /* ipi */ - void (*send_IPI_mask)(cpumask_t mask, int vector); + void (*send_IPI_mask)(const cpumask_t *mask, int vector); + void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); #endif diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h index 2cae011668b7..a020e7d35a40 100644 --- a/arch/x86/include/asm/genapic_64.h +++ b/arch/x86/include/asm/genapic_64.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_GENAPIC_64_H #define _ASM_X86_GENAPIC_64_H +#include + /* * Copyright 2004 James Cleverdon, IBM. 
* Subject to the GNU Public License, v.2 @@ -18,16 +20,17 @@ struct genapic { u32 int_delivery_mode; u32 int_dest_mode; int (*apic_id_registered)(void); - cpumask_t (*target_cpus)(void); - cpumask_t (*vector_allocation_domain)(int cpu); + const cpumask_t *(*target_cpus)(void); + void (*vector_allocation_domain)(int cpu, cpumask_t *retmask); void (*init_apic_ldr)(void); /* ipi */ - void (*send_IPI_mask)(cpumask_t mask, int vector); + void (*send_IPI_mask)(const cpumask_t *mask, int vector); + void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); void (*send_IPI_self)(int vector); /* */ - unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); + unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask); unsigned int (*phys_pkg_id)(int index_msb); unsigned int (*get_apic_id)(unsigned long x); unsigned long (*set_apic_id)(unsigned int id); diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index f89dffb28aa9..24b6e613edfa 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -117,7 +117,7 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector, native_apic_mem_write(APIC_ICR, cfg); } -static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) +static inline void send_IPI_mask_sequence(const cpumask_t *mask, int vector) { unsigned long flags; unsigned long query_cpu; @@ -128,11 +128,28 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) * - mbligh */ local_irq_save(flags); - for_each_cpu_mask_nr(query_cpu, mask) { + for_each_cpu_mask_nr(query_cpu, *mask) { __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), vector, APIC_DEST_PHYSICAL); } local_irq_restore(flags); } +static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector) +{ + unsigned long flags; + unsigned int query_cpu; + unsigned int this_cpu = smp_processor_id(); + + /* See Hack comment above */ + + local_irq_save(flags); + for_each_cpu_mask_nr(query_cpu, *mask) + if (query_cpu != this_cpu) + __send_IPI_dest_field( + per_cpu(x86_cpu_to_apicid, query_cpu), + vector, APIC_DEST_PHYSICAL); + local_irq_restore(flags); +} + #endif /* _ASM_X86_IPI_H */ diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 6cb3a467e067..c18896b0508c 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -8,12 +8,12 @@ #define APIC_DFR_VALUE (APIC_DFR_FLAT) -static inline cpumask_t target_cpus(void) +static inline const cpumask_t *target_cpus(void) { #ifdef CONFIG_SMP - return cpu_online_map; + return &cpu_online_map; #else - return cpumask_of_cpu(0); + return &cpumask_of_cpu(0); #endif } @@ -61,9 +61,9 @@ static inline int apic_id_registered(void) return physid_isset(read_apic_id(), phys_cpu_present_map); } -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) +static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) { - return cpus_addr(cpumask)[0]; + return cpus_addr(*cpumask)[0]; } static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) @@ -88,7 +88,7 @@ static inline int apicid_to_node(int logical_apicid) #endif } -static inline cpumask_t vector_allocation_domain(int cpu) +static inline void vector_allocation_domain(int cpu, cpumask_t *retmask) { /* Careful. 
Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest @@ -98,8 +98,7 @@ static inline cpumask_t vector_allocation_domain(int cpu) * deliver interrupts to the wrong hyperthread when only one * hyperthread was specified in the interrupt desitination. */ - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; - return domain; + *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } }; } #endif @@ -131,7 +130,7 @@ static inline int cpu_to_logical_apicid(int cpu) static inline int cpu_present_to_apicid(int mps_cpu) { - if (mps_cpu < NR_CPUS && cpu_present(mps_cpu)) + if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); else return BAD_APICID; diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h index fabca01ebacf..9353ab854a10 100644 --- a/arch/x86/include/asm/mach-default/mach_ipi.h +++ b/arch/x86/include/asm/mach-default/mach_ipi.h @@ -4,7 +4,8 @@ /* Avoid include hell */ #define NMI_VECTOR 0x02 -void send_IPI_mask_bitmask(cpumask_t mask, int vector); +void send_IPI_mask_bitmask(const cpumask_t *mask, int vector); +void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); void __send_IPI_shortcut(unsigned int shortcut, int vector); extern int no_broadcast; @@ -12,28 +13,27 @@ extern int no_broadcast; #ifdef CONFIG_X86_64 #include #define send_IPI_mask (genapic->send_IPI_mask) +#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself) #else -static inline void send_IPI_mask(cpumask_t mask, int vector) +static inline void send_IPI_mask(const cpumask_t *mask, int vector) { send_IPI_mask_bitmask(mask, vector); } +void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); #endif static inline void __local_send_IPI_allbutself(int vector) { - if (no_broadcast || vector == NMI_VECTOR) { - cpumask_t mask = cpu_online_map; - - cpu_clear(smp_processor_id(), mask); - send_IPI_mask(mask, vector); - } else + if (no_broadcast || vector == NMI_VECTOR) + send_IPI_mask_allbutself(&cpu_online_map, vector); + else __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); } static inline void __local_send_IPI_all(int vector) { if (no_broadcast || vector == NMI_VECTOR) - send_IPI_mask(cpu_online_map, vector); + send_IPI_mask(&cpu_online_map, vector); else __send_IPI_shortcut(APIC_DEST_ALLINC, vector); } diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 0bf2a06b7a4e..1df7ebe738e5 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -7,9 +7,9 @@ #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) -static inline cpumask_t target_cpus(void) +static inline const cpumask_t *target_cpus(void) { - return CPU_MASK_ALL; + return &CPU_MASK_ALL; } #define NO_BALANCE_IRQ (1) @@ -122,7 +122,7 @@ static inline void enable_apic_mode(void) * We use physical apicids here, not logical, so just return the default * physical broadcast to stop people from breaking us */ -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) +static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) { return (int) 0xF; } diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h index 935588d286cf..c734d7acc430 100644 --- a/arch/x86/include/asm/numaq/ipi.h +++ b/arch/x86/include/asm/numaq/ipi.h @@ -1,9 +1,10 @@ #ifndef __ASM_NUMAQ_IPI_H #define __ASM_NUMAQ_IPI_H -void send_IPI_mask_sequence(cpumask_t, int vector); +void send_IPI_mask_sequence(const cpumask_t *mask, int vector); +void 
send_IPI_mask_allbutself(const cpumask_t *mask, int vector); -static inline void send_IPI_mask(cpumask_t mask, int vector) +static inline void send_IPI_mask(const cpumask_t *mask, int vector) { send_IPI_mask_sequence(mask, vector); } @@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector) cpu_clear(smp_processor_id(), mask); if (!cpus_empty(mask)) - send_IPI_mask(mask, vector); + send_IPI_mask(&mask, vector); } static inline void send_IPI_all(int vector) { - send_IPI_mask(cpu_online_map, vector); + send_IPI_mask(&cpu_online_map, vector); } #endif /* __ASM_NUMAQ_IPI_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index d12811ce51d9..c4a9aa52df6e 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -60,7 +60,7 @@ struct smp_ops { void (*cpu_die)(unsigned int cpu); void (*play_dead)(void); - void (*send_call_func_ipi)(cpumask_t mask); + void (*send_call_func_ipi)(const cpumask_t *mask); void (*send_call_func_single_ipi)(int cpu); }; @@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu) static inline void arch_send_call_function_ipi(cpumask_t mask) { - smp_ops.send_call_func_ipi(mask); + smp_ops.send_call_func_ipi(&mask); } void cpu_disable_common(void); @@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu); void native_play_dead(void); void play_dead_common(void); -void native_send_call_func_ipi(cpumask_t mask); +void native_send_call_func_ipi(const cpumask_t *mask); void native_send_call_func_single_ipi(int cpu); extern void prefill_possible_map(void); diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 9b3070f1c2ac..437dc83725ca 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -14,13 +14,13 @@ #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) -static inline cpumask_t target_cpus(void) +static inline const cpumask_t *target_cpus(void) { /* CPU_MASK_ALL (0xff) has undefined behaviour with * dest_LowestPrio mode logical clustered apic interrupt routing * Just start on cpu 0. IRQ balancing will spread load */ - return cpumask_of_cpu(0); + return &cpumask_of_cpu(0); } #define INT_DELIVERY_MODE (dest_LowestPrio) @@ -137,14 +137,14 @@ static inline void enable_apic_mode(void) { } -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) +static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) { int num_bits_set; int cpus_found = 0; int cpu; int apicid; - num_bits_set = cpus_weight(cpumask); + num_bits_set = cpus_weight(*cpumask); /* Return id to all */ if (num_bits_set == NR_CPUS) return (int) 0xFF; @@ -152,10 +152,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. 
*/ - cpu = first_cpu(cpumask); + cpu = first_cpu(*cpumask); apicid = cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, cpumask)) { + if (cpu_isset(cpu, *cpumask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)){ diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h index 53bd1e7bd7b4..a8a2c24f50cc 100644 --- a/arch/x86/include/asm/summit/ipi.h +++ b/arch/x86/include/asm/summit/ipi.h @@ -1,9 +1,10 @@ #ifndef __ASM_SUMMIT_IPI_H #define __ASM_SUMMIT_IPI_H -void send_IPI_mask_sequence(cpumask_t mask, int vector); +void send_IPI_mask_sequence(const cpumask_t *mask, int vector); +void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); -static inline void send_IPI_mask(cpumask_t mask, int vector) +static inline void send_IPI_mask(const cpumask_t *mask, int vector) { send_IPI_mask_sequence(mask, vector); } @@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector) cpu_clear(smp_processor_id(), mask); if (!cpus_empty(mask)) - send_IPI_mask(mask, vector); + send_IPI_mask(&mask, vector); } static inline void send_IPI_all(int vector) { - send_IPI_mask(cpu_online_map, vector); + send_IPI_mask(&cpu_online_map, vector); } #endif /* __ASM_SUMMIT_IPI_H */ diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index b2cef49f3085..a375791c08ca 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -141,7 +141,7 @@ static int lapic_next_event(unsigned long delta, struct clock_event_device *evt); static void lapic_timer_setup(enum clock_event_mode mode, struct clock_event_device *evt); -static void lapic_timer_broadcast(const struct cpumask *mask); +static void lapic_timer_broadcast(const cpumask_t *mask); static void apic_pm_activate(void); /* @@ -453,10 +453,10 @@ static void lapic_timer_setup(enum clock_event_mode mode, /* * Local APIC timer broadcast function */ -static void lapic_timer_broadcast(const struct cpumask *mask) +static void lapic_timer_broadcast(const cpumask_t *mask) { #ifdef CONFIG_SMP - send_IPI_mask(*mask, LOCAL_TIMER_VECTOR); + send_IPI_mask(mask, LOCAL_TIMER_VECTOR); #endif } diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 268553817909..81e01f7b1d12 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -77,10 +77,7 @@ static int crash_nmi_callback(struct notifier_block *self, static void smp_send_nmi_allbutself(void) { - cpumask_t mask = cpu_online_map; - cpu_clear(safe_smp_processor_id(), mask); - if (!cpus_empty(mask)) - send_IPI_mask(mask, NMI_VECTOR); + send_IPI_allbutself(NMI_VECTOR); } static struct notifier_block crash_nmi_nb = { diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index c0262791bda4..50eebd0328fe 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) return 1; } -static cpumask_t flat_target_cpus(void) +static const cpumask_t *flat_target_cpus(void) { - return cpu_online_map; + return &cpu_online_map; } -static cpumask_t flat_vector_allocation_domain(int cpu) +static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask) { /* Careful. 
Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest @@ -45,8 +45,7 @@ static cpumask_t flat_vector_allocation_domain(int cpu) * deliver interrupts to the wrong hyperthread when only one * hyperthread was specified in the interrupt desitination. */ - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; - return domain; + *retmask = (cpumask_t) { {[0] = APIC_ALL_CPUS, } }; } /* @@ -69,9 +68,8 @@ static void flat_init_apic_ldr(void) apic_write(APIC_LDR, val); } -static void flat_send_IPI_mask(cpumask_t cpumask, int vector) +static inline void _flat_send_IPI_mask(unsigned long mask, int vector) { - unsigned long mask = cpus_addr(cpumask)[0]; unsigned long flags; local_irq_save(flags); @@ -79,20 +77,40 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector) local_irq_restore(flags); } +static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector) +{ + unsigned long mask = cpus_addr(*cpumask)[0]; + + _flat_send_IPI_mask(mask, vector); +} + +static void flat_send_IPI_mask_allbutself(const cpumask_t *cpumask, int vector) +{ + unsigned long mask = cpus_addr(*cpumask)[0]; + int cpu = smp_processor_id(); + + if (cpu < BITS_PER_LONG) + clear_bit(cpu, &mask); + _flat_send_IPI_mask(mask, vector); +} + static void flat_send_IPI_allbutself(int vector) { + int cpu = smp_processor_id(); #ifdef CONFIG_HOTPLUG_CPU int hotplug = 1; #else int hotplug = 0; #endif if (hotplug || vector == NMI_VECTOR) { - cpumask_t allbutme = cpu_online_map; + if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) { + unsigned long mask = cpus_addr(cpu_online_map)[0]; - cpu_clear(smp_processor_id(), allbutme); + if (cpu < BITS_PER_LONG) + clear_bit(cpu, &mask); - if (!cpus_empty(allbutme)) - flat_send_IPI_mask(allbutme, vector); + _flat_send_IPI_mask(mask, vector); + } } else if (num_online_cpus() > 1) { __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); } @@ -101,7 +119,7 @@ static void flat_send_IPI_allbutself(int vector) static void flat_send_IPI_all(int vector) { if (vector == NMI_VECTOR) - flat_send_IPI_mask(cpu_online_map, vector); + flat_send_IPI_mask(&cpu_online_map, vector); else __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); } @@ -135,9 +153,9 @@ static int flat_apic_id_registered(void) return physid_isset(read_xapic_id(), phys_cpu_present_map); } -static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) +static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask) { - return cpus_addr(cpumask)[0] & APIC_ALL_CPUS; + return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS; } static unsigned int phys_pkg_id(int index_msb) @@ -157,6 +175,7 @@ struct genapic apic_flat = { .send_IPI_all = flat_send_IPI_all, .send_IPI_allbutself = flat_send_IPI_allbutself, .send_IPI_mask = flat_send_IPI_mask, + .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, .send_IPI_self = apic_send_IPI_self, .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, .phys_pkg_id = phys_pkg_id, @@ -188,35 +207,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) return 0; } -static cpumask_t physflat_target_cpus(void) +static const cpumask_t *physflat_target_cpus(void) { - return cpu_online_map; + return &cpu_online_map; } -static cpumask_t physflat_vector_allocation_domain(int cpu) +static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask) { - return cpumask_of_cpu(cpu); + cpus_clear(*retmask); + cpu_set(cpu, *retmask); } -static void physflat_send_IPI_mask(cpumask_t cpumask, int vector) +static void 
physflat_send_IPI_mask(const cpumask_t *cpumask, int vector) { send_IPI_mask_sequence(cpumask, vector); } -static void physflat_send_IPI_allbutself(int vector) +static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask, + int vector) { - cpumask_t allbutme = cpu_online_map; + send_IPI_mask_allbutself(cpumask, vector); +} - cpu_clear(smp_processor_id(), allbutme); - physflat_send_IPI_mask(allbutme, vector); +static void physflat_send_IPI_allbutself(int vector) +{ + send_IPI_mask_allbutself(&cpu_online_map, vector); } static void physflat_send_IPI_all(int vector) { - physflat_send_IPI_mask(cpu_online_map, vector); + physflat_send_IPI_mask(&cpu_online_map, vector); } -static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) +static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask) { int cpu; @@ -224,7 +247,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = first_cpu(cpumask); + cpu = first_cpu(*cpumask); if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else @@ -243,6 +266,7 @@ struct genapic apic_physflat = { .send_IPI_all = physflat_send_IPI_all, .send_IPI_allbutself = physflat_send_IPI_allbutself, .send_IPI_mask = physflat_send_IPI_mask, + .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, .send_IPI_self = apic_send_IPI_self, .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, .phys_pkg_id = phys_pkg_id, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index f6a2c8eb48a6..f5fa9a91ad38 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ -static cpumask_t x2apic_target_cpus(void) +static const cpumask_t *x2apic_target_cpus(void) { - return cpumask_of_cpu(0); + return &cpumask_of_cpu(0); } /* * for now each logical cpu is in its own vector allocation domain. */ -static cpumask_t x2apic_vector_allocation_domain(int cpu) +static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask) { - cpumask_t domain = CPU_MASK_NONE; - cpu_set(cpu, domain); - return domain; + cpus_clear(*retmask); + cpu_set(cpu, *retmask); } static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, @@ -56,32 +55,52 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, * at once. We have 16 cpu's in a cluster. This will minimize IPI register * writes. 
*/ -static void x2apic_send_IPI_mask(cpumask_t mask, int vector) +static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector) { unsigned long flags; unsigned long query_cpu; local_irq_save(flags); - for_each_cpu_mask(query_cpu, mask) { - __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu), - vector, APIC_DEST_LOGICAL); - } + for_each_cpu_mask_nr(query_cpu, *mask) + __x2apic_send_IPI_dest( + per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, APIC_DEST_LOGICAL); local_irq_restore(flags); } -static void x2apic_send_IPI_allbutself(int vector) +static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector) { - cpumask_t mask = cpu_online_map; + unsigned long flags; + unsigned long query_cpu; + unsigned long this_cpu = smp_processor_id(); - cpu_clear(smp_processor_id(), mask); + local_irq_save(flags); + for_each_cpu_mask_nr(query_cpu, *mask) + if (query_cpu != this_cpu) + __x2apic_send_IPI_dest( + per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, APIC_DEST_LOGICAL); + local_irq_restore(flags); +} - if (!cpus_empty(mask)) - x2apic_send_IPI_mask(mask, vector); +static void x2apic_send_IPI_allbutself(int vector) +{ + unsigned long flags; + unsigned long query_cpu; + unsigned long this_cpu = smp_processor_id(); + + local_irq_save(flags); + for_each_online_cpu(query_cpu) + if (query_cpu != this_cpu) + __x2apic_send_IPI_dest( + per_cpu(x86_cpu_to_logical_apicid, query_cpu), + vector, APIC_DEST_LOGICAL); + local_irq_restore(flags); } static void x2apic_send_IPI_all(int vector) { - x2apic_send_IPI_mask(cpu_online_map, vector); + x2apic_send_IPI_mask(&cpu_online_map, vector); } static int x2apic_apic_id_registered(void) @@ -89,7 +108,7 @@ static int x2apic_apic_id_registered(void) return 1; } -static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) +static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask) { int cpu; @@ -97,8 +116,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = first_cpu(cpumask); - if ((unsigned)cpu < NR_CPUS) + cpu = first_cpu(*cpumask); + if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_logical_apicid, cpu); else return BAD_APICID; @@ -150,6 +169,7 @@ struct genapic apic_x2apic_cluster = { .send_IPI_all = x2apic_send_IPI_all, .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_mask = x2apic_send_IPI_mask, + .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, .send_IPI_self = x2apic_send_IPI_self, .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, .phys_pkg_id = phys_pkg_id, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index d042211768b7..41c27b2f3d01 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. 
*/ -static cpumask_t x2apic_target_cpus(void) +static const cpumask_t *x2apic_target_cpus(void) { - return cpumask_of_cpu(0); + return &cpumask_of_cpu(0); } -static cpumask_t x2apic_vector_allocation_domain(int cpu) +static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask) { - cpumask_t domain = CPU_MASK_NONE; - cpu_set(cpu, domain); - return domain; + cpus_clear(*retmask); + cpu_set(cpu, *retmask); } static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, @@ -54,32 +53,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, x2apic_icr_write(cfg, apicid); } -static void x2apic_send_IPI_mask(cpumask_t mask, int vector) +static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector) { unsigned long flags; unsigned long query_cpu; local_irq_save(flags); - for_each_cpu_mask(query_cpu, mask) { + for_each_cpu_mask_nr(query_cpu, *mask) { __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), vector, APIC_DEST_PHYSICAL); } local_irq_restore(flags); } -static void x2apic_send_IPI_allbutself(int vector) +static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector) { - cpumask_t mask = cpu_online_map; + unsigned long flags; + unsigned long query_cpu; + unsigned long this_cpu = smp_processor_id(); + + local_irq_save(flags); + for_each_cpu_mask_nr(query_cpu, *mask) { + if (query_cpu != this_cpu) + __x2apic_send_IPI_dest( + per_cpu(x86_cpu_to_apicid, query_cpu), + vector, APIC_DEST_PHYSICAL); + } + local_irq_restore(flags); +} - cpu_clear(smp_processor_id(), mask); +static void x2apic_send_IPI_allbutself(int vector) +{ + unsigned long flags; + unsigned long query_cpu; + unsigned long this_cpu = smp_processor_id(); - if (!cpus_empty(mask)) - x2apic_send_IPI_mask(mask, vector); + local_irq_save(flags); + for_each_online_cpu(query_cpu) + if (query_cpu != this_cpu) + __x2apic_send_IPI_dest( + per_cpu(x86_cpu_to_apicid, query_cpu), + vector, APIC_DEST_PHYSICAL); + local_irq_restore(flags); } static void x2apic_send_IPI_all(int vector) { - x2apic_send_IPI_mask(cpu_online_map, vector); + x2apic_send_IPI_mask(&cpu_online_map, vector); } static int x2apic_apic_id_registered(void) @@ -87,7 +107,7 @@ static int x2apic_apic_id_registered(void) return 1; } -static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) +static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask) { int cpu; @@ -95,8 +115,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = first_cpu(cpumask); - if ((unsigned)cpu < NR_CPUS) + cpu = first_cpu(*cpumask); + if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else return BAD_APICID; @@ -145,6 +165,7 @@ struct genapic apic_x2apic_phys = { .send_IPI_all = x2apic_send_IPI_all, .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_mask = x2apic_send_IPI_mask, + .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, .send_IPI_self = x2apic_send_IPI_self, .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, .phys_pkg_id = phys_pkg_id, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 2c7dbdb98278..010659415ae4 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -75,16 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. 
*/ -static cpumask_t uv_target_cpus(void) +static const cpumask_t *uv_target_cpus(void) { - return cpumask_of_cpu(0); + return &cpumask_of_cpu(0); } -static cpumask_t uv_vector_allocation_domain(int cpu) +static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask) { - cpumask_t domain = CPU_MASK_NONE; - cpu_set(cpu, domain); - return domain; + cpus_clear(*retmask); + cpu_set(cpu, *retmask); } int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) @@ -123,28 +122,37 @@ static void uv_send_IPI_one(int cpu, int vector) uv_write_global_mmr64(pnode, UVH_IPI_INT, val); } -static void uv_send_IPI_mask(cpumask_t mask, int vector) +static void uv_send_IPI_mask(const cpumask_t *mask, int vector) { unsigned int cpu; - for_each_possible_cpu(cpu) - if (cpu_isset(cpu, mask)) + for_each_cpu_mask_nr(cpu, *mask) + uv_send_IPI_one(cpu, vector); +} + +static void uv_send_IPI_mask_allbutself(const cpumask_t *mask, int vector) +{ + unsigned int cpu; + unsigned int this_cpu = smp_processor_id(); + + for_each_cpu_mask_nr(cpu, *mask) + if (cpu != this_cpu) uv_send_IPI_one(cpu, vector); } static void uv_send_IPI_allbutself(int vector) { - cpumask_t mask = cpu_online_map; - - cpu_clear(smp_processor_id(), mask); + unsigned int cpu; + unsigned int this_cpu = smp_processor_id(); - if (!cpus_empty(mask)) - uv_send_IPI_mask(mask, vector); + for_each_online_cpu(cpu) + if (cpu != this_cpu) + uv_send_IPI_one(cpu, vector); } static void uv_send_IPI_all(int vector) { - uv_send_IPI_mask(cpu_online_map, vector); + uv_send_IPI_mask(&cpu_online_map, vector); } static int uv_apic_id_registered(void) @@ -156,7 +164,7 @@ static void uv_init_apic_ldr(void) { } -static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) +static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask) { int cpu; @@ -164,7 +172,7 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. 
*/ - cpu = first_cpu(cpumask); + cpu = first_cpu(*cpumask); if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else @@ -218,6 +226,7 @@ struct genapic apic_x2apic_uv_x = { .send_IPI_all = uv_send_IPI_all, .send_IPI_allbutself = uv_send_IPI_allbutself, .send_IPI_mask = uv_send_IPI_mask, + .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, .send_IPI_self = uv_send_IPI_self, .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, .phys_pkg_id = phys_pkg_id, diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 3d7d0d55253f..7f23ce7f5518 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -231,7 +231,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq) #endif -static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) +static inline void +set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) { } @@ -396,7 +397,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq } } -static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask); +static int +assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) @@ -412,13 +414,13 @@ static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, irq = desc->irq; cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, *mask)) + if (assign_irq_vector(irq, cfg, mask)) return; - set_extra_move_desc(desc, *mask); + set_extra_move_desc(desc, mask); cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(tmp); + dest = cpu_mask_to_apicid(&tmp); /* * Only the high 8 bits are valid. */ @@ -1099,7 +1101,8 @@ void unlock_vector_lock(void) spin_unlock(&vector_lock); } -static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) +static int +__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) { /* * NOTE! The local APIC isn't very good at handling @@ -1115,35 +1118,32 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; unsigned int old_vector; int cpu; + cpumask_t tmp_mask; if ((cfg->move_in_progress) || cfg->move_cleanup_count) return -EBUSY; - /* Only try and allocate irqs on cpus that are present */ - cpus_and(mask, mask, cpu_online_map); - old_vector = cfg->vector; if (old_vector) { - cpumask_t tmp; - cpus_and(tmp, cfg->domain, mask); - if (!cpus_empty(tmp)) + cpus_and(tmp_mask, *mask, cpu_online_map); + cpus_and(tmp_mask, cfg->domain, tmp_mask); + if (!cpus_empty(tmp_mask)) return 0; } - for_each_cpu_mask_nr(cpu, mask) { - cpumask_t domain, new_mask; + /* Only try and allocate irqs on cpus that are present */ + for_each_cpu_and(cpu, mask, &cpu_online_map) { int new_cpu; int vector, offset; - domain = vector_allocation_domain(cpu); - cpus_and(new_mask, domain, cpu_online_map); + vector_allocation_domain(cpu, &tmp_mask); vector = current_vector; offset = current_offset; next: vector += 8; if (vector >= first_system_vector) { - /* If we run out of vectors on large boxen, must share them. */ + /* If out of vectors on large boxen, must share them. */ offset = (offset + 1) % 8; vector = FIRST_DEVICE_VECTOR + offset; } @@ -1156,7 +1156,7 @@ next: if (vector == SYSCALL_VECTOR) goto next; #endif - for_each_cpu_mask_nr(new_cpu, new_mask) + for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map) if (per_cpu(vector_irq, new_cpu)[vector] != -1) goto next; /* Found one! 
*/ @@ -1166,16 +1166,17 @@ next: cfg->move_in_progress = 1; cfg->old_domain = cfg->domain; } - for_each_cpu_mask_nr(new_cpu, new_mask) + for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map) per_cpu(vector_irq, new_cpu)[vector] = irq; cfg->vector = vector; - cfg->domain = domain; + cfg->domain = tmp_mask; return 0; } return -ENOSPC; } -static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) +static int +assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) { int err; unsigned long flags; @@ -1384,8 +1385,8 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de cfg = desc->chip_data; - mask = TARGET_CPUS; - if (assign_irq_vector(irq, cfg, mask)) + mask = *TARGET_CPUS; + if (assign_irq_vector(irq, cfg, &mask)) return; cpus_and(mask, cfg->domain, mask); @@ -1398,7 +1399,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, - cpu_mask_to_apicid(mask), trigger, polarity, + cpu_mask_to_apicid(&mask), trigger, polarity, cfg->vector)) { printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", mp_ioapics[apic].mp_apicid, pin); @@ -2121,7 +2122,7 @@ static int ioapic_retrigger_irq(unsigned int irq) unsigned long flags; spin_lock_irqsave(&vector_lock, flags); - send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); + send_IPI_mask(&cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); spin_unlock_irqrestore(&vector_lock, flags); return 1; @@ -2170,18 +2171,19 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration); * as simple as edge triggered migration and we can do the irq migration * with a simple atomic update to IO-APIC RTE. */ -static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) +static void +migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) { struct irq_cfg *cfg; - cpumask_t tmp, cleanup_mask; + cpumask_t tmpmask; struct irte irte; int modify_ioapic_rte; unsigned int dest; unsigned long flags; unsigned int irq; - cpus_and(tmp, mask, cpu_online_map); - if (cpus_empty(tmp)) + cpus_and(tmpmask, *mask, cpu_online_map); + if (cpus_empty(tmpmask)) return; irq = desc->irq; @@ -2194,8 +2196,8 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) set_extra_move_desc(desc, mask); - cpus_and(tmp, cfg->domain, mask); - dest = cpu_mask_to_apicid(tmp); + cpus_and(tmpmask, cfg->domain, *mask); + dest = cpu_mask_to_apicid(&tmpmask); modify_ioapic_rte = desc->status & IRQ_LEVEL; if (modify_ioapic_rte) { @@ -2213,13 +2215,13 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) modify_irte(irq, &irte); if (cfg->move_in_progress) { - cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); - cfg->move_cleanup_count = cpus_weight(cleanup_mask); - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); + cpus_and(tmpmask, cfg->old_domain, cpu_online_map); + cfg->move_cleanup_count = cpus_weight(tmpmask); + send_IPI_mask(&tmpmask, IRQ_MOVE_CLEANUP_VECTOR); cfg->move_in_progress = 0; } - desc->affinity = mask; + desc->affinity = *mask; } static int migrate_irq_remapped_level_desc(struct irq_desc *desc) @@ -2241,7 +2243,7 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc) } /* everthing is clear. 
we have right of way */ - migrate_ioapic_irq_desc(desc, desc->pending_mask); + migrate_ioapic_irq_desc(desc, &desc->pending_mask); ret = 0; desc->status &= ~IRQ_MOVE_PENDING; @@ -2292,7 +2294,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, return; } - migrate_ioapic_irq_desc(desc, *mask); + migrate_ioapic_irq_desc(desc, mask); } static void set_ir_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) @@ -2359,7 +2361,7 @@ static void irq_complete_move(struct irq_desc **descp) cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); cfg->move_cleanup_count = cpus_weight(cleanup_mask); - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); + send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); cfg->move_in_progress = 0; } } @@ -3089,13 +3091,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms cpumask_t tmp; cfg = irq_cfg(irq); - tmp = TARGET_CPUS; - err = assign_irq_vector(irq, cfg, tmp); + tmp = *TARGET_CPUS; + err = assign_irq_vector(irq, cfg, &tmp); if (err) return err; cpus_and(tmp, cfg->domain, tmp); - dest = cpu_mask_to_apicid(tmp); + dest = cpu_mask_to_apicid(&tmp); #ifdef CONFIG_INTR_REMAP if (irq_remapped(irq)) { @@ -3161,13 +3163,13 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) return; cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, *mask)) + if (assign_irq_vector(irq, cfg, mask)) return; - set_extra_move_desc(desc, *mask); + set_extra_move_desc(desc, mask); cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(tmp); + dest = cpu_mask_to_apicid(&tmp); read_msi_msg_desc(desc, &msg); @@ -3184,8 +3186,8 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) * Migrate the MSI irq to another cpumask. This migration is * done in the process context using interrupt-remapping hardware. 
*/ -static void ir_set_msi_irq_affinity(unsigned int irq, - const struct cpumask *mask) +static void +ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; @@ -3200,13 +3202,13 @@ static void ir_set_msi_irq_affinity(unsigned int irq, return; cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, *mask)) + if (assign_irq_vector(irq, cfg, mask)) return; - set_extra_move_desc(desc, *mask); + set_extra_move_desc(desc, mask); cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(tmp); + dest = cpu_mask_to_apicid(&tmp); irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); @@ -3224,7 +3226,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, if (cfg->move_in_progress) { cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); cfg->move_cleanup_count = cpus_weight(cleanup_mask); - send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); + send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); cfg->move_in_progress = 0; } @@ -3419,7 +3421,7 @@ void arch_teardown_msi_irq(unsigned int irq) #ifdef CONFIG_DMAR #ifdef CONFIG_SMP -static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) +static void dmar_msi_set_affinity(unsigned int irq, const cpumask_t *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; @@ -3431,13 +3433,13 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) return; cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, *mask)) + if (assign_irq_vector(irq, cfg, mask)) return; - set_extra_move_desc(desc, *mask); + set_extra_move_desc(desc, mask); cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(tmp); + dest = cpu_mask_to_apicid(&tmp); dmar_msi_read(irq, &msg); @@ -3481,7 +3483,7 @@ int arch_setup_dmar_msi(unsigned int irq) #ifdef CONFIG_HPET_TIMER #ifdef CONFIG_SMP -static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) +static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; @@ -3493,13 +3495,13 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) return; cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, *mask)) + if (assign_irq_vector(irq, cfg, mask)) return; - set_extra_move_desc(desc, *mask); + set_extra_move_desc(desc, mask); cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(tmp); + dest = cpu_mask_to_apicid(&tmp); hpet_msi_read(irq, &msg); @@ -3564,7 +3566,7 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) write_ht_irq_msg(irq, &msg); } -static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) +static void set_ht_irq_affinity(unsigned int irq, const cpumask_t *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; @@ -3575,13 +3577,13 @@ static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) return; cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, *mask)) + if (assign_irq_vector(irq, cfg, mask)) return; - set_extra_move_desc(desc, *mask); + set_extra_move_desc(desc, mask); cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(tmp); + dest = cpu_mask_to_apicid(&tmp); target_ht_irq(irq, dest, cfg->vector); cpumask_copy(&desc->affinity, mask); @@ -3607,14 +3609,13 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) cpumask_t tmp; cfg = irq_cfg(irq); - tmp = TARGET_CPUS; - err = assign_irq_vector(irq, cfg, 
tmp); + err = assign_irq_vector(irq, cfg, TARGET_CPUS); if (!err) { struct ht_irq_msg msg; unsigned dest; cpus_and(tmp, cfg->domain, tmp); - dest = cpu_mask_to_apicid(tmp); + dest = cpu_mask_to_apicid(&tmp); msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); @@ -3650,7 +3651,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, unsigned long mmr_offset) { - const cpumask_t *eligible_cpu = get_cpu_mask(cpu); + const cpumask_t *eligible_cpu = &cpumask_of_cpu(cpu); struct irq_cfg *cfg; int mmr_pnode; unsigned long mmr_value; @@ -3660,7 +3661,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, cfg = irq_cfg(irq); - err = assign_irq_vector(irq, cfg, *eligible_cpu); + err = assign_irq_vector(irq, cfg, eligible_cpu); if (err != 0) return err; @@ -3679,7 +3680,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, entry->polarity = 0; entry->trigger = 0; entry->mask = 0; - entry->dest = cpu_mask_to_apicid(*eligible_cpu); + entry->dest = cpu_mask_to_apicid(eligible_cpu); mmr_pnode = uv_blade_to_pnode(mmr_blade); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); @@ -3890,7 +3891,7 @@ void __init setup_ioapic_dest(void) int pin, ioapic, irq, irq_entry; struct irq_desc *desc; struct irq_cfg *cfg; - cpumask_t mask; + const cpumask_t *mask; if (skip_ioapic_setup == 1) return; @@ -3921,16 +3922,16 @@ void __init setup_ioapic_dest(void) */ if (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) - mask = desc->affinity; + mask = &desc->affinity; else mask = TARGET_CPUS; #ifdef CONFIG_INTR_REMAP if (intr_remapping_enabled) - set_ir_ioapic_affinity_irq_desc(desc, &mask); + set_ir_ioapic_affinity_irq_desc(desc, mask); else #endif - set_ioapic_affinity_irq_desc(desc, &mask); + set_ioapic_affinity_irq_desc(desc, mask); } } diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index f1c688e46f35..86aa50fc65a1 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -116,9 +116,9 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector) /* * This is only used on smaller machines. 
*/ -void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) +void send_IPI_mask_bitmask(const cpumask_t *cpumask, int vector) { - unsigned long mask = cpus_addr(cpumask)[0]; + unsigned long mask = cpus_addr(*cpumask)[0]; unsigned long flags; local_irq_save(flags); @@ -127,7 +127,7 @@ void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) local_irq_restore(flags); } -void send_IPI_mask_sequence(cpumask_t mask, int vector) +void send_IPI_mask_sequence(const cpumask_t *mask, int vector) { unsigned long flags; unsigned int query_cpu; @@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector) */ local_irq_save(flags); - for_each_possible_cpu(query_cpu) { - if (cpu_isset(query_cpu, mask)) { + for_each_cpu_mask_nr(query_cpu, *mask) + __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); + local_irq_restore(flags); +} + +void send_IPI_mask_allbutself(const cpumask_t *mask, int vector) +{ + unsigned long flags; + unsigned int query_cpu; + unsigned int this_cpu = smp_processor_id(); + + /* See Hack comment above */ + + local_irq_save(flags); + for_each_cpu_mask_nr(query_cpu, *mask) + if (query_cpu != this_cpu) __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); - } - } local_irq_restore(flags); } diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 3f92b134ab90..341df946f9a9 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu) WARN_ON(1); return; } - send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); + send_IPI_mask(&cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); } void native_send_call_func_single_ipi(int cpu) { - send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); + send_IPI_mask(&cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); } -void native_send_call_func_ipi(cpumask_t mask) +void native_send_call_func_ipi(const cpumask_t *mask) { cpumask_t allbutself; allbutself = cpu_online_map; cpu_clear(smp_processor_id(), allbutself); - if (cpus_equal(mask, allbutself) && + if (cpus_equal(*mask, allbutself) && cpus_equal(cpu_online_map, cpu_callout_map)) send_IPI_allbutself(CALL_FUNCTION_VECTOR); else diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index f4049f3513b6..174ea90d1cbd 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c @@ -164,7 +164,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, * We have to send the IPI only to * CPUs affected. */ - send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); + send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR); while (!cpus_empty(flush_cpumask)) /* nothing. lockup detection does not belong here */ diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 8f919ca69494..de6f1bda0c50 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c @@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, * We have to send the IPI only to * CPUs affected. 
*/ - send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); + send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender); while (!cpus_empty(f->flush_cpumask)) cpu_relax(); diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 3624a364b7f3..bc4c7840b2a8 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c @@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = { { } }; -static cpumask_t vector_allocation_domain(int cpu) +static void vector_allocation_domain(int cpu, cpumask_t *retmask) { - return cpumask_of_cpu(cpu); + cpus_clear(*retmask); + cpu_set(cpu, *retmask); } static int probe_bigsmp(void) diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 7b4e6d0d1690..4ba5ccaa1584 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) } #endif -static cpumask_t vector_allocation_domain(int cpu) +static void vector_allocation_domain(int cpu, cpumask_t *retmask) { /* Careful. Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest @@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu) * deliver interrupts to the wrong hyperthread when only one * hyperthread was specified in the interrupt desitination. */ - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; - return domain; + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; } struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 71a309b122e6..511d7941364f 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id) return 0; } -static cpumask_t vector_allocation_domain(int cpu) +static void vector_allocation_domain(int cpu, cpumask_t *retmask) { /* Careful. Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest @@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu) * deliver interrupts to the wrong hyperthread when only one * hyperthread was specified in the interrupt desitination. */ - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; - return domain; + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; } struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 2c6d234e0009..2821ffc188b5 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c @@ -24,7 +24,7 @@ static int probe_summit(void) return 0; } -static cpumask_t vector_allocation_domain(int cpu) +static void vector_allocation_domain(int cpu, cpumask_t *retmask) { /* Careful. Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest @@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu) * deliver interrupts to the wrong hyperthread when only one * hyperthread was specified in the interrupt desitination. 
*/ - cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; - return domain; + *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; } struct genapic apic_summit = APIC_INIT("summit", probe_summit); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index acd9b6705e02..2cce362c9874 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void) { int i, rc; - for (i = 0; i < NR_CPUS; i++) { + for (i = 0; i < nr_cpu_ids; i++) { rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); if (rc >= 0) { num_processors++; @@ -196,7 +196,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) /* Restrict the possible_map according to max_cpus. */ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { - for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--) + for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) continue; cpu_clear(cpu, cpu_possible_map); } @@ -408,24 +408,22 @@ static void xen_smp_send_reschedule(int cpu) xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); } -static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) +static void xen_send_IPI_mask(const cpumask_t *mask, enum ipi_vector vector) { unsigned cpu; - cpus_and(mask, mask, cpu_online_map); - - for_each_cpu_mask_nr(cpu, mask) + for_each_cpu_and(cpu, mask, &cpu_online_map) xen_send_IPI_one(cpu, vector); } -static void xen_smp_send_call_function_ipi(cpumask_t mask) +static void xen_smp_send_call_function_ipi(const cpumask_t *mask) { int cpu; xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); /* Make sure other vcpus get a chance to run if they need to. */ - for_each_cpu_mask_nr(cpu, mask) { + for_each_cpu_mask_nr(cpu, *mask) { if (xen_vcpu_stolen(cpu)) { HYPERVISOR_sched_op(SCHEDOP_yield, 0); break; @@ -435,7 +433,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask) static void xen_smp_send_call_function_single_ipi(int cpu) { - xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); + xen_send_IPI_mask(&cpumask_of_cpu(cpu), + XEN_CALL_FUNCTION_SINGLE_VECTOR); } static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) -- cgit v1.2.3 From a1681965011916c2f1f0f1f87e70784f5d5d5be5 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:33:53 -0800 Subject: x86: move and enhance debug printk for nr_cpu_ids etc. Impact: cleanup, better debugging This has proven useful in debugging, *before* we try to use for_each_possible_cpu(). It also now shows nr_cpumask_bits. 
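For illustration only: the consolidated printk added below reports NR_CPUS, nr_cpumask_bits, nr_cpu_ids and nr_node_ids on one line at boot. On a hypothetical NR_CPUS=64 kernel booted on an 8-core, single-node box (illustrative values, not taken from any real log) the output would look roughly like:

  NR_CPUS:64 nr_cpumask_bits:64 nr_cpu_ids:8 nr_node_ids:1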
Signed-off-by: Mike Travis Signed-off-by: Rusty Russell --- arch/x86/kernel/setup_percpu.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 1c2084291f97..0b63b08e7530 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -152,6 +152,11 @@ void __init setup_per_cpu_areas(void) old_size = PERCPU_ENOUGH_ROOM; align = max_t(unsigned long, PAGE_SIZE, align); size = roundup(old_size, align); + + printk(KERN_INFO + "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", + NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); + printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", size); @@ -168,24 +173,24 @@ void __init setup_per_cpu_areas(void) "cpu %d has no node %d or node-local memory\n", cpu, node); if (ptr) - printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n", + printk(KERN_DEBUG + "per cpu data for cpu%d at %016lx\n", cpu, __pa(ptr)); } else { ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, __pa(MAX_DMA_ADDRESS)); if (ptr) - printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", - cpu, node, __pa(ptr)); + printk(KERN_DEBUG + "per cpu data for cpu%d on node%d " + "at %016lx\n", + cpu, node, __pa(ptr)); } #endif per_cpu_offset(cpu) = ptr - __per_cpu_start; memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); } - printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n", - NR_CPUS, nr_cpu_ids, nr_node_ids); - /* Setup percpu data maps */ setup_per_cpu_maps(); -- cgit v1.2.3 From 95d313cf1c1ecedc8bec5727b09bdacbf67dfc45 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:33:54 -0800 Subject: x86: Add cpu_mask_to_apicid_and Impact: new API Add a helper function that takes two cpumask's, and's them and then returns the apicid of the result. This removes a need in io_apic.c that uses a temporary cpumask to hold (mask & cfg->domain). Signed-off-by: Mike Travis Signed-off-by: Rusty Russell --- arch/x86/include/asm/bigsmp/apic.h | 16 +++++++++ arch/x86/include/asm/es7000/apic.h | 47 +++++++++++++++++++++++++++ arch/x86/include/asm/genapic_32.h | 3 ++ arch/x86/include/asm/genapic_64.h | 2 ++ arch/x86/include/asm/mach-default/mach_apic.h | 10 ++++++ arch/x86/include/asm/mach-generic/mach_apic.h | 1 + arch/x86/include/asm/numaq/apic.h | 6 ++++ arch/x86/include/asm/summit/apic.h | 39 ++++++++++++++++++++++ arch/x86/kernel/genapic_flat_64.c | 26 +++++++++++++++ arch/x86/kernel/genx2apic_cluster.c | 16 +++++++++ arch/x86/kernel/genx2apic_phys.c | 16 +++++++++ arch/x86/kernel/genx2apic_uv_x.c | 16 +++++++++ 12 files changed, 198 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index dc6225ca48ad..99f9abacf6a2 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -129,6 +129,22 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return apicid; } +static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, + const cpumask_t *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. 
+ */ + while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) + if (cpu_isset(cpu, *andmask)) + return cpu_to_logical_apicid(cpu); + + return BAD_APICID; +} + static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 4cac0837bb40..c2bed772af88 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -214,6 +214,53 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return apicid; } +static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, + const cpumask_t *andmask) +{ + int num_bits_set; + int num_bits_set2; + int cpus_found = 0; + int cpu; + int apicid = 0; + + num_bits_set = cpus_weight(*cpumask); + num_bits_set2 = cpus_weight(*andmask); + num_bits_set = min_t(int, num_bits_set, num_bits_set2); + /* Return id to all */ + if (num_bits_set >= nr_cpu_ids) +#if defined CONFIG_ES7000_CLUSTERED_APIC + return 0xFF; +#else + return cpu_to_logical_apicid(0); +#endif + /* + * The cpus in the mask must all be on the apic cluster. If are not + * on the same apicid cluster return default value of TARGET_CPUS. + */ + while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) + if (cpu_isset(cpu, *andmask) + apicid = cpu_to_logical_apicid(cpu); + while (cpus_found < num_bits_set) { + if (cpu_isset(cpu, *cpumask) && cpu_isset(cpu, *andmask)) { + int new_apicid = cpu_to_logical_apicid(cpu); + if (apicid_cluster(apicid) != + apicid_cluster(new_apicid)) { + printk(KERN_WARNING + "%s: Not a valid mask!\n", __func__); +#if defined CONFIG_ES7000_CLUSTERED_APIC + return 0xFF; +#else + return cpu_to_logical_apicid(0); +#endif + } + apicid = new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index b21ed21c574d..325298a82237 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -58,6 +58,8 @@ struct genapic { unsigned (*get_apic_id)(unsigned long x); unsigned long apic_id_mask; unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask); + unsigned int (*cpu_mask_to_apicid_and)(const cpumask_t *cpumask, + const cpumask_t *andmask); void (*vector_allocation_domain)(int cpu, cpumask_t *retmask); #ifdef CONFIG_SMP @@ -115,6 +117,7 @@ struct genapic { APICFUNC(get_apic_id) \ .apic_id_mask = APIC_ID_MASK, \ APICFUNC(cpu_mask_to_apicid) \ + APICFUNC(cpu_mask_to_apicid_and) \ APICFUNC(vector_allocation_domain) \ APICFUNC(acpi_madt_oem_check) \ IPIFUNC(send_IPI_mask) \ diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h index a020e7d35a40..301c7f41125d 100644 --- a/arch/x86/include/asm/genapic_64.h +++ b/arch/x86/include/asm/genapic_64.h @@ -31,6 +31,8 @@ struct genapic { void (*send_IPI_self)(int vector); /* */ unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask); + unsigned int (*cpu_mask_to_apicid_and)(const cpumask_t *cpumask, + const cpumask_t *andmask); unsigned int (*phys_pkg_id)(int index_msb); unsigned int (*get_apic_id)(unsigned long x); unsigned long (*set_apic_id)(unsigned int id); diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index c18896b0508c..229b605d104e 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -28,6 +28,7 @@ static 
inline const cpumask_t *target_cpus(void) #define apic_id_registered (genapic->apic_id_registered) #define init_apic_ldr (genapic->init_apic_ldr) #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) +#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and) #define phys_pkg_id (genapic->phys_pkg_id) #define vector_allocation_domain (genapic->vector_allocation_domain) #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) @@ -66,6 +67,15 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return cpus_addr(*cpumask)[0]; } +static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, + const cpumask_t *andmask) +{ + unsigned long mask1 = cpus_addr(*cpumask)[0]; + unsigned long mask2 = cpus_addr(*andmask)[0]; + + return (unsigned int)(mask1 & mask2); +} + static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index e430f47df667..48553e958ad5 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h @@ -24,6 +24,7 @@ #define check_phys_apicid_present (genapic->check_phys_apicid_present) #define check_apicid_used (genapic->check_apicid_used) #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) +#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and) #define vector_allocation_domain (genapic->vector_allocation_domain) #define enable_apic_mode (genapic->enable_apic_mode) #define phys_pkg_id (genapic->phys_pkg_id) diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 1df7ebe738e5..abf668ced503 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -127,6 +127,12 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return (int) 0xF; } +static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, + const cpumask_t *andmask) +{ + return (int) 0xF; +} + /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) { diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 437dc83725ca..cbcc2c7eb1d5 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -170,6 +170,45 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return apicid; } +static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, + const cpumask_t *andmask) +{ + int num_bits_set; + int num_bits_set2; + int cpus_found = 0; + int cpu; + int apicid = 0; + + num_bits_set = cpus_weight(*cpumask); + num_bits_set2 = cpus_weight(*andmask); + num_bits_set = min_t(int, num_bits_set, num_bits_set2); + /* Return id to all */ + if (num_bits_set >= nr_cpu_ids) + return 0xFF; + /* + * The cpus in the mask must all be on the apic cluster. If are not + * on the same apicid cluster return default value of TARGET_CPUS. 
+ */ + while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) + if (cpu_isset(cpu, *andmask) + apicid = cpu_to_logical_apicid(cpu); + while (cpus_found < num_bits_set) { + if (cpu_isset(cpu, *cpumask) && cpu_isset(cpu, *andmask)) { + int new_apicid = cpu_to_logical_apicid(cpu); + if (apicid_cluster(apicid) != + apicid_cluster(new_apicid)) { + printk(KERN_WARNING + "%s: Not a valid mask!\n", __func__); + return 0xFF; + } + apicid = apicid | new_apicid; + cpus_found++; + } + cpu++; + } + return apicid; +} + /* cpuid returns the value latched in the HW at reset, not the APIC ID * register's value. For any box whose BIOS changes APIC IDs, like * clustered APIC systems, we must use hard_smp_processor_id. diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 50eebd0328fe..1efecd206a74 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -158,6 +158,15 @@ static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask) return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS; } +static unsigned int flat_cpu_mask_to_apicid_and(const cpumask_t *cpumask, + const cpumask_t *andmask) +{ + unsigned long mask1 = cpus_addr(*cpumask)[0] & APIC_ALL_CPUS; + unsigned long mask2 = cpus_addr(*andmask)[0] & APIC_ALL_CPUS; + + return (int)(mask1 & mask2); +} + static unsigned int phys_pkg_id(int index_msb) { return hard_smp_processor_id() >> index_msb; @@ -178,6 +187,7 @@ struct genapic apic_flat = { .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, .send_IPI_self = apic_send_IPI_self, .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, .phys_pkg_id = phys_pkg_id, .get_apic_id = get_apic_id, .set_apic_id = set_apic_id, @@ -254,6 +264,21 @@ static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask) return BAD_APICID; } +static unsigned int physflat_cpu_mask_to_apicid_and(const cpumask_t *cpumask, + const cpumask_t *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) + if (cpu_isset(cpu, *andmask)) + return per_cpu(x86_cpu_to_apicid, cpu); + return BAD_APICID; +} + struct genapic apic_physflat = { .name = "physical flat", .acpi_madt_oem_check = physflat_acpi_madt_oem_check, @@ -269,6 +294,7 @@ struct genapic apic_physflat = { .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, .send_IPI_self = apic_send_IPI_self, .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, .phys_pkg_id = phys_pkg_id, .get_apic_id = get_apic_id, .set_apic_id = set_apic_id, diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index f5fa9a91ad38..fd8047f4e455 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -123,6 +123,21 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask) return BAD_APICID; } +static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask, + const cpumask_t *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. 
+ */ + while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) + if (cpu_isset(cpu, *andmask)) + return per_cpu(x86_cpu_to_apicid, cpu); + return BAD_APICID; +} + static unsigned int get_apic_id(unsigned long x) { unsigned int id; @@ -172,6 +187,7 @@ struct genapic apic_x2apic_cluster = { .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, .send_IPI_self = x2apic_send_IPI_self, .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, .phys_pkg_id = phys_pkg_id, .get_apic_id = get_apic_id, .set_apic_id = set_apic_id, diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 41c27b2f3d01..d5578bb8f165 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -122,6 +122,21 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask) return BAD_APICID; } +static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask, + const cpumask_t *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) + if (cpu_isset(cpu, *andmask)) + return per_cpu(x86_cpu_to_apicid, cpu); + return BAD_APICID; +} + static unsigned int get_apic_id(unsigned long x) { unsigned int id; @@ -168,6 +183,7 @@ struct genapic apic_x2apic_phys = { .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, .send_IPI_self = x2apic_send_IPI_self, .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, .phys_pkg_id = phys_pkg_id, .get_apic_id = get_apic_id, .set_apic_id = set_apic_id, diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 010659415ae4..53bd2570272c 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -179,6 +179,21 @@ static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask) return BAD_APICID; } +static unsigned int uv_cpu_mask_to_apicid_and(const cpumask_t *cpumask, + const cpumask_t *andmask) +{ + int cpu; + + /* + * We're using fixed IRQ delivery, can only return one phys APIC ID. + * May as well be the first. + */ + while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) + if (cpu_isset(cpu, *andmask)) + return per_cpu(x86_cpu_to_apicid, cpu); + return BAD_APICID; +} + static unsigned int get_apic_id(unsigned long x) { unsigned int id; @@ -229,6 +244,7 @@ struct genapic apic_x2apic_uv_x = { .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, .send_IPI_self = uv_send_IPI_self, .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, + .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, .phys_pkg_id = phys_pkg_id, .get_apic_id = get_apic_id, .set_apic_id = set_apic_id, -- cgit v1.2.3 From 6eeb7c5a99434596c5953a95baa17d2f085664e3 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:33:55 -0800 Subject: x86: update add-cpu_mask_to_apicid_and to use struct cpumask* Impact: use updated APIs Various API updates for x86:add-cpu_mask_to_apicid_and (Note: separate because previous patch has been "backported" to 2.6.27.) 
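To make the direction of these conversions concrete, here is a minimal sketch (not part of the patch; the helper name is made up) of the idiom the series converges on: take const struct cpumask * arguments and let cpumask_any_and() scan both masks in one pass, instead of open-coding a next_cpu()/cpu_isset() loop over a temporary cpumask_t:

static unsigned int example_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						   const struct cpumask *andmask)
{
	/* Pick any cpu set in both masks; >= nr_cpu_ids means "none". */
	int cpu = cpumask_any_and(cpumask, andmask);

	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	return BAD_APICID;
}

Callers can then pass cpumask_of(cpu), cpu_online_mask or &desc->affinity directly, so no cpumask_t copy has to live on the stack.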
Signed-off-by: Rusty Russell Signed-off-by: Mike Travis --- arch/x86/include/asm/bigsmp/apic.h | 10 +++++----- arch/x86/include/asm/es7000/apic.h | 19 ++++++++++--------- arch/x86/include/asm/genapic_32.h | 4 ++-- arch/x86/include/asm/genapic_64.h | 4 ++-- arch/x86/include/asm/mach-default/mach_apic.h | 8 ++++---- arch/x86/include/asm/numaq/apic.h | 4 ++-- arch/x86/include/asm/summit/apic.h | 18 +++++++++--------- arch/x86/kernel/genapic_flat_64.c | 21 +++++++++++---------- arch/x86/kernel/genx2apic_cluster.c | 10 +++++----- arch/x86/kernel/genx2apic_phys.c | 10 +++++----- arch/x86/kernel/genx2apic_uv_x.c | 10 +++++----- 11 files changed, 60 insertions(+), 58 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 99f9abacf6a2..976399debb3f 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -129,8 +129,8 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return apicid; } -static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, - const cpumask_t *andmask) +static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { int cpu; @@ -138,9 +138,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) - if (cpu_isset(cpu, *andmask)) - return cpu_to_logical_apicid(cpu); + cpu = cpumask_any_and(cpumask, andmask); + if (cpu < nr_cpu_ids) + return cpu_to_logical_apicid(cpu); return BAD_APICID; } diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index c2bed772af88..ba8423c5363f 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -214,8 +214,8 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return apicid; } -static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, - const cpumask_t *andmask) +static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { int num_bits_set; int num_bits_set2; @@ -223,9 +223,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, int cpu; int apicid = 0; - num_bits_set = cpus_weight(*cpumask); - num_bits_set2 = cpus_weight(*andmask); - num_bits_set = min_t(int, num_bits_set, num_bits_set2); + num_bits_set = cpumask_weight(cpumask); + num_bits_set2 = cpumask_weight(andmask); + num_bits_set = min(num_bits_set, num_bits_set2); /* Return id to all */ if (num_bits_set >= nr_cpu_ids) #if defined CONFIG_ES7000_CLUSTERED_APIC @@ -237,11 +237,12 @@ static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. 
*/ - while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) - if (cpu_isset(cpu, *andmask) - apicid = cpu_to_logical_apicid(cpu); + cpu = cpumask_first_and(cpumask, andmask); + apicid = cpu_to_logical_apicid(cpu); + while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, *cpumask) && cpu_isset(cpu, *andmask)) { + if (cpumask_test_cpu(cpu, cpumask) && + cpumask_test_cpu(cpu, andmask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)) { diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index 325298a82237..eed6e305291f 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -58,8 +58,8 @@ struct genapic { unsigned (*get_apic_id)(unsigned long x); unsigned long apic_id_mask; unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask); - unsigned int (*cpu_mask_to_apicid_and)(const cpumask_t *cpumask, - const cpumask_t *andmask); + unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, + const struct cpumask *andmask); void (*vector_allocation_domain)(int cpu, cpumask_t *retmask); #ifdef CONFIG_SMP diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h index 301c7f41125d..244b71729ecb 100644 --- a/arch/x86/include/asm/genapic_64.h +++ b/arch/x86/include/asm/genapic_64.h @@ -31,8 +31,8 @@ struct genapic { void (*send_IPI_self)(int vector); /* */ unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask); - unsigned int (*cpu_mask_to_apicid_and)(const cpumask_t *cpumask, - const cpumask_t *andmask); + unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, + const struct cpumask *andmask); unsigned int (*phys_pkg_id)(int index_msb); unsigned int (*get_apic_id)(unsigned long x); unsigned long (*set_apic_id)(unsigned int id); diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 229b605d104e..df8e024c43c5 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -67,11 +67,11 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return cpus_addr(*cpumask)[0]; } -static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask, - const cpumask_t *andmask) +static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { - unsigned long mask1 = cpus_addr(*cpumask)[0]; - unsigned long mask2 = cpus_addr(*andmask)[0]; + unsigned long mask1 = cpumask_bits(cpumask)[0]; + unsigned long mask2 = cpumask_bits(andmask)[0]; return (unsigned int)(mask1 & mask2); } diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index abf668ced503..c80f00d29965 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -127,8 +127,8 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return (int) 0xF; } -static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, - const cpumask_t *andmask) +static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { return (int) 0xF; } diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index cbcc2c7eb1d5..651a93849341 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -170,8 +170,8 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return apicid; } -static inline unsigned 
int cpu_mask_to_apicid_and(const cpumask_t *cpumask, - const cpumask_t *andmask) +static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { int num_bits_set; int num_bits_set2; @@ -179,9 +179,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, int cpu; int apicid = 0; - num_bits_set = cpus_weight(*cpumask); - num_bits_set2 = cpus_weight(*andmask); - num_bits_set = min_t(int, num_bits_set, num_bits_set2); + num_bits_set = cpumask_weight(cpumask); + num_bits_set2 = cpumask_weight(andmask); + num_bits_set = min(num_bits_set, num_bits_set2); /* Return id to all */ if (num_bits_set >= nr_cpu_ids) return 0xFF; @@ -189,11 +189,11 @@ static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask, * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. */ - while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) - if (cpu_isset(cpu, *andmask) - apicid = cpu_to_logical_apicid(cpu); + cpu = cpumask_first_and(cpumask, andmask); + apicid = cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, *cpumask) && cpu_isset(cpu, *andmask)) { + if (cpumask_test_cpu(cpu, cpumask) + && cpumask_test_cpu(cpu, andmask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != apicid_cluster(new_apicid)) { diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 1efecd206a74..c772bb10b173 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -158,13 +158,13 @@ static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask) return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS; } -static unsigned int flat_cpu_mask_to_apicid_and(const cpumask_t *cpumask, - const cpumask_t *andmask) +static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { - unsigned long mask1 = cpus_addr(*cpumask)[0] & APIC_ALL_CPUS; - unsigned long mask2 = cpus_addr(*andmask)[0] & APIC_ALL_CPUS; + unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; + unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS; - return (int)(mask1 & mask2); + return mask1 & mask2; } static unsigned int phys_pkg_id(int index_msb) @@ -264,8 +264,9 @@ static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask) return BAD_APICID; } -static unsigned int physflat_cpu_mask_to_apicid_and(const cpumask_t *cpumask, - const cpumask_t *andmask) +static unsigned int +physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { int cpu; @@ -273,9 +274,9 @@ static unsigned int physflat_cpu_mask_to_apicid_and(const cpumask_t *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. 
*/ - while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) - if (cpu_isset(cpu, *andmask)) - return per_cpu(x86_cpu_to_apicid, cpu); + cpu = cpumask_any_and(cpumask, andmask); + if (cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); return BAD_APICID; } diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index fd8047f4e455..e7d16f53b9cd 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -123,8 +123,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask) return BAD_APICID; } -static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask, - const cpumask_t *andmask) +static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { int cpu; @@ -132,9 +132,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) - if (cpu_isset(cpu, *andmask)) - return per_cpu(x86_cpu_to_apicid, cpu); + cpu = cpumask_any_and(cpumask, andmask); + if (cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); return BAD_APICID; } diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index d5578bb8f165..9d0386c7e798 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -122,8 +122,8 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask) return BAD_APICID; } -static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask, - const cpumask_t *andmask) +static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { int cpu; @@ -131,9 +131,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) - if (cpu_isset(cpu, *andmask)) - return per_cpu(x86_cpu_to_apicid, cpu); + cpu = cpumask_any_and(cpumask, andmask); + if (cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); return BAD_APICID; } diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 53bd2570272c..22596ec94c82 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -179,8 +179,8 @@ static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask) return BAD_APICID; } -static unsigned int uv_cpu_mask_to_apicid_and(const cpumask_t *cpumask, - const cpumask_t *andmask) +static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, + const struct cpumask *andmask) { int cpu; @@ -188,9 +188,9 @@ static unsigned int uv_cpu_mask_to_apicid_and(const cpumask_t *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. 
*/ - while ((cpu = next_cpu(-1, *cpumask)) < nr_cpu_ids) - if (cpu_isset(cpu, *andmask)) - return per_cpu(x86_cpu_to_apicid, cpu); + cpu = cpumask_any_and(cpumask, andmask); + if (cpu < nr_cpu_ids) + return per_cpu(x86_cpu_to_apicid, cpu); return BAD_APICID; } -- cgit v1.2.3 From 22f65d31b25a320a5246592160bcb102d2791c45 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:33:56 -0800 Subject: x86: Update io_apic.c to use new cpumask API Impact: cleanup, consolidate patches, use new API Consolidate the following into a single patch to adapt to new sparseirq code in arch/x86/kernel/io_apic.c, add allocation of cpumask_var_t's in domain and old_domain, and reduce further merge conflicts. Only one file (arch/x86/kernel/io_apic.c) is changed in all of these patches. 0006-x86-io_apic-change-irq_cfg-domain-old_domain-to.patch 0007-x86-io_apic-set_desc_affinity.patch 0008-x86-io_apic-send_cleanup_vector.patch 0009-x86-io_apic-eliminate-remaining-cpumask_ts-from-st.patch 0021-x86-final-cleanups-in-io_apic-to-use-new-cpumask-AP.patch Signed-off-by: Rusty Russell Signed-off-by: Mike Travis --- arch/x86/kernel/io_apic.c | 302 ++++++++++++++++++++++------------------------ 1 file changed, 145 insertions(+), 157 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 7f23ce7f5518..60bb8b19f4cd 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu) struct irq_cfg { struct irq_pin_list *irq_2_pin; - cpumask_t domain; - cpumask_t old_domain; + cpumask_var_t domain; + cpumask_var_t old_domain; unsigned move_cleanup_count; u8 vector; u8 move_in_progress : 1; @@ -149,22 +149,22 @@ static struct irq_cfg irq_cfgx[] = { #else static struct irq_cfg irq_cfgx[NR_IRQS] = { #endif - [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, - [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, - [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, - [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, - [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, - [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, - [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, - [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, - [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, - [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, - [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, - [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, - [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, - [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, - [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, - [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, + [0] = { .vector = IRQ0_VECTOR, }, + [1] = { .vector = IRQ1_VECTOR, }, + [2] = { .vector = IRQ2_VECTOR, }, + [3] = { .vector = IRQ3_VECTOR, }, + [4] = { .vector = IRQ4_VECTOR, }, + [5] = { .vector = IRQ5_VECTOR, }, + [6] = { .vector = IRQ6_VECTOR, }, + [7] = { .vector = IRQ7_VECTOR, }, + [8] = { .vector = IRQ8_VECTOR, }, + [9] = { .vector = IRQ9_VECTOR, }, + [10] = { .vector = IRQ10_VECTOR, }, + [11] = { .vector = IRQ11_VECTOR, }, + [12] = { .vector = IRQ12_VECTOR, }, + [13] = { .vector = IRQ13_VECTOR, }, + [14] = { .vector = IRQ14_VECTOR, }, + [15] = { .vector = IRQ15_VECTOR, }, }; void __init arch_early_irq_init(void) @@ -180,6 +180,10 @@ void __init arch_early_irq_init(void) for (i = 0; i < count; i++) { desc = 
irq_to_desc(i); desc->chip_data = &cfg[i]; + alloc_bootmem_cpumask_var(&cfg[i].domain); + alloc_bootmem_cpumask_var(&cfg[i].old_domain); + if (i < NR_IRQS_LEGACY) + cpumask_setall(cfg[i].domain); } } @@ -204,6 +208,20 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu) node = cpu_to_node(cpu); cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); + if (cfg) { + /* FIXME: needs alloc_cpumask_var_node() */ + if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) { + kfree(cfg); + cfg = NULL; + } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) { + free_cpumask_var(cfg->domain); + kfree(cfg); + cfg = NULL; + } else { + cpumask_clear(cfg->domain); + cpumask_clear(cfg->old_domain); + } + } printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); return cfg; @@ -362,6 +380,26 @@ static void ioapic_mask_entry(int apic, int pin) } #ifdef CONFIG_SMP +static void send_cleanup_vector(struct irq_cfg *cfg) +{ + cpumask_var_t cleanup_mask; + + if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { + unsigned int i; + cfg->move_cleanup_count = 0; + for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) + cfg->move_cleanup_count++; + for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) + send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); + } else { + cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); + cfg->move_cleanup_count = cpumask_weight(cleanup_mask); + send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); + free_cpumask_var(cleanup_mask); + } + cfg->move_in_progress = 0; +} + static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) { int apic, pin; @@ -400,40 +438,52 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq static int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); -static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, - const struct cpumask *mask) +/* + * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid + * of that, or returns BAD_APICID and leaves desc->affinity untouched. + */ +static unsigned int +set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) { struct irq_cfg *cfg; - unsigned long flags; - unsigned int dest; - cpumask_t tmp; unsigned int irq; if (!cpumask_intersects(mask, cpu_online_mask)) - return; + return BAD_APICID; irq = desc->irq; cfg = desc->chip_data; if (assign_irq_vector(irq, cfg, mask)) - return; + return BAD_APICID; + cpumask_and(&desc->affinity, cfg->domain, mask); set_extra_move_desc(desc, mask); + return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask); +} - cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(&tmp); - /* - * Only the high 8 bits are valid. - */ - dest = SET_APIC_LOGICAL_ID(dest); +static void +set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) +{ + struct irq_cfg *cfg; + unsigned long flags; + unsigned int dest; + unsigned int irq; + + irq = desc->irq; + cfg = desc->chip_data; spin_lock_irqsave(&ioapic_lock, flags); - __target_IO_APIC_irq(irq, dest, cfg); - cpumask_copy(&desc->affinity, mask); + dest = set_desc_affinity(desc, mask); + if (dest != BAD_APICID) { + /* Only the high 8 bits are valid. 
*/ + dest = SET_APIC_LOGICAL_ID(dest); + __target_IO_APIC_irq(irq, dest, cfg); + } spin_unlock_irqrestore(&ioapic_lock, flags); } -static void set_ioapic_affinity_irq(unsigned int irq, - const struct cpumask *mask) +static void +set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc; @@ -1117,26 +1167,32 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) */ static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; unsigned int old_vector; - int cpu; - cpumask_t tmp_mask; + int cpu, err; + cpumask_var_t tmp_mask; if ((cfg->move_in_progress) || cfg->move_cleanup_count) return -EBUSY; + if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) + return -ENOMEM; + old_vector = cfg->vector; if (old_vector) { - cpus_and(tmp_mask, *mask, cpu_online_map); - cpus_and(tmp_mask, cfg->domain, tmp_mask); - if (!cpus_empty(tmp_mask)) + cpumask_and(tmp_mask, mask, cpu_online_mask); + cpumask_and(tmp_mask, cfg->domain, tmp_mask); + if (!cpumask_empty(tmp_mask)) { + free_cpumask_var(tmp_mask); return 0; + } } /* Only try and allocate irqs on cpus that are present */ - for_each_cpu_and(cpu, mask, &cpu_online_map) { + err = -ENOSPC; + for_each_cpu_and(cpu, mask, cpu_online_mask) { int new_cpu; int vector, offset; - vector_allocation_domain(cpu, &tmp_mask); + vector_allocation_domain(cpu, tmp_mask); vector = current_vector; offset = current_offset; @@ -1156,7 +1212,7 @@ next: if (vector == SYSCALL_VECTOR) goto next; #endif - for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map) + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) if (per_cpu(vector_irq, new_cpu)[vector] != -1) goto next; /* Found one! */ @@ -1164,15 +1220,17 @@ next: current_offset = offset; if (old_vector) { cfg->move_in_progress = 1; - cfg->old_domain = cfg->domain; + cpumask_copy(cfg->old_domain, cfg->domain); } - for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map) + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) per_cpu(vector_irq, new_cpu)[vector] = irq; cfg->vector = vector; - cfg->domain = tmp_mask; - return 0; + cpumask_copy(cfg->domain, tmp_mask); + err = 0; + break; } - return -ENOSPC; + free_cpumask_var(tmp_mask); + return err; } static int @@ -1189,23 +1247,20 @@ assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) static void __clear_irq_vector(int irq, struct irq_cfg *cfg) { - cpumask_t mask; int cpu, vector; BUG_ON(!cfg->vector); vector = cfg->vector; - cpus_and(mask, cfg->domain, cpu_online_map); - for_each_cpu_mask_nr(cpu, mask) + for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = 0; - cpus_clear(cfg->domain); + cpumask_clear(cfg->domain); if (likely(!cfg->move_in_progress)) return; - cpus_and(mask, cfg->old_domain, cpu_online_map); - for_each_cpu_mask_nr(cpu, mask) { + for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { if (per_cpu(vector_irq, cpu)[vector] != irq) @@ -1230,7 +1285,7 @@ void __setup_vector_irq(int cpu) if (!desc) continue; cfg = desc->chip_data; - if (!cpu_isset(cpu, cfg->domain)) + if (!cpumask_test_cpu(cpu, cfg->domain)) continue; vector = cfg->vector; per_cpu(vector_irq, cpu)[vector] = irq; @@ -1242,7 +1297,7 @@ void __setup_vector_irq(int cpu) continue; cfg = irq_cfg(irq); - if (!cpu_isset(cpu, cfg->domain)) + if (!cpumask_test_cpu(cpu, cfg->domain)) per_cpu(vector_irq, cpu)[vector] = -1; } } @@ -1378,18 +1433,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int 
irq, struct irq_de { struct irq_cfg *cfg; struct IO_APIC_route_entry entry; - cpumask_t mask; + unsigned int dest; if (!IO_APIC_IRQ(irq)) return; cfg = desc->chip_data; - mask = *TARGET_CPUS; - if (assign_irq_vector(irq, cfg, &mask)) + if (assign_irq_vector(irq, cfg, TARGET_CPUS)) return; - cpus_and(mask, cfg->domain, mask); + dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " @@ -1399,8 +1453,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, - cpu_mask_to_apicid(&mask), trigger, polarity, - cfg->vector)) { + dest, trigger, polarity, cfg->vector)) { printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", mp_ioapics[apic].mp_apicid, pin); __clear_irq_vector(irq, cfg); @@ -2122,7 +2175,7 @@ static int ioapic_retrigger_irq(unsigned int irq) unsigned long flags; spin_lock_irqsave(&vector_lock, flags); - send_IPI_mask(&cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); + send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); spin_unlock_irqrestore(&vector_lock, flags); return 1; @@ -2175,15 +2228,13 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) { struct irq_cfg *cfg; - cpumask_t tmpmask; struct irte irte; int modify_ioapic_rte; unsigned int dest; unsigned long flags; unsigned int irq; - cpus_and(tmpmask, *mask, cpu_online_map); - if (cpus_empty(tmpmask)) + if (!cpumask_intersects(mask, cpu_online_mask)) return; irq = desc->irq; @@ -2196,8 +2247,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) set_extra_move_desc(desc, mask); - cpus_and(tmpmask, cfg->domain, *mask); - dest = cpu_mask_to_apicid(&tmpmask); + dest = cpu_mask_to_apicid_and(cfg->domain, mask); modify_ioapic_rte = desc->status & IRQ_LEVEL; if (modify_ioapic_rte) { @@ -2214,14 +2264,10 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) */ modify_irte(irq, &irte); - if (cfg->move_in_progress) { - cpus_and(tmpmask, cfg->old_domain, cpu_online_map); - cfg->move_cleanup_count = cpus_weight(tmpmask); - send_IPI_mask(&tmpmask, IRQ_MOVE_CLEANUP_VECTOR); - cfg->move_in_progress = 0; - } + if (cfg->move_in_progress) + send_cleanup_vector(cfg); - desc->affinity = *mask; + cpumask_copy(&desc->affinity, mask); } static int migrate_irq_remapped_level_desc(struct irq_desc *desc) @@ -2247,7 +2293,7 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc) ret = 0; desc->status &= ~IRQ_MOVE_PENDING; - cpus_clear(desc->pending_mask); + cpumask_clear(&desc->pending_mask); unmask: unmask_IO_APIC_irq_desc(desc); @@ -2333,7 +2379,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) if (!cfg->move_cleanup_count) goto unlock; - if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) goto unlock; __get_cpu_var(vector_irq)[vector] = -1; @@ -2356,14 +2402,8 @@ static void irq_complete_move(struct irq_desc **descp) vector = ~get_irq_regs()->orig_ax; me = smp_processor_id(); - if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) { - cpumask_t cleanup_mask; - - cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); - cfg->move_cleanup_count = cpus_weight(cleanup_mask); - send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); - cfg->move_in_progress = 0; - } + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) + send_cleanup_vector(cfg); } #else 
static inline void irq_complete_move(struct irq_desc **descp) {} @@ -3088,16 +3128,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms struct irq_cfg *cfg; int err; unsigned dest; - cpumask_t tmp; cfg = irq_cfg(irq); - tmp = *TARGET_CPUS; - err = assign_irq_vector(irq, cfg, &tmp); + err = assign_irq_vector(irq, cfg, TARGET_CPUS); if (err) return err; - cpus_and(tmp, cfg->domain, tmp); - dest = cpu_mask_to_apicid(&tmp); + dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); #ifdef CONFIG_INTR_REMAP if (irq_remapped(irq)) { @@ -3157,19 +3194,12 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) struct irq_cfg *cfg; struct msi_msg msg; unsigned int dest; - cpumask_t tmp; - if (!cpumask_intersects(mask, cpu_online_mask)) + dest = set_desc_affinity(desc, mask); + if (dest == BAD_APICID) return; cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, mask)) - return; - - set_extra_move_desc(desc, mask); - - cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(&tmp); read_msi_msg_desc(desc, &msg); @@ -3179,7 +3209,6 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) msg.address_lo |= MSI_ADDR_DEST_ID(dest); write_msi_msg_desc(desc, &msg); - cpumask_copy(&desc->affinity, mask); } #ifdef CONFIG_INTR_REMAP /* @@ -3192,24 +3221,15 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; unsigned int dest; - cpumask_t tmp, cleanup_mask; struct irte irte; - if (!cpumask_intersects(mask, cpu_online_mask)) - return; - if (get_irte(irq, &irte)) return; - cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, mask)) + dest = set_desc_affinity(desc, mask); + if (dest == BAD_APICID) return; - set_extra_move_desc(desc, mask); - - cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(&tmp); - irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); @@ -3223,14 +3243,8 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) * at the new destination. So, time to cleanup the previous * vector allocation. 
*/ - if (cfg->move_in_progress) { - cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); - cfg->move_cleanup_count = cpus_weight(cleanup_mask); - send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); - cfg->move_in_progress = 0; - } - - cpumask_copy(&desc->affinity, mask); + if (cfg->move_in_progress) + send_cleanup_vector(cfg); } #endif @@ -3421,25 +3435,18 @@ void arch_teardown_msi_irq(unsigned int irq) #ifdef CONFIG_DMAR #ifdef CONFIG_SMP -static void dmar_msi_set_affinity(unsigned int irq, const cpumask_t *mask) +static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; struct msi_msg msg; unsigned int dest; - cpumask_t tmp; - if (!cpumask_intersects(mask, cpu_online_mask)) + dest = set_desc_affinity(desc, mask); + if (dest == BAD_APICID) return; cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, mask)) - return; - - set_extra_move_desc(desc, mask); - - cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(&tmp); dmar_msi_read(irq, &msg); @@ -3449,7 +3456,6 @@ static void dmar_msi_set_affinity(unsigned int irq, const cpumask_t *mask) msg.address_lo |= MSI_ADDR_DEST_ID(dest); dmar_msi_write(irq, &msg); - cpumask_copy(&desc->affinity, mask); } #endif /* CONFIG_SMP */ @@ -3483,25 +3489,18 @@ int arch_setup_dmar_msi(unsigned int irq) #ifdef CONFIG_HPET_TIMER #ifdef CONFIG_SMP -static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask) +static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; struct msi_msg msg; unsigned int dest; - cpumask_t tmp; - if (!cpumask_intersects(mask, cpu_online_mask)) + dest = set_desc_affinity(desc, mask); + if (dest == BAD_APICID) return; cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, mask)) - return; - - set_extra_move_desc(desc, mask); - - cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(&tmp); hpet_msi_read(irq, &msg); @@ -3511,7 +3510,6 @@ static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask) msg.address_lo |= MSI_ADDR_DEST_ID(dest); hpet_msi_write(irq, &msg); - cpumask_copy(&desc->affinity, mask); } #endif /* CONFIG_SMP */ @@ -3566,27 +3564,19 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) write_ht_irq_msg(irq, &msg); } -static void set_ht_irq_affinity(unsigned int irq, const cpumask_t *mask) +static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg; unsigned int dest; - cpumask_t tmp; - if (!cpumask_intersects(mask, cpu_online_mask)) + dest = set_desc_affinity(desc, mask); + if (dest == BAD_APICID) return; cfg = desc->chip_data; - if (assign_irq_vector(irq, cfg, mask)) - return; - - set_extra_move_desc(desc, mask); - - cpumask_and(&tmp, &cfg->domain, mask); - dest = cpu_mask_to_apicid(&tmp); target_ht_irq(irq, dest, cfg->vector); - cpumask_copy(&desc->affinity, mask); } #endif @@ -3606,7 +3596,6 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) { struct irq_cfg *cfg; int err; - cpumask_t tmp; cfg = irq_cfg(irq); err = assign_irq_vector(irq, cfg, TARGET_CPUS); @@ -3614,8 +3603,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) struct ht_irq_msg msg; unsigned dest; - cpus_and(tmp, cfg->domain, tmp); - dest = cpu_mask_to_apicid(&tmp); + dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); @@ -3651,7 +3639,7 @@ 
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, unsigned long mmr_offset) { - const cpumask_t *eligible_cpu = &cpumask_of_cpu(cpu); + const struct cpumask *eligible_cpu = cpumask_of(cpu); struct irq_cfg *cfg; int mmr_pnode; unsigned long mmr_value; @@ -3891,7 +3879,7 @@ void __init setup_ioapic_dest(void) int pin, ioapic, irq, irq_entry; struct irq_desc *desc; struct irq_cfg *cfg; - const cpumask_t *mask; + const struct cpumask *mask; if (skip_ioapic_setup == 1) return; -- cgit v1.2.3 From b78936e14ee47b6b2d628501a0eab5270db80132 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:33:57 -0800 Subject: xen: convert to cpumask_var_t and new cpumask primitives. Simple change, and eventual space saving when NR_CPUS >> nr_cpu_ids. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Cc: Jeremy Fitzhardinge --- arch/x86/xen/smp.c | 9 ++++++--- arch/x86/xen/suspend.c | 3 ++- arch/x86/xen/xen-ops.h | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 2cce362c9874..b3a95868839b 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -33,7 +33,7 @@ #include "xen-ops.h" #include "mmu.h" -cpumask_t xen_cpu_initialized_map; +cpumask_var_t xen_cpu_initialized_map; static DEFINE_PER_CPU(int, resched_irq); static DEFINE_PER_CPU(int, callfunc_irq); @@ -192,7 +192,10 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) if (xen_smp_intr_init(0)) BUG(); - xen_cpu_initialized_map = cpumask_of_cpu(0); + if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL)) + panic("could not allocate xen_cpu_initialized_map\n"); + + cpumask_copy(xen_cpu_initialized_map, cpumask_of(0)); /* Restrict the possible_map according to max_cpus. */ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { @@ -221,7 +224,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) struct vcpu_guest_context *ctxt; struct desc_struct *gdt; - if (cpu_test_and_set(cpu, xen_cpu_initialized_map)) + if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map)) return 0; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 2a234db5949b..212ffe012b76 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -35,7 +35,8 @@ void xen_post_suspend(int suspend_cancelled) pfn_to_mfn(xen_start_info->console.domU.mfn); } else { #ifdef CONFIG_SMP - xen_cpu_initialized_map = cpu_online_map; + BUG_ON(xen_cpu_initialized_map == NULL); + cpumask_copy(xen_cpu_initialized_map, cpu_online_mask); #endif xen_vcpu_restore(); } diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 9e1afae8461f..c1f8faf0a2c5 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -58,7 +58,7 @@ void __init xen_init_spinlocks(void); __cpuinit void xen_init_lock_cpu(int cpu); void xen_uninit_lock_cpu(int cpu); -extern cpumask_t xen_cpu_initialized_map; +extern cpumask_var_t xen_cpu_initialized_map; #else static inline void xen_smp_init(void) {} #endif -- cgit v1.2.3 From d7b381bb7b1ad69ff008ea063d26e988b686c8de Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:33:58 -0800 Subject: x86: fixup_irqs() doesnt need an argument. Impact: cleanup, remove on-stack cpumask. The "map" arg is always cpu_online_mask. 
Importantly, set_affinity always ands the argument with cpu_online_mask anyway, so we don't need to do it in fixup_irqs(), avoiding a temporary. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis --- arch/x86/include/asm/irq.h | 2 +- arch/x86/kernel/irq_32.c | 13 +++++++------ arch/x86/kernel/irq_64.c | 15 ++++++++------- arch/x86/kernel/smpboot.c | 2 +- 4 files changed, 17 insertions(+), 15 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index bae0eda95486..8766d30fb746 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -37,7 +37,7 @@ extern int irqbalance_disable(char *str); #ifdef CONFIG_HOTPLUG_CPU #include -extern void fixup_irqs(cpumask_t map); +extern void fixup_irqs(void); #endif extern unsigned int do_IRQ(struct pt_regs *regs); diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 9cf9cbbf7a02..9dc5588f336a 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -233,27 +233,28 @@ unsigned int do_IRQ(struct pt_regs *regs) #ifdef CONFIG_HOTPLUG_CPU #include -void fixup_irqs(cpumask_t map) +/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ +void fixup_irqs(void) { unsigned int irq; static int warned; struct irq_desc *desc; for_each_irq_desc(irq, desc) { - cpumask_t mask; + const struct cpumask *affinity; if (!desc) continue; if (irq == 2) continue; - cpus_and(mask, desc->affinity, map); - if (any_online_cpu(mask) == NR_CPUS) { + affinity = &desc->affinity; + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { printk("Breaking affinity for irq %i\n", irq); - mask = map; + affinity = cpu_all_mask; } if (desc->chip->set_affinity) - desc->chip->set_affinity(irq, &mask); + desc->chip->set_affinity(irq, affinity); else if (desc->action && !(warned++)) printk("Cannot set affinity for irq %i\n", irq); } diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 27f2307b0a34..fca2991443f5 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -83,16 +83,17 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs) } #ifdef CONFIG_HOTPLUG_CPU -void fixup_irqs(cpumask_t map) +/* A cpu has been removed from cpu_online_mask. Reset irq affinities. 
*/ +void fixup_irqs(void) { unsigned int irq; static int warned; struct irq_desc *desc; for_each_irq_desc(irq, desc) { - cpumask_t mask; int break_affinity = 0; int set_affinity = 1; + const struct cpumask *affinity; if (!desc) continue; @@ -102,23 +103,23 @@ void fixup_irqs(cpumask_t map) /* interrupt's are disabled at this point */ spin_lock(&desc->lock); + affinity = &desc->affinity; if (!irq_has_action(irq) || - cpus_equal(desc->affinity, map)) { + cpumask_equal(affinity, cpu_online_mask)) { spin_unlock(&desc->lock); continue; } - cpus_and(mask, desc->affinity, map); - if (cpus_empty(mask)) { + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { break_affinity = 1; - mask = map; + affinity = cpu_all_mask; } if (desc->chip->mask) desc->chip->mask(irq); if (desc->chip->set_affinity) - desc->chip->set_affinity(irq, &mask); + desc->chip->set_affinity(irq, affinity); else if (!(warned++)) set_affinity = 0; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9d58134e0231..8b6f675b363b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1346,7 +1346,7 @@ void cpu_disable_common(void) lock_vector_lock(); remove_cpu_from_maps(cpu); unlock_vector_lock(); - fixup_irqs(cpu_online_map); + fixup_irqs(); } int native_cpu_disable(void) -- cgit v1.2.3 From bcda016eddd7a8b374bb371473c821a91ff1d8cc Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:33:59 -0800 Subject: x86: cosmetic changes apic-related files. This patch simply changes cpumask_t to struct cpumask and similar trivial modernizations. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis --- arch/x86/include/asm/bigsmp/ipi.h | 14 +++----- arch/x86/include/asm/es7000/ipi.h | 13 +++---- arch/x86/include/asm/genapic_32.h | 11 +++--- arch/x86/include/asm/genapic_64.h | 11 +++--- arch/x86/include/asm/ipi.h | 10 +++--- arch/x86/include/asm/mach-default/mach_apic.h | 12 +++---- arch/x86/include/asm/mach-default/mach_ipi.h | 12 +++---- arch/x86/include/asm/numaq/ipi.h | 14 +++----- arch/x86/include/asm/smp.h | 4 +-- arch/x86/kernel/genapic_flat_64.c | 50 ++++++++++++++------------- arch/x86/kernel/genx2apic_cluster.c | 25 +++++++------- arch/x86/kernel/genx2apic_phys.c | 25 +++++++------- arch/x86/kernel/genx2apic_uv_x.c | 24 ++++++------- arch/x86/kernel/ipi.c | 14 ++++---- arch/x86/kernel/smp.c | 6 ++-- arch/x86/xen/smp.c | 11 +++--- 16 files changed, 127 insertions(+), 129 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h index 63553e9f22b2..27fcd01b3ae6 100644 --- a/arch/x86/include/asm/bigsmp/ipi.h +++ b/arch/x86/include/asm/bigsmp/ipi.h @@ -1,26 +1,22 @@ #ifndef __ASM_MACH_IPI_H #define __ASM_MACH_IPI_H -void send_IPI_mask_sequence(const cpumask_t *mask, int vector); -void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); +void send_IPI_mask_sequence(const struct cpumask *mask, int vector); +void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); -static inline void send_IPI_mask(const cpumask_t *mask, int vector) +static inline void send_IPI_mask(const struct cpumask *mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - - if (!cpus_empty(mask)) - send_IPI_mask(&mask, vector); + send_IPI_mask_allbutself(cpu_online_mask, vector); } static inline void send_IPI_all(int vector) { - send_IPI_mask(&cpu_online_map, vector); + 
send_IPI_mask(cpu_online_mask, vector); } #endif /* __ASM_MACH_IPI_H */ diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h index 1a8507265f91..7e8ed24d4b8a 100644 --- a/arch/x86/include/asm/es7000/ipi.h +++ b/arch/x86/include/asm/es7000/ipi.h @@ -1,25 +1,22 @@ #ifndef __ASM_ES7000_IPI_H #define __ASM_ES7000_IPI_H -void send_IPI_mask_sequence(const cpumask_t *mask, int vector); -void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); +void send_IPI_mask_sequence(const struct cpumask *mask, int vector); +void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); -static inline void send_IPI_mask(const cpumask_t *mask, int vector) +static inline void send_IPI_mask(const struct cpumask *mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - if (!cpus_empty(mask)) - send_IPI_mask(&mask, vector); + send_IPI_mask_allbutself(cpu_online_mask, vector); } static inline void send_IPI_all(int vector) { - send_IPI_mask(&cpu_online_map, vector); + send_IPI_mask(cpu_online_mask, vector); } #endif /* __ASM_ES7000_IPI_H */ diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index eed6e305291f..746f37a7963a 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -24,7 +24,7 @@ struct genapic { int (*probe)(void); int (*apic_id_registered)(void); - const cpumask_t *(*target_cpus)(void); + const struct cpumask *(*target_cpus)(void); int int_delivery_mode; int int_dest_mode; int ESR_DISABLE; @@ -57,15 +57,16 @@ struct genapic { unsigned (*get_apic_id)(unsigned long x); unsigned long apic_id_mask; - unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask); + unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, const struct cpumask *andmask); - void (*vector_allocation_domain)(int cpu, cpumask_t *retmask); + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); #ifdef CONFIG_SMP /* ipi */ - void (*send_IPI_mask)(const cpumask_t *mask, int vector); - void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector); + void (*send_IPI_mask)(const struct cpumask *mask, int vector); + void (*send_IPI_mask_allbutself)(const struct cpumask *mask, + int vector); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); #endif diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h index 244b71729ecb..adf32fb56aa6 100644 --- a/arch/x86/include/asm/genapic_64.h +++ b/arch/x86/include/asm/genapic_64.h @@ -20,17 +20,18 @@ struct genapic { u32 int_delivery_mode; u32 int_dest_mode; int (*apic_id_registered)(void); - const cpumask_t *(*target_cpus)(void); - void (*vector_allocation_domain)(int cpu, cpumask_t *retmask); + const struct cpumask *(*target_cpus)(void); + void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); void (*init_apic_ldr)(void); /* ipi */ - void (*send_IPI_mask)(const cpumask_t *mask, int vector); - void (*send_IPI_mask_allbutself)(const cpumask_t *mask, int vector); + void (*send_IPI_mask)(const struct cpumask *mask, int vector); + void (*send_IPI_mask_allbutself)(const struct cpumask *mask, + int vector); void (*send_IPI_allbutself)(int vector); void (*send_IPI_all)(int vector); void (*send_IPI_self)(int vector); /* */ - unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask); + unsigned int 
(*cpu_mask_to_apicid)(const struct cpumask *cpumask); unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, const struct cpumask *andmask); unsigned int (*phys_pkg_id)(int index_msb); diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index 24b6e613edfa..c745a306f7d3 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h @@ -117,7 +117,8 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector, native_apic_mem_write(APIC_ICR, cfg); } -static inline void send_IPI_mask_sequence(const cpumask_t *mask, int vector) +static inline void send_IPI_mask_sequence(const struct cpumask *mask, + int vector) { unsigned long flags; unsigned long query_cpu; @@ -128,14 +129,15 @@ static inline void send_IPI_mask_sequence(const cpumask_t *mask, int vector) * - mbligh */ local_irq_save(flags); - for_each_cpu_mask_nr(query_cpu, *mask) { + for_each_cpu(query_cpu, mask) { __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), vector, APIC_DEST_PHYSICAL); } local_irq_restore(flags); } -static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector) +static inline void send_IPI_mask_allbutself(const struct cpumask *mask, + int vector) { unsigned long flags; unsigned int query_cpu; @@ -144,7 +146,7 @@ static inline void send_IPI_mask_allbutself(const cpumask_t *mask, int vector) /* See Hack comment above */ local_irq_save(flags); - for_each_cpu_mask_nr(query_cpu, *mask) + for_each_cpu(query_cpu, mask) if (query_cpu != this_cpu) __send_IPI_dest_field( per_cpu(x86_cpu_to_apicid, query_cpu), diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index df8e024c43c5..8863d978cb96 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -8,12 +8,12 @@ #define APIC_DFR_VALUE (APIC_DFR_FLAT) -static inline const cpumask_t *target_cpus(void) +static inline const struct cpumask *target_cpus(void) { #ifdef CONFIG_SMP - return &cpu_online_map; + return cpu_online_mask; #else - return &cpumask_of_cpu(0); + return cpumask_of(0); #endif } @@ -62,9 +62,9 @@ static inline int apic_id_registered(void) return physid_isset(read_apic_id(), phys_cpu_present_map); } -static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) +static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask) { - return cpus_addr(*cpumask)[0]; + return cpumask_bits(cpumask)[0]; } static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, @@ -98,7 +98,7 @@ static inline int apicid_to_node(int logical_apicid) #endif } -static inline void vector_allocation_domain(int cpu, cpumask_t *retmask) +static inline void vector_allocation_domain(int cpu, struct cpumask *retmask) { /* Careful. 
Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h index 9353ab854a10..191312d155da 100644 --- a/arch/x86/include/asm/mach-default/mach_ipi.h +++ b/arch/x86/include/asm/mach-default/mach_ipi.h @@ -4,8 +4,8 @@ /* Avoid include hell */ #define NMI_VECTOR 0x02 -void send_IPI_mask_bitmask(const cpumask_t *mask, int vector); -void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); +void send_IPI_mask_bitmask(const struct cpumask *mask, int vector); +void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); void __send_IPI_shortcut(unsigned int shortcut, int vector); extern int no_broadcast; @@ -15,17 +15,17 @@ extern int no_broadcast; #define send_IPI_mask (genapic->send_IPI_mask) #define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself) #else -static inline void send_IPI_mask(const cpumask_t *mask, int vector) +static inline void send_IPI_mask(const struct cpumask *mask, int vector) { send_IPI_mask_bitmask(mask, vector); } -void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); +void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); #endif static inline void __local_send_IPI_allbutself(int vector) { if (no_broadcast || vector == NMI_VECTOR) - send_IPI_mask_allbutself(&cpu_online_map, vector); + send_IPI_mask_allbutself(cpu_online_mask, vector); else __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); } @@ -33,7 +33,7 @@ static inline void __local_send_IPI_allbutself(int vector) static inline void __local_send_IPI_all(int vector) { if (no_broadcast || vector == NMI_VECTOR) - send_IPI_mask(&cpu_online_map, vector); + send_IPI_mask(cpu_online_mask, vector); else __send_IPI_shortcut(APIC_DEST_ALLINC, vector); } diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h index c734d7acc430..a8374c652778 100644 --- a/arch/x86/include/asm/numaq/ipi.h +++ b/arch/x86/include/asm/numaq/ipi.h @@ -1,26 +1,22 @@ #ifndef __ASM_NUMAQ_IPI_H #define __ASM_NUMAQ_IPI_H -void send_IPI_mask_sequence(const cpumask_t *mask, int vector); -void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); +void send_IPI_mask_sequence(const struct cpumask *mask, int vector); +void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); -static inline void send_IPI_mask(const cpumask_t *mask, int vector) +static inline void send_IPI_mask(const struct cpumask *mask, int vector) { send_IPI_mask_sequence(mask, vector); } static inline void send_IPI_allbutself(int vector) { - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - - if (!cpus_empty(mask)) - send_IPI_mask(&mask, vector); + send_IPI_mask_allbutself(cpu_online_mask, vector); } static inline void send_IPI_all(int vector) { - send_IPI_mask(&cpu_online_map, vector); + send_IPI_mask(cpu_online_mask, vector); } #endif /* __ASM_NUMAQ_IPI_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index c4a9aa52df6e..830b9fcb6427 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -60,7 +60,7 @@ struct smp_ops { void (*cpu_die)(unsigned int cpu); void (*play_dead)(void); - void (*send_call_func_ipi)(const cpumask_t *mask); + void (*send_call_func_ipi)(const struct cpumask *mask); void (*send_call_func_single_ipi)(int cpu); }; @@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu); void native_play_dead(void); void play_dead_common(void); -void 
native_send_call_func_ipi(const cpumask_t *mask); +void native_send_call_func_ipi(const struct cpumask *mask); void native_send_call_func_single_ipi(int cpu); extern void prefill_possible_map(void); diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index c772bb10b173..7fa5f49c2dda 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) return 1; } -static const cpumask_t *flat_target_cpus(void) +static const struct cpumask *flat_target_cpus(void) { - return &cpu_online_map; + return cpu_online_mask; } -static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask) +static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask) { /* Careful. Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest @@ -45,7 +45,8 @@ static void flat_vector_allocation_domain(int cpu, cpumask_t *retmask) * deliver interrupts to the wrong hyperthread when only one * hyperthread was specified in the interrupt desitination. */ - *retmask = (cpumask_t) { {[0] = APIC_ALL_CPUS, } }; + cpumask_clear(retmask); + cpumask_bits(retmask)[0] = APIC_ALL_CPUS; } /* @@ -77,16 +78,17 @@ static inline void _flat_send_IPI_mask(unsigned long mask, int vector) local_irq_restore(flags); } -static void flat_send_IPI_mask(const cpumask_t *cpumask, int vector) +static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) { - unsigned long mask = cpus_addr(*cpumask)[0]; + unsigned long mask = cpumask_bits(cpumask)[0]; _flat_send_IPI_mask(mask, vector); } -static void flat_send_IPI_mask_allbutself(const cpumask_t *cpumask, int vector) +static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, + int vector) { - unsigned long mask = cpus_addr(*cpumask)[0]; + unsigned long mask = cpumask_bits(cpumask)[0]; int cpu = smp_processor_id(); if (cpu < BITS_PER_LONG) @@ -103,8 +105,8 @@ static void flat_send_IPI_allbutself(int vector) int hotplug = 0; #endif if (hotplug || vector == NMI_VECTOR) { - if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu))) { - unsigned long mask = cpus_addr(cpu_online_map)[0]; + if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { + unsigned long mask = cpumask_bits(cpu_online_mask)[0]; if (cpu < BITS_PER_LONG) clear_bit(cpu, &mask); @@ -119,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector) static void flat_send_IPI_all(int vector) { if (vector == NMI_VECTOR) - flat_send_IPI_mask(&cpu_online_map, vector); + flat_send_IPI_mask(cpu_online_mask, vector); else __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); } @@ -153,9 +155,9 @@ static int flat_apic_id_registered(void) return physid_isset(read_xapic_id(), phys_cpu_present_map); } -static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask) +static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask) { - return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS; + return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; } static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, @@ -217,23 +219,23 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) return 0; } -static const cpumask_t *physflat_target_cpus(void) +static const struct cpumask *physflat_target_cpus(void) { - return &cpu_online_map; + return cpu_online_mask; } -static void physflat_vector_allocation_domain(int cpu, cpumask_t *retmask) +static void 
physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) { - cpus_clear(*retmask); - cpu_set(cpu, *retmask); + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); } -static void physflat_send_IPI_mask(const cpumask_t *cpumask, int vector) +static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) { send_IPI_mask_sequence(cpumask, vector); } -static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask, +static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { send_IPI_mask_allbutself(cpumask, vector); @@ -241,15 +243,15 @@ static void physflat_send_IPI_mask_allbutself(const cpumask_t *cpumask, static void physflat_send_IPI_allbutself(int vector) { - send_IPI_mask_allbutself(&cpu_online_map, vector); + send_IPI_mask_allbutself(cpu_online_mask, vector); } static void physflat_send_IPI_all(int vector) { - physflat_send_IPI_mask(&cpu_online_map, vector); + physflat_send_IPI_mask(cpu_online_mask, vector); } -static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask) +static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask) { int cpu; @@ -257,7 +259,7 @@ static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask) * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = first_cpu(*cpumask); + cpu = cpumask_first(cpumask); if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index e7d16f53b9cd..4716a0c9f936 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -22,18 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ -static const cpumask_t *x2apic_target_cpus(void) +static const struct cpumask *x2apic_target_cpus(void) { - return &cpumask_of_cpu(0); + return cpumask_of(0); } /* * for now each logical cpu is in its own vector allocation domain. */ -static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask) +static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) { - cpus_clear(*retmask); - cpu_set(cpu, *retmask); + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); } static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, @@ -55,27 +55,28 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, * at once. We have 16 cpu's in a cluster. This will minimize IPI register * writes. 
*/ -static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector) +static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) { unsigned long flags; unsigned long query_cpu; local_irq_save(flags); - for_each_cpu_mask_nr(query_cpu, *mask) + for_each_cpu(query_cpu, mask) __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_logical_apicid, query_cpu), vector, APIC_DEST_LOGICAL); local_irq_restore(flags); } -static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector) +static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, + int vector) { unsigned long flags; unsigned long query_cpu; unsigned long this_cpu = smp_processor_id(); local_irq_save(flags); - for_each_cpu_mask_nr(query_cpu, *mask) + for_each_cpu(query_cpu, mask) if (query_cpu != this_cpu) __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_logical_apicid, query_cpu), @@ -100,7 +101,7 @@ static void x2apic_send_IPI_allbutself(int vector) static void x2apic_send_IPI_all(int vector) { - x2apic_send_IPI_mask(&cpu_online_map, vector); + x2apic_send_IPI_mask(cpu_online_mask, vector); } static int x2apic_apic_id_registered(void) @@ -108,7 +109,7 @@ static int x2apic_apic_id_registered(void) return 1; } -static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask) +static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) { int cpu; @@ -116,7 +117,7 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask) * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = first_cpu(*cpumask); + cpu = cpumask_first(cpumask); if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_logical_apicid, cpu); else diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index 9d0386c7e798..b255507884f2 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -29,15 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. 
*/ -static const cpumask_t *x2apic_target_cpus(void) +static const struct cpumask *x2apic_target_cpus(void) { - return &cpumask_of_cpu(0); + return cpumask_of(0); } -static void x2apic_vector_allocation_domain(int cpu, cpumask_t *retmask) +static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) { - cpus_clear(*retmask); - cpu_set(cpu, *retmask); + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); } static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, @@ -53,27 +53,28 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, x2apic_icr_write(cfg, apicid); } -static void x2apic_send_IPI_mask(const cpumask_t *mask, int vector) +static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) { unsigned long flags; unsigned long query_cpu; local_irq_save(flags); - for_each_cpu_mask_nr(query_cpu, *mask) { + for_each_cpu(query_cpu, mask) { __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), vector, APIC_DEST_PHYSICAL); } local_irq_restore(flags); } -static void x2apic_send_IPI_mask_allbutself(const cpumask_t *mask, int vector) +static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, + int vector) { unsigned long flags; unsigned long query_cpu; unsigned long this_cpu = smp_processor_id(); local_irq_save(flags); - for_each_cpu_mask_nr(query_cpu, *mask) { + for_each_cpu(query_cpu, mask) { if (query_cpu != this_cpu) __x2apic_send_IPI_dest( per_cpu(x86_cpu_to_apicid, query_cpu), @@ -99,7 +100,7 @@ static void x2apic_send_IPI_allbutself(int vector) static void x2apic_send_IPI_all(int vector) { - x2apic_send_IPI_mask(&cpu_online_map, vector); + x2apic_send_IPI_mask(cpu_online_mask, vector); } static int x2apic_apic_id_registered(void) @@ -107,7 +108,7 @@ static int x2apic_apic_id_registered(void) return 1; } -static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask) +static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) { int cpu; @@ -115,7 +116,7 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask) * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = first_cpu(*cpumask); + cpu = cpumask_first(cpumask); if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 22596ec94c82..3984682cd849 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -75,15 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. 
*/ -static const cpumask_t *uv_target_cpus(void) +static const struct cpumask *uv_target_cpus(void) { - return &cpumask_of_cpu(0); + return cpumask_of(0); } -static void uv_vector_allocation_domain(int cpu, cpumask_t *retmask) +static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) { - cpus_clear(*retmask); - cpu_set(cpu, *retmask); + cpumask_clear(retmask); + cpumask_set_cpu(cpu, retmask); } int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) @@ -122,20 +122,20 @@ static void uv_send_IPI_one(int cpu, int vector) uv_write_global_mmr64(pnode, UVH_IPI_INT, val); } -static void uv_send_IPI_mask(const cpumask_t *mask, int vector) +static void uv_send_IPI_mask(const struct cpumask *mask, int vector) { unsigned int cpu; - for_each_cpu_mask_nr(cpu, *mask) + for_each_cpu(cpu, mask) uv_send_IPI_one(cpu, vector); } -static void uv_send_IPI_mask_allbutself(const cpumask_t *mask, int vector) +static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { unsigned int cpu; unsigned int this_cpu = smp_processor_id(); - for_each_cpu_mask_nr(cpu, *mask) + for_each_cpu(cpu, mask) if (cpu != this_cpu) uv_send_IPI_one(cpu, vector); } @@ -152,7 +152,7 @@ static void uv_send_IPI_allbutself(int vector) static void uv_send_IPI_all(int vector) { - uv_send_IPI_mask(&cpu_online_map, vector); + uv_send_IPI_mask(cpu_online_mask, vector); } static int uv_apic_id_registered(void) @@ -164,7 +164,7 @@ static void uv_init_apic_ldr(void) { } -static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask) +static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) { int cpu; @@ -172,7 +172,7 @@ static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask) * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = first_cpu(*cpumask); + cpu = cpumask_first(cpumask); if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index 86aa50fc65a1..285bbf8831fa 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c @@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector) /* * This is only used on smaller machines. 
*/ -void send_IPI_mask_bitmask(const cpumask_t *cpumask, int vector) +void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) { - unsigned long mask = cpus_addr(*cpumask)[0]; + unsigned long mask = cpumask_bits(cpumask)[0]; unsigned long flags; local_irq_save(flags); - WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); + WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); __send_IPI_dest_field(mask, vector); local_irq_restore(flags); } -void send_IPI_mask_sequence(const cpumask_t *mask, int vector) +void send_IPI_mask_sequence(const struct cpumask *mask, int vector) { unsigned long flags; unsigned int query_cpu; @@ -139,12 +139,12 @@ void send_IPI_mask_sequence(const cpumask_t *mask, int vector) */ local_irq_save(flags); - for_each_cpu_mask_nr(query_cpu, *mask) + for_each_cpu(query_cpu, mask) __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); local_irq_restore(flags); } -void send_IPI_mask_allbutself(const cpumask_t *mask, int vector) +void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) { unsigned long flags; unsigned int query_cpu; @@ -153,7 +153,7 @@ void send_IPI_mask_allbutself(const cpumask_t *mask, int vector) /* See Hack comment above */ local_irq_save(flags); - for_each_cpu_mask_nr(query_cpu, *mask) + for_each_cpu(query_cpu, mask) if (query_cpu != this_cpu) __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 341df946f9a9..49ed667b06f3 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -118,15 +118,15 @@ static void native_smp_send_reschedule(int cpu) WARN_ON(1); return; } - send_IPI_mask(&cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); + send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); } void native_send_call_func_single_ipi(int cpu) { - send_IPI_mask(&cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); + send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); } -void native_send_call_func_ipi(const cpumask_t *mask) +void native_send_call_func_ipi(const struct cpumask *mask) { cpumask_t allbutself; diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index b3a95868839b..c44e2069c7c7 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -411,22 +411,23 @@ static void xen_smp_send_reschedule(int cpu) xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); } -static void xen_send_IPI_mask(const cpumask_t *mask, enum ipi_vector vector) +static void xen_send_IPI_mask(const struct cpumask *mask, + enum ipi_vector vector) { unsigned cpu; - for_each_cpu_and(cpu, mask, &cpu_online_map) + for_each_cpu_and(cpu, mask, cpu_online_mask) xen_send_IPI_one(cpu, vector); } -static void xen_smp_send_call_function_ipi(const cpumask_t *mask) +static void xen_smp_send_call_function_ipi(const struct cpumask *mask) { int cpu; xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); /* Make sure other vcpus get a chance to run if they need to. 
*/ - for_each_cpu_mask_nr(cpu, *mask) { + for_each_cpu(cpu, mask) { if (xen_vcpu_stolen(cpu)) { HYPERVISOR_sched_op(SCHEDOP_yield, 0); break; @@ -436,7 +437,7 @@ static void xen_smp_send_call_function_ipi(const cpumask_t *mask) static void xen_smp_send_call_function_single_ipi(int cpu) { - xen_send_IPI_mask(&cpumask_of_cpu(cpu), + xen_send_IPI_mask(cpumask_of(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); } -- cgit v1.2.3 From 78637a97b7fe1df51f40a460448df0b93d511176 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:34:00 -0800 Subject: x86: Set CONFIG_NR_CPUS even on UP Impact: cleanup Signed-off-by: Rusty Russell Signed-off-by: Mike Travis --- arch/x86/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1fd44352f27c..4a3f5851ec64 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -599,12 +599,12 @@ config MAXSMP If unsure, say N. config NR_CPUS - depends on SMP int "Maximum number of CPUs" if SMP && !MAXSMP range 2 512 if SMP && !MAXSMP + default "1" if !SMP default "4096" if MAXSMP - default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000 - default "8" + default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000) + default "8" if SMP help This allows you to specify the maximum number of CPUs which this kernel will support. The maximum supported value is 512 and the -- cgit v1.2.3 From 168ef543a43678146e06b3911e987ac021d575b8 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:34:01 -0800 Subject: x86: prepare for cpumask iterators to only go to nr_cpu_ids Impact: cleanup, futureproof In fact, all cpumask ops will only be valid (in general) for bit numbers < nr_cpu_ids. So use that instead of NR_CPUS in various places. This is always safe: no cpu number can be >= nr_cpu_ids, and nr_cpu_ids is initialized to NR_CPUS at boot. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Acked-by: Ingo Molnar --- arch/x86/kernel/apic.c | 2 +- arch/x86/mach-voyager/voyager_smp.c | 2 +- arch/x86/mm/numa_64.c | 4 ++-- arch/x86/mm/srat_64.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index a375791c08ca..3b630ec24935 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -2106,7 +2106,7 @@ __cpuinit int apic_is_clustered_box(void) bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); bitmap_zero(clustermap, NUM_APIC_CLUSTERS); - for (i = 0; i < NR_CPUS; i++) { + for (i = 0; i < nr_cpu_ids; i++) { /* are we being called early in kernel startup? */ if (bios_cpu_apicid) { id = bios_cpu_apicid[i]; diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 9c990185e9f2..a5bc05492b1e 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -672,7 +672,7 @@ void __init smp_boot_cpus(void) /* loop over all the extended VIC CPUs and boot them. 
The * Quad CPUs must be bootstrapped by their extended VIC cpu */ - for (i = 0; i < NR_CPUS; i++) { + for (i = 0; i < nr_cpu_ids; i++) { if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) continue; do_boot_cpu(i); diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index cebcbf152d46..71a14f89f89e 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -278,7 +278,7 @@ void __init numa_init_array(void) int rr, i; rr = first_node(node_online_map); - for (i = 0; i < NR_CPUS; i++) { + for (i = 0; i < nr_cpu_ids; i++) { if (early_cpu_to_node(i) != NUMA_NO_NODE) continue; numa_set_node(i, rr); @@ -549,7 +549,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn) memnodemap[0] = 0; node_set_online(0); node_set(0, node_possible_map); - for (i = 0; i < NR_CPUS; i++) + for (i = 0; i < nr_cpu_ids; i++) numa_set_node(i, 0); e820_register_active_regions(0, start_pfn, last_pfn); setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 51c0a2fc14fe..09737c8af074 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) if (!node_online(i)) setup_node_bootmem(i, nodes[i].start, nodes[i].end); - for (i = 0; i < NR_CPUS; i++) { + for (i = 0; i < nr_cpu_ids; i++) { int node = early_cpu_to_node(i); if (node == NUMA_NO_NODE) -- cgit v1.2.3 From 1de88cd4a33fcc2fcf70cbce01688723f728675d Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:34:02 -0800 Subject: x86: Use cpumask accessors code for possible/present maps. Impact: use new API Use the accessors rather than frobbing bits directly. Most of this is in arch code I haven't even compiled, but is straightforward. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis --- arch/x86/kernel/apic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 3b630ec24935..edda4c00e3d2 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1903,8 +1903,8 @@ void __cpuinit generic_processor_info(int apicid, int version) } #endif - cpu_set(cpu, cpu_possible_map); - cpu_set(cpu, cpu_present_map); + set_cpu_possible(cpu, true); + set_cpu_present(cpu, true); } #ifdef CONFIG_X86_64 -- cgit v1.2.3 From b2bb85549134c005e997e5a7ed303bda6a1ae738 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:34:03 -0800 Subject: x86: Remove cpumask games in x86/kernel/cpu/intel_cacheinfo.c Impact: remove cpumask_t from stack. We should not try to save and restore cpus_allowed on current. We can't use work_on_cpu() here, since it's in the hotplug cpu path (if anyone else tries to get the hotplug lock from a workqueue we could deadlock against them). Fortunately, we can just use smp_call_function_single() since the function can run from an interrupt. 
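For illustration, the shape of that conversion is roughly the sketch below; the helper name do_cpuid_lookup() is a placeholder, not code from this patch, and only stands in for the per-CPU work that used to require migrating current.

/* Illustrative sketch only -- not part of the patch. */
static void run_on_target_cpu(void *_retval)
{
        int *retval = _retval;

        /* Executes on the CPU handed to smp_call_function_single(),
         * so smp_processor_id() names the CPU being queried. */
        *retval = do_cpuid_lookup(smp_processor_id());
}

static int detect_on(unsigned int cpu)
{
        int retval = -ENOENT;

        /* wait=1: return only after the callback has run on 'cpu' */
        smp_call_function_single(cpu, run_on_target_cpu, &retval, 1);
        return retval;
}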
Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Cc: Ingo Molnar Cc: Oleg Nesterov --- arch/x86/kernel/cpu/intel_cacheinfo.c | 41 ++++++++++++++++------------------- 1 file changed, 19 insertions(+), 22 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 43ea612d3e9d..fb7f946cb65e 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -534,31 +534,16 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) per_cpu(cpuid4_info, cpu) = NULL; } -static int __cpuinit detect_cache_attributes(unsigned int cpu) +static void get_cpu_leaves(void *_retval) { - struct _cpuid4_info *this_leaf; - unsigned long j; - int retval; - cpumask_t oldmask; - - if (num_cache_leaves == 0) - return -ENOENT; - - per_cpu(cpuid4_info, cpu) = kzalloc( - sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); - if (per_cpu(cpuid4_info, cpu) == NULL) - return -ENOMEM; - - oldmask = current->cpus_allowed; - retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); - if (retval) - goto out; + int j, *retval = _retval, cpu = smp_processor_id(); /* Do cpuid and store the results */ for (j = 0; j < num_cache_leaves; j++) { + struct _cpuid4_info *this_leaf; this_leaf = CPUID4_INFO_IDX(cpu, j); - retval = cpuid4_cache_lookup(j, this_leaf); - if (unlikely(retval < 0)) { + *retval = cpuid4_cache_lookup(j, this_leaf); + if (unlikely(*retval < 0)) { int i; for (i = 0; i < j; i++) @@ -567,9 +552,21 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) } cache_shared_cpu_map_setup(cpu, j); } - set_cpus_allowed_ptr(current, &oldmask); +} + +static int __cpuinit detect_cache_attributes(unsigned int cpu) +{ + int retval; + + if (num_cache_leaves == 0) + return -ENOENT; + + per_cpu(cpuid4_info, cpu) = kzalloc( + sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); + if (per_cpu(cpuid4_info, cpu) == NULL) + return -ENOMEM; -out: + smp_call_function_single(cpu, get_cpu_leaves, &retval, true); if (retval) { kfree(per_cpu(cpuid4_info, cpu)); per_cpu(cpuid4_info, cpu) = NULL; -- cgit v1.2.3 From 4cd4601d592d07b26e4b7d2bb8fcd55bbfd6cf6e Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:34:04 -0800 Subject: x86: use work_on_cpu in x86/kernel/cpu/mcheck/mce_amd_64.c Impact: Remove cpumask_t's from stack. Simple transition to work_on_cpu(), rather than cpumask games. 
Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Cc: Ingo Molnar Cc: Robert Richter Cc: jacob.shin@amd.com --- arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 108 ++++++++++++++++---------------- 1 file changed, 55 insertions(+), 53 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 5eb390a4b2e9..a1de80f368f1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c @@ -83,34 +83,41 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ * CPU Initialization */ +struct thresh_restart { + struct threshold_block *b; + int reset; + u16 old_limit; +}; + /* must be called with correct cpu affinity */ -static void threshold_restart_bank(struct threshold_block *b, - int reset, u16 old_limit) +static long threshold_restart_bank(void *_tr) { + struct thresh_restart *tr = _tr; u32 mci_misc_hi, mci_misc_lo; - rdmsr(b->address, mci_misc_lo, mci_misc_hi); + rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi); - if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) - reset = 1; /* limit cannot be lower than err count */ + if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) + tr->reset = 1; /* limit cannot be lower than err count */ - if (reset) { /* reset err count and overflow bit */ + if (tr->reset) { /* reset err count and overflow bit */ mci_misc_hi = (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | - (THRESHOLD_MAX - b->threshold_limit); - } else if (old_limit) { /* change limit w/o reset */ + (THRESHOLD_MAX - tr->b->threshold_limit); + } else if (tr->old_limit) { /* change limit w/o reset */ int new_count = (mci_misc_hi & THRESHOLD_MAX) + - (old_limit - b->threshold_limit); + (tr->old_limit - tr->b->threshold_limit); mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) | (new_count & THRESHOLD_MAX); } - b->interrupt_enable ? + tr->b->interrupt_enable ? 
(mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) : (mci_misc_hi &= ~MASK_INT_TYPE_HI); mci_misc_hi |= MASK_COUNT_EN_HI; - wrmsr(b->address, mci_misc_lo, mci_misc_hi); + wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi); + return 0; } /* cpu init entry point, called from mce.c with preempt off */ @@ -120,6 +127,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) unsigned int cpu = smp_processor_id(); u8 lvt_off; u32 low = 0, high = 0, address = 0; + struct thresh_restart tr; for (bank = 0; bank < NR_BANKS; ++bank) { for (block = 0; block < NR_BLOCKS; ++block) { @@ -162,7 +170,10 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) wrmsr(address, low, high); threshold_defaults.address = address; - threshold_restart_bank(&threshold_defaults, 0, 0); + tr.b = &threshold_defaults; + tr.reset = 0; + tr.old_limit = 0; + threshold_restart_bank(&tr); } } } @@ -251,20 +262,6 @@ struct threshold_attr { ssize_t(*store) (struct threshold_block *, const char *, size_t count); }; -static void affinity_set(unsigned int cpu, cpumask_t *oldmask, - cpumask_t *newmask) -{ - *oldmask = current->cpus_allowed; - cpus_clear(*newmask); - cpu_set(cpu, *newmask); - set_cpus_allowed_ptr(current, newmask); -} - -static void affinity_restore(const cpumask_t *oldmask) -{ - set_cpus_allowed_ptr(current, oldmask); -} - #define SHOW_FIELDS(name) \ static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ { \ @@ -277,15 +274,16 @@ static ssize_t store_interrupt_enable(struct threshold_block *b, const char *buf, size_t count) { char *end; - cpumask_t oldmask, newmask; + struct thresh_restart tr; unsigned long new = simple_strtoul(buf, &end, 0); if (end == buf) return -EINVAL; b->interrupt_enable = !!new; - affinity_set(b->cpu, &oldmask, &newmask); - threshold_restart_bank(b, 0, 0); - affinity_restore(&oldmask); + tr.b = b; + tr.reset = 0; + tr.old_limit = 0; + work_on_cpu(b->cpu, threshold_restart_bank, &tr); return end - buf; } @@ -294,8 +292,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b, const char *buf, size_t count) { char *end; - cpumask_t oldmask, newmask; - u16 old; + struct thresh_restart tr; unsigned long new = simple_strtoul(buf, &end, 0); if (end == buf) return -EINVAL; @@ -303,34 +300,36 @@ static ssize_t store_threshold_limit(struct threshold_block *b, new = THRESHOLD_MAX; if (new < 1) new = 1; - old = b->threshold_limit; + tr.old_limit = b->threshold_limit; b->threshold_limit = new; + tr.b = b; + tr.reset = 0; - affinity_set(b->cpu, &oldmask, &newmask); - threshold_restart_bank(b, 0, old); - affinity_restore(&oldmask); + work_on_cpu(b->cpu, threshold_restart_bank, &tr); return end - buf; } -static ssize_t show_error_count(struct threshold_block *b, char *buf) +static long local_error_count(void *_b) { - u32 high, low; - cpumask_t oldmask, newmask; - affinity_set(b->cpu, &oldmask, &newmask); + struct threshold_block *b = _b; + u32 low, high; + rdmsr(b->address, low, high); - affinity_restore(&oldmask); - return sprintf(buf, "%x\n", - (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit)); + return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit); +} + +static ssize_t show_error_count(struct threshold_block *b, char *buf) +{ + return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b)); } static ssize_t store_error_count(struct threshold_block *b, const char *buf, size_t count) { - cpumask_t oldmask, newmask; - affinity_set(b->cpu, &oldmask, &newmask); - threshold_restart_bank(b, 1, 0); - affinity_restore(&oldmask); + struct 
thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 }; + + work_on_cpu(b->cpu, threshold_restart_bank, &tr); return 1; } @@ -463,12 +462,19 @@ out_free: return err; } +static long local_allocate_threshold_blocks(void *_bank) +{ + unsigned int *bank = _bank; + + return allocate_threshold_blocks(smp_processor_id(), *bank, 0, + MSR_IA32_MC0_MISC + *bank * 4); +} + /* symlinks sibling shared banks to first core. first core owns dir/files. */ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) { int i, err = 0; struct threshold_bank *b = NULL; - cpumask_t oldmask, newmask; char name[32]; sprintf(name, "threshold_bank%i", bank); @@ -519,11 +525,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) per_cpu(threshold_banks, cpu)[bank] = b; - affinity_set(cpu, &oldmask, &newmask); - err = allocate_threshold_blocks(cpu, bank, 0, - MSR_IA32_MC0_MISC + bank * 4); - affinity_restore(&oldmask); - + err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank); if (err) goto out_free; -- cgit v1.2.3 From e4d98207ea3f3d15eb664282df16d18c4ac86f80 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:34:05 -0800 Subject: x86: xen: use smp_call_function_many() Impact: use new API, remove cpumask from stack. Change smp_call_function_mask() callers to smp_call_function_many(). This removes a cpumask from the stack, and falls back should allocating the cpumask var fail (only possible with CONFIG_CPUMASKS_OFFSTACK). Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Cc: jeremy@xensource.com --- arch/x86/xen/mmu.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 636ef4caa52d..e59e53b11e2b 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1079,7 +1079,7 @@ static void drop_other_mm_ref(void *info) static void xen_drop_mm_ref(struct mm_struct *mm) { - cpumask_t mask; + cpumask_var_t mask; unsigned cpu; if (current->active_mm == mm) { @@ -1091,7 +1091,16 @@ static void xen_drop_mm_ref(struct mm_struct *mm) } /* Get the "official" set of cpus referring to our pagetable. */ - mask = mm->cpu_vm_mask; + if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) { + for_each_online_cpu(cpu) { + if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask) + && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) + continue; + smp_call_function_single(cpu, drop_other_mm_ref, mm, 1); + } + return; + } + cpumask_copy(mask, &mm->cpu_vm_mask); /* It's possible that a vcpu may have a stale reference to our cr3, because its in lazy mode, and it hasn't yet flushed @@ -1100,11 +1109,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm) if needed. */ for_each_online_cpu(cpu) { if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) - cpu_set(cpu, mask); + cpumask_set_cpu(cpu, mask); } - if (!cpus_empty(mask)) - smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); + if (!cpumask_empty(mask)) + smp_call_function_many(mask, drop_other_mm_ref, mm, 1); + free_cpumask_var(mask); } #else static void xen_drop_mm_ref(struct mm_struct *mm) -- cgit v1.2.3 From 83b19597f793fd5f91533bda0dc2eb3d21936798 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 16 Dec 2008 17:34:06 -0800 Subject: x86: Introduce topology_core_cpumask()/topology_thread_cpumask() Impact: new API The old topology_core_siblings() and topology_thread_siblings() return a cpumask_t; these new ones return a (const) struct cpumask *. 
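As a rough caller-side illustration of the difference (hypothetical snippet, not taken from this patch):

/* Illustrative only: topology_core_siblings() hands back a full cpumask_t
 * by value, while topology_core_cpumask() just returns a pointer. */
static void walk_core_siblings(int cpu)
{
        const struct cpumask *core = topology_core_cpumask(cpu);
        int sibling;

        for_each_cpu(sibling, core)
                pr_debug("cpu %d shares a core with cpu %d\n", cpu, sibling);
}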
Signed-off-by: Rusty Russell Signed-off-by: Mike Travis --- arch/x86/include/asm/topology.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index ff386ff50ed7..79e31e9dcdda 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -226,6 +226,8 @@ extern cpumask_t cpu_coregroup_map(int cpu); #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) +#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) +#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) /* indicates that pointers to the topology cpumask_t maps are valid */ #define arch_provides_topology_pointers yes -- cgit v1.2.3 From d733e00d7c10cc68333fbb88108bb15bb044f61b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 17 Dec 2008 13:35:51 +0100 Subject: x86: update io_apic.c to the new cpumask code Impact: build fix The sparseirq tree crossed with the cpumask changes, fix the fallout. Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 6bd51ce3ce32..58938cc4b7d3 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -347,13 +347,14 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) } } -static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) +static void +set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) { struct irq_cfg *cfg = desc->chip_data; if (!cfg->move_in_progress) { /* it means that domain is not changed */ - if (!cpus_intersects(desc->affinity, mask)) + if (!cpumask_intersects(&desc->affinity, mask)) cfg->move_desc_pending = 1; } } -- cgit v1.2.3 From a775a38b1353161a6d7af86b667d6523c12c1a37 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Wed, 17 Dec 2008 15:21:39 -0800 Subject: x86: fix cpu_mask_to_apicid_and to include cpu_online_mask Impact: fix potential APIC crash In determining the destination apicid, there are usually three cpumasks that are considered: the incoming cpumask arg, cfg->domain and the cpu_online_mask. Since we are just introducing the cpu_mask_to_apicid_and function, make sure it includes the cpu_online_mask in it's evaluation. [Added with this patch.] There are two io_apic.c functions that did not previously use the cpu_online_mask: setup_IO_APIC_irq and msi_compose_msg. Both of these simply used cpu_mask_to_apicid(cfg->domain & TARGET_CPUS), and all but one arch (NUMAQ[*]) returns only online cpus in the TARGET_CPUS mask, so the behavior is identical for all cases. [*: NUMAQ bug?] Note that alloc_cpumask_var is only used for the 32-bit cases where it's highly likely that the cpumask set size will be small and therefore CPUMASK_OFFSTACK=n. But if that's not the case, failing the allocate will cause the same return value as the default. 
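[Editor's note, a condensed sketch of the pattern the 32-bit variants below follow. It is illustrative only, borrows the es7000-style cpu_to_logical_apicid() helper, and omits the apic-cluster validation that the real helpers perform: AND the incoming mask, the and-mask and cpu_online_mask in a temporary cpumask_var_t, and return the default apicid if the allocation fails, which can only happen with CPUMASK_OFFSTACK=y.]

	/* hedged sketch, not a drop-in replacement for any in-tree helper */
	static unsigned int example_cpu_mask_to_apicid_and(const struct cpumask *inmask,
							   const struct cpumask *andmask)
	{
		unsigned int apicid = cpu_to_logical_apicid(0);	/* default */
		cpumask_var_t tmp;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
			return apicid;	/* allocation failed: same value as the default */

		cpumask_and(tmp, inmask, andmask);
		cpumask_and(tmp, tmp, cpu_online_mask);
		if (!cpumask_empty(tmp))
			apicid = cpu_to_logical_apicid(cpumask_first(tmp));

		free_cpumask_var(tmp);
		return apicid;
	}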
Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bigsmp/apic.h | 4 ++- arch/x86/include/asm/es7000/apic.h | 40 ++++++++++++--------------- arch/x86/include/asm/mach-default/mach_apic.h | 3 +- arch/x86/include/asm/summit/apic.h | 30 +++++++++++--------- arch/x86/kernel/genapic_flat_64.c | 4 ++- arch/x86/kernel/genx2apic_cluster.c | 4 ++- arch/x86/kernel/genx2apic_phys.c | 4 ++- arch/x86/kernel/genx2apic_uv_x.c | 4 ++- 8 files changed, 52 insertions(+), 41 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index 976399debb3f..d8dd9f537911 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h @@ -138,7 +138,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = cpumask_any_and(cpumask, andmask); + for_each_cpu_and(cpu, cpumask, andmask) + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; if (cpu < nr_cpu_ids) return cpu_to_logical_apicid(cpu); diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index ba8423c5363f..51ac1230294e 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -214,51 +214,47 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return apicid; } -static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, + +static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { int num_bits_set; - int num_bits_set2; int cpus_found = 0; int cpu; - int apicid = 0; + int apicid = cpu_to_logical_apicid(0); + cpumask_var_t cpumask; + + if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) + return apicid; + + cpumask_and(cpumask, inmask, andmask); + cpumask_and(cpumask, cpumask, cpu_online_mask); num_bits_set = cpumask_weight(cpumask); - num_bits_set2 = cpumask_weight(andmask); - num_bits_set = min(num_bits_set, num_bits_set2); /* Return id to all */ - if (num_bits_set >= nr_cpu_ids) -#if defined CONFIG_ES7000_CLUSTERED_APIC - return 0xFF; -#else - return cpu_to_logical_apicid(0); -#endif + if (num_bits_set == NR_CPUS) + goto exit; /* * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. 
*/ - cpu = cpumask_first_and(cpumask, andmask); + cpu = cpumask_first(cpumask); apicid = cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpumask_test_cpu(cpu, cpumask) && - cpumask_test_cpu(cpu, andmask)) { + if (cpumask_test_cpu(cpu, cpumask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)) { - printk(KERN_WARNING - "%s: Not a valid mask!\n", __func__); -#if defined CONFIG_ES7000_CLUSTERED_APIC - return 0xFF; -#else + apicid_cluster(new_apicid)){ + printk ("%s: Not a valid mask!\n", __func__); return cpu_to_logical_apicid(0); -#endif } apicid = new_apicid; cpus_found++; } cpu++; } +exit: + free_cpumask_var(cpumask); return apicid; } diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 8863d978cb96..cc09cbbee27e 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h @@ -72,8 +72,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, { unsigned long mask1 = cpumask_bits(cpumask)[0]; unsigned long mask2 = cpumask_bits(andmask)[0]; + unsigned long mask3 = cpumask_bits(cpu_online_mask)[0]; - return (unsigned int)(mask1 & mask2); + return (unsigned int)(mask1 & mask2 & mask3); } static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 651a93849341..99327d1be49f 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -170,35 +170,37 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) return apicid; } -static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, +static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { int num_bits_set; - int num_bits_set2; int cpus_found = 0; int cpu; - int apicid = 0; + int apicid = 0xFF; + cpumask_var_t cpumask; + + if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) + return (int) 0xFF; + + cpumask_and(cpumask, inmask, andmask); + cpumask_and(cpumask, cpumask, cpu_online_mask); num_bits_set = cpumask_weight(cpumask); - num_bits_set2 = cpumask_weight(andmask); - num_bits_set = min(num_bits_set, num_bits_set2); /* Return id to all */ - if (num_bits_set >= nr_cpu_ids) - return 0xFF; + if (num_bits_set == nr_cpu_ids) + goto exit; /* * The cpus in the mask must all be on the apic cluster. If are not * on the same apicid cluster return default value of TARGET_CPUS. 
*/ - cpu = cpumask_first_and(cpumask, andmask); + cpu = cpumask_first(cpumask); apicid = cpu_to_logical_apicid(cpu); while (cpus_found < num_bits_set) { - if (cpumask_test_cpu(cpu, cpumask) - && cpumask_test_cpu(cpu, andmask)) { + if (cpumask_test_cpu(cpu, cpumask)) { int new_apicid = cpu_to_logical_apicid(cpu); if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)) { - printk(KERN_WARNING - "%s: Not a valid mask!\n", __func__); + apicid_cluster(new_apicid)){ + printk ("%s: Not a valid mask!\n", __func__); return 0xFF; } apicid = apicid | new_apicid; @@ -206,6 +208,8 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, } cpu++; } +exit: + free_cpumask_var(cpumask); return apicid; } diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index 7fa5f49c2dda..34185488e4fb 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c @@ -276,7 +276,9 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = cpumask_any_and(cpumask, andmask); + for_each_cpu_and(cpu, cpumask, andmask) + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; if (cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); return BAD_APICID; diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index 4716a0c9f936..d451c9b9fdff 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -133,7 +133,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = cpumask_any_and(cpumask, andmask); + for_each_cpu_and(cpu, cpumask, andmask) + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; if (cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); return BAD_APICID; diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index b255507884f2..62895cf315ff 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -132,7 +132,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = cpumask_any_and(cpumask, andmask); + for_each_cpu_and(cpu, cpumask, andmask) + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; if (cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); return BAD_APICID; diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index 3984682cd849..0e88be11227d 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c @@ -188,7 +188,9 @@ static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ - cpu = cpumask_any_and(cpumask, andmask); + for_each_cpu_and(cpu, cpumask, andmask) + if (cpumask_test_cpu(cpu, cpu_online_mask)) + break; if (cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); return BAD_APICID; -- cgit v1.2.3 From 3b11ce7f542e415c90267b4482d4611410b468e6 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Wed, 17 Dec 2008 15:21:39 -0800 Subject: x86: use possible_cpus=NUM to extend the possible cpus allowed Impact: add new boot parameter Use possible_cpus=NUM kernel parameter to extend the number of possible cpus. 
The ability to HOTPLUG ON cpus that are "possible" but not "present" is dealt with in a later patch. Signed-off-by: Mike Travis --- Documentation/cpu-hotplug.txt | 17 +++++++++-------- arch/x86/kernel/apic.c | 20 ++++++++++++-------- arch/x86/kernel/smpboot.c | 25 +++++++++++++++++++++---- 3 files changed, 42 insertions(+), 20 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt index 94bbc27ddd4f..9d620c153b04 100644 --- a/Documentation/cpu-hotplug.txt +++ b/Documentation/cpu-hotplug.txt @@ -50,16 +50,17 @@ additional_cpus=n (*) Use this to limit hotpluggable cpus. This option sets cpu_possible_map = cpu_present_map + additional_cpus (*) Option valid only for following architectures -- x86_64, ia64 +- ia64 -ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT -to determine the number of potentially hot-pluggable cpus. The implementation -should only rely on this to count the # of cpus, but *MUST* not rely on the -apicid values in those tables for disabled apics. In the event BIOS doesn't -mark such hot-pluggable cpus as disabled entries, one could use this -parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map. +ia64 uses the number of disabled local apics in ACPI tables MADT to +determine the number of potentially hot-pluggable cpus. The implementation +should only rely on this to count the # of cpus, but *MUST* not rely +on the apicid values in those tables for disabled apics. In the event +BIOS doesn't mark such hot-pluggable cpus as disabled entries, one could +use this parameter "additional_cpus=x" to represent those cpus in the +cpu_possible_map. -possible_cpus=n [s390 only] use this to set hotpluggable cpus. +possible_cpus=n [s390,x86_64] use this to set hotpluggable cpus. This option sets possible_cpus bits in cpu_possible_map. Thus keeping the numbers of bits set constant even if the machine gets rebooted. diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 93cf2d13f335..f7a32a3beb2f 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1819,28 +1819,32 @@ void disconnect_bsp_APIC(int virt_wire_setup) void __cpuinit generic_processor_info(int apicid, int version) { int cpu; - cpumask_t tmp_map; /* * Validate version */ if (version == 0x0) { pr_warning("BIOS bug, APIC version is 0 for CPU#%d! " - "fixing up to 0x10. (tell your hw vendor)\n", - version); + "fixing up to 0x10. (tell your hw vendor)\n", + version); version = 0x10; } apic_version[apicid] = version; - if (num_processors >= NR_CPUS) { - pr_warning("WARNING: NR_CPUS limit of %i reached." - " Processor ignored.\n", NR_CPUS); + if (num_processors >= nr_cpu_ids) { + int max = nr_cpu_ids; + int thiscpu = max + disabled_cpus; + + pr_warning( + "ACPI: NR_CPUS/possible_cpus limit of %i reached." 
+ " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); + + disabled_cpus++; return; } num_processors++; - cpus_complement(tmp_map, cpu_present_map); - cpu = first_cpu(tmp_map); + cpu = cpumask_next_zero(-1, cpu_present_mask); physid_set(apicid, phys_cpu_present_map); if (apicid == boot_cpu_physical_apicid) { diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index be9466788043..1a9941b11150 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1252,6 +1252,15 @@ void __init native_smp_cpus_done(unsigned int max_cpus) check_nmi_watchdog(); } +static int __initdata setup_possible_cpus = -1; +static int __init _setup_possible_cpus(char *str) +{ + get_option(&str, &setup_possible_cpus); + return 0; +} +early_param("possible_cpus", _setup_possible_cpus); + + /* * cpu_possible_map should be static, it cannot change as cpu's * are onlined, or offlined. The reason is per-cpu data-structures @@ -1264,7 +1273,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus) * * Three ways to find out the number of additional hotplug CPUs: * - If the BIOS specified disabled CPUs in ACPI/mptables use that. - * - The user can overwrite it with additional_cpus=NUM + * - The user can overwrite it with possible_cpus=NUM * - Otherwise don't reserve additional CPUs. * We do this because additional CPUs waste a lot of memory. * -AK @@ -1277,9 +1286,17 @@ __init void prefill_possible_map(void) if (!num_processors) num_processors = 1; - possible = num_processors + disabled_cpus; - if (possible > NR_CPUS) - possible = NR_CPUS; + if (setup_possible_cpus == -1) + possible = num_processors + disabled_cpus; + else + possible = setup_possible_cpus; + + if (possible > CONFIG_NR_CPUS) { + printk(KERN_WARNING + "%d Processors exceeds NR_CPUS limit of %d\n", + possible, CONFIG_NR_CPUS); + possible = CONFIG_NR_CPUS; + } printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", possible, max_t(int, possible - num_processors, 0)); -- cgit v1.2.3 From a7883dece6ef82097e6bdf19c1d0a20351e06056 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 19 Dec 2008 00:59:09 +0100 Subject: x86: fix warning in arch/x86/kernel/io_apic.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit this warning: arch/x86/kernel/io_apic.c: In function ‘ir_set_msi_irq_affinity’: arch/x86/kernel/io_apic.c:3373: warning: ‘cfg’ may be used uninitialized in this function triggers because the variable was truly uninitialized. We'd crash on entering this code. Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 58938cc4b7d3..908c1d00a5c7 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -3360,7 +3360,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) { struct irq_desc *desc = irq_to_desc(irq); - struct irq_cfg *cfg; + struct irq_cfg *cfg = desc->chip_data; unsigned int dest; struct irte irte; -- cgit v1.2.3 From b69edc76539be6a4aa39a22f85365fd4a3b3b9d2 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 31 Oct 2008 01:02:41 +0100 Subject: x86 hibernate: Mark ACPI NVS memory region at startup Introduce new initcall for marking the ACPI NVS memory at startup, so that it can be saved/restored during hibernation/resume. Based on a patch by Zhang Rui. Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Len Brown --- arch/x86/kernel/e820.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 7aafeb5263ef..74c6a21fdc8c 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -665,6 +665,27 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn) } #endif +#ifdef CONFIG_HIBERNATION +/** + * Mark ACPI NVS memory region, so that we can save/restore it during + * hibernation and the subsequent resume. + */ +static int __init e820_mark_nvs_memory(void) +{ + int i; + + for (i = 0; i < e820.nr_map; i++) { + struct e820entry *ei = &e820.map[i]; + + if (ei->type == E820_NVS) + hibernate_nvs_register(ei->addr, ei->size); + } + + return 0; +} +core_initcall(e820_mark_nvs_memory); +#endif + /* * Early reserved memory areas. */ -- cgit v1.2.3 From ba84ed9546e91348fdf3ff2bff859b0ee53b407a Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 26 Oct 2008 20:56:30 +0100 Subject: ACPI hibernate: Introduce new kernel parameter acpi_sleep=s4_nonvs On some machines it may be necessary to disable the saving/restoring of the ACPI NVS memory region during hibernation/resume. For this purpose, introduce new ACPI kernel command line option acpi_sleep=s4_nonvs. Based on a patch by Zhang Rui. Signed-off-by: Rafael J. Wysocki Acked-by: Nigel Cunningham Acked-by: Pavel Machek Signed-off-by: Len Brown --- Documentation/kernel-parameters.txt | 5 ++++- arch/x86/kernel/acpi/sleep.c | 2 ++ drivers/acpi/sleep/main.c | 18 ++++++++++++++++-- include/linux/acpi.h | 1 + 4 files changed, 23 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index e0f346d201ed..1d089eeff3cf 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -149,7 +149,8 @@ and is between 256 and 4096 characters. It is defined in the file default: 0 acpi_sleep= [HW,ACPI] Sleep options - Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, old_ordering } + Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig, + old_ordering, s4_nonvs } See Documentation/power/video.txt for s3_bios and s3_mode. s3_beep is for debugging; it makes the PC's speaker beep as soon as the kernel's real-mode entry point is called. @@ -159,6 +160,8 @@ and is between 256 and 4096 characters. It is defined in the file control method, wrt putting devices into low power states, to be enforced (the ACPI 2.0 ordering of _PTS is used by default). + s4_nonvs prevents the kernel from saving/restoring the + ACPI NVS memory during hibernation. acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode Format: { level | edge | high | low } diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 806b4e9051b4..707c1f6f95fa 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -159,6 +159,8 @@ static int __init acpi_sleep_setup(char *str) #endif if (strncmp(str, "old_ordering", 12) == 0) acpi_old_suspend_ordering(); + if (strncmp(str, "s4_nonvs", 8) == 0) + acpi_s4_no_nvs(); str = strchr(str, ','); if (str != NULL) str += strspn(str, ", \t"); diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c index 45a8015e4217..bef41fd4c877 100644 --- a/drivers/acpi/sleep/main.c +++ b/drivers/acpi/sleep/main.c @@ -101,6 +101,19 @@ void __init acpi_old_suspend_ordering(void) * cases. 
*/ static bool set_sci_en_on_resume; +/* + * The ACPI specification wants us to save NVS memory regions during hibernation + * and to restore them during the subsequent resume. However, it is not certain + * if this mechanism is going to work on all machines, so we allow the user to + * disable this mechanism using the 'acpi_sleep=s4_nonvs' kernel command line + * option. + */ +static bool s4_no_nvs; + +void __init acpi_s4_no_nvs(void) +{ + s4_no_nvs = true; +} /** * acpi_pm_disable_gpes - Disable the GPEs. @@ -396,7 +409,7 @@ static int acpi_hibernation_begin(void) { int error; - error = hibernate_nvs_alloc(); + error = s4_no_nvs ? 0 : hibernate_nvs_alloc(); if (!error) { acpi_target_sleep_state = ACPI_STATE_S4; acpi_sleep_tts_switch(acpi_target_sleep_state); @@ -494,7 +507,8 @@ static int acpi_hibernation_begin_old(void) error = acpi_sleep_prepare(ACPI_STATE_S4); if (!error) { - error = hibernate_nvs_alloc(); + if (!s4_no_nvs) + error = hibernate_nvs_alloc(); if (!error) acpi_target_sleep_state = ACPI_STATE_S4; } diff --git a/include/linux/acpi.h b/include/linux/acpi.h index fba8051fb297..dfa0a5356c53 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -270,6 +270,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n, #ifdef CONFIG_PM_SLEEP void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); +void __init acpi_s4_no_nvs(void); #endif /* CONFIG_PM_SLEEP */ #else /* CONFIG_ACPI */ -- cgit v1.2.3 From 7b37b5fd9ba32c0c5afc3537eed7e7466f2173e2 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Tue, 23 Dec 2008 01:47:42 -0500 Subject: ACPI: disable MPS when NO APIC-table found When ACPI is asked to find an MADT (APIC table) and fails, then ACPI expects to run in PIC mode. However, if an MP Table is was found, IRQs will be registered as if an IOAPIC is being used, even though ACPI is configuring interrupt links links for PIC mode. In this scenario, disable MPS so that IRQs are registered in PIC mode, consistent with ACPI. http://bugzilla.kernel.org/show_bug.cgi?id=12257 Signed-off-by: Len Brown --- arch/x86/kernel/acpi/boot.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 4c51a2f8fd31..de8ce79dd881 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1359,6 +1359,17 @@ static void __init acpi_process_madt(void) "Invalid BIOS MADT, disabling ACPI\n"); disable_acpi(); } + } else { + /* + * ACPI found no MADT, and so ACPI wants UP PIC mode. + * In the event an MPS table was found, forget it. + * Boot with "acpi=off" to use MPS on such a system. + */ + if (smp_found_config) { + printk(KERN_WARNING PREFIX + "No APIC-table, disabling MPS\n"); + smp_found_config = 0; + } } #endif return; -- cgit v1.2.3 From b77b881f21b29aa7efa668fde69ee3dc0372ae3f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 19 Dec 2008 15:23:44 -0800 Subject: x86: fix lguest used_vectors breakage, -v2 Impact: fix lguest, clean up 32-bit lguest used used_vectors to record vectors, but that model of allocating vectors changed and got broken, after we changed vector allocation to a per_cpu array. Try enable that for 64bit, and the array is used for all vectors that are not managed by vector_irq per_cpu array. Also kill system_vectors[], that is now a duplication of the used_vectors bitmap. [ merged in cpus4096 due to io_apic.c cpumask changes. 
] [ -v2, fix build failure ] Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar Signed-off-by: Ingo Molnar --- arch/x86/include/asm/desc.h | 10 ++++------ arch/x86/include/asm/irq.h | 1 + arch/x86/kernel/apic.c | 2 -- arch/x86/kernel/io_apic.c | 9 +++------ arch/x86/kernel/irqinit_32.c | 16 +++++++++++++++- arch/x86/kernel/irqinit_64.c | 13 +++++++++++++ arch/x86/kernel/traps.c | 12 +++++++----- drivers/lguest/interrupts_and_traps.c | 13 +++++++++---- 8 files changed, 52 insertions(+), 24 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index e6b82b17b072..dc27705f5443 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -320,16 +320,14 @@ static inline void set_intr_gate(unsigned int n, void *addr) _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); } -#define SYS_VECTOR_FREE 0 -#define SYS_VECTOR_ALLOCED 1 - extern int first_system_vector; -extern char system_vectors[]; +/* used_vectors is BITMAP for irq is not managed by percpu vector_irq */ +extern unsigned long used_vectors[]; static inline void alloc_system_vector(int vector) { - if (system_vectors[vector] == SYS_VECTOR_FREE) { - system_vectors[vector] = SYS_VECTOR_ALLOCED; + if (!test_bit(vector, used_vectors)) { + set_bit(vector, used_vectors); if (first_system_vector > vector) first_system_vector = vector; } else diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 8766d30fb746..4bb732e45a85 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -46,5 +46,6 @@ extern void native_init_IRQ(void); /* Interrupt vector management */ extern DECLARE_BITMAP(used_vectors, NR_VECTORS); +extern int vector_used_by_percpu_irq(unsigned int vector); #endif /* _ASM_X86_IRQ_H */ diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index f7a32a3beb2f..b9019271af62 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -118,8 +118,6 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); int first_system_vector = 0xfe; -char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE}; - /* * Debug level, exported for io_apic.c */ diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 908c1d00a5c7..1cbf7c8d46e0 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -1326,13 +1326,10 @@ next: } if (unlikely(current_vector == vector)) continue; -#ifdef CONFIG_X86_64 - if (vector == IA32_SYSCALL_VECTOR) - goto next; -#else - if (vector == SYSCALL_VECTOR) + + if (test_bit(vector, used_vectors)) goto next; -#endif + for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) if (per_cpu(vector_irq, new_cpu)[vector] != -1) goto next; diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 6a92f47c52e7..61aa2a1004b5 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -110,6 +110,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = { [IRQ15_VECTOR + 1 ... 
NR_VECTORS - 1] = -1 }; +int vector_used_by_percpu_irq(unsigned int vector) +{ + int cpu; + + for_each_online_cpu(cpu) { + if (per_cpu(vector_irq, cpu)[vector] != -1) + return 1; + } + + return 0; +} + /* Overridden in paravirt.c */ void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); @@ -146,10 +158,12 @@ void __init native_init_IRQ(void) alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); /* IPI for single call function */ - set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt); + alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, + call_function_single_interrupt); /* Low priority IPI to cleanup after moving an irq */ set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); + set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); #endif #ifdef CONFIG_X86_LOCAL_APIC diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 40c1e62ec785..1020919efe1c 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -135,6 +135,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = { [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 }; +int vector_used_by_percpu_irq(unsigned int vector) +{ + int cpu; + + for_each_online_cpu(cpu) { + if (per_cpu(vector_irq, cpu)[vector] != -1) + return 1; + } + + return 0; +} + void __init init_ISA_irqs(void) { int i; @@ -187,6 +199,7 @@ static void __init smp_intr_init(void) /* Low priority IPI to cleanup after moving an irq */ set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); + set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); #endif } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 04d242ab0161..4a6dff39a470 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -72,9 +72,6 @@ #include "cpu/mcheck/mce.h" -DECLARE_BITMAP(used_vectors, NR_VECTORS); -EXPORT_SYMBOL_GPL(used_vectors); - asmlinkage int system_call(void); /* Do we ignore FPU interrupts ? 
*/ @@ -89,6 +86,9 @@ gate_desc idt_table[256] __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; #endif +DECLARE_BITMAP(used_vectors, NR_VECTORS); +EXPORT_SYMBOL_GPL(used_vectors); + static int ignore_nmis; static inline void conditional_sti(struct pt_regs *regs) @@ -949,9 +949,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) void __init trap_init(void) { -#ifdef CONFIG_X86_32 int i; -#endif #ifdef CONFIG_EISA void __iomem *p = early_ioremap(0x0FFFD9, 4); @@ -1008,11 +1006,15 @@ void __init trap_init(void) } set_system_trap_gate(SYSCALL_VECTOR, &system_call); +#endif /* Reserve all the builtin and the syscall vector: */ for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) set_bit(i, used_vectors); +#ifdef CONFIG_X86_64 + set_bit(IA32_SYSCALL_VECTOR, used_vectors); +#else set_bit(SYSCALL_VECTOR, used_vectors); #endif /* diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index a1039068f95c..415fab0125ac 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c @@ -222,11 +222,16 @@ bool check_syscall_vector(struct lguest *lg) int init_interrupts(void) { /* If they want some strange system call vector, reserve it now */ - if (syscall_vector != SYSCALL_VECTOR - && test_and_set_bit(syscall_vector, used_vectors)) { - printk("lg: couldn't reserve syscall %u\n", syscall_vector); - return -EBUSY; + if (syscall_vector != SYSCALL_VECTOR) { + if (test_bit(syscall_vector, used_vectors) || + vector_used_by_percpu_irq(syscall_vector)) { + printk(KERN_ERR "lg: couldn't reserve syscall %u\n", + syscall_vector); + return -EBUSY; + } + set_bit(syscall_vector, used_vectors); } + return 0; } -- cgit v1.2.3 From 7d87d5365556b1c6e8c00abcc632c3ad1fdc58b8 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Mon, 22 Dec 2008 17:33:28 -0800 Subject: x86: use logical apicid in x2apic_cluster's x2apic_cpu_mask_to_apicid_and() These commits: commit 95d313cf1c1ecedc8bec5727b09bdacbf67dfc45 Author: Mike Travis Date: Tue Dec 16 17:33:54 2008 -0800 x86: Add cpu_mask_to_apicid_and and commit 6eeb7c5a99434596c5953a95baa17d2f085664e3 Author: Mike Travis Date: Tue Dec 16 17:33:55 2008 -0800 x86: update add-cpu_mask_to_apicid_and to use struct cpumask* broke interrupt delivery on x2apic platforms. As x2apic cluster mode uses logical delivery mode, we need to use logical apicid instead of physical apicid in x2apic_cpu_mask_to_apicid_and() Impact: fixes the broken interrupt delivery issue on generic x2apic platforms. Signed-off-by: Suresh Siddha Acked-by: Mike Travis Signed-off-by: H. Peter Anvin Signed-off-by: Ingo Molnar --- arch/x86/kernel/genx2apic_cluster.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index d451c9b9fdff..6ce497cc372d 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c @@ -114,7 +114,7 @@ static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) int cpu; /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. + * We're using fixed IRQ delivery, can only return one logical APIC ID. * May as well be the first. */ cpu = cpumask_first(cpumask); @@ -130,14 +130,14 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, int cpu; /* - * We're using fixed IRQ delivery, can only return one phys APIC ID. + * We're using fixed IRQ delivery, can only return one logical APIC ID. 
* May as well be the first. */ for_each_cpu_and(cpu, cpumask, andmask) if (cpumask_test_cpu(cpu, cpu_online_mask)) break; if (cpu < nr_cpu_ids) - return per_cpu(x86_cpu_to_apicid, cpu); + return per_cpu(x86_cpu_to_logical_apicid, cpu); return BAD_APICID; } -- cgit v1.2.3 From c3d80000e3a812fe5a200d6bde755fbd7fa65481 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 23 Dec 2008 15:15:17 +0100 Subject: x86: export vector_used_by_percpu_irq Impact: build fix lguest can be built as a module and makes use of this new symbol: ERROR: "vector_used_by_percpu_irq" [drivers/lguest/lg.ko] undefined! export it. Signed-off-by: Ingo Molnar --- arch/x86/kernel/irq.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 3f1d9d18df67..bce53e1352a0 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -9,6 +9,7 @@ #include #include #include +#include atomic_t irq_err_count; @@ -190,3 +191,5 @@ u64 arch_irq_stat(void) #endif return sum; } + +EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); -- cgit v1.2.3 From a73ad3331fdbf4191cf99b83ea9ac7082b6757ba Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Thu, 25 Dec 2008 10:39:01 -0800 Subject: x86: unify the implementation of FPU traps On 32 bits, we may suffer IRQ 13, or supposedly we might have a buggy implementation which gives spurious trap 16. We did not check for this on 64 bits, but there is no reason we can't make the code the same in both cases. Furthermore, this is presumably rare, so do the spurious check last, instead of first. Signed-off-by: H. Peter Anvin --- arch/x86/kernel/traps.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index f37cee75ab58..f5a640ba04bc 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -689,12 +689,7 @@ void math_error(void __user *ip) cwd = get_fpu_cwd(task); swd = get_fpu_swd(task); - err = swd & ~cwd & 0x3f; - -#ifdef CONFIG_X86_32 - if (!err) - return; -#endif + err = swd & ~cwd; if (err & 0x001) { /* Invalid op */ /* @@ -712,7 +707,9 @@ void math_error(void __user *ip) } else if (err & 0x020) { /* Precision */ info.si_code = FPE_FLTRES; } else { - info.si_code = __SI_FAULT|SI_KERNEL; /* WTF? */ + /* If we're using IRQ 13, or supposedly even some trap 16 + implementations, it's possible we get a spurious trap... */ + return; /* Spurious trap, no error */ } force_sig_info(SIGFPE, &info, task); } -- cgit v1.2.3 From 71ab6b245fda6e7597a667a67cce0d26c3c7a14b Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 25 Dec 2008 17:18:43 +1030 Subject: x86: remove impossible test in mtrr/main.c Impact: cleanup enable_mtrr_cleanup is static, and is never set to anything but 0 or 1. Signed-off-by: Rusty Russell Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/cpu/mtrr/main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index c78c04821ea1..acd9ac54d47f 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -823,16 +823,14 @@ static int enable_mtrr_cleanup __initdata = static int __init disable_mtrr_cleanup_setup(char *str) { - if (enable_mtrr_cleanup != -1) - enable_mtrr_cleanup = 0; + enable_mtrr_cleanup = 0; return 0; } early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup); static int __init enable_mtrr_cleanup_setup(char *str) { - if (enable_mtrr_cleanup != -1) - enable_mtrr_cleanup = 1; + enable_mtrr_cleanup = 1; return 0; } early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); -- cgit v1.2.3 From bd8b96dfc216eebc72950a6c40da8d3eca8667df Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 26 Dec 2008 09:20:22 +0100 Subject: x86: clean up comment style in arch/x86/kernel/traps.c Impact: cleanup Signed-off-by: Ingo Molnar --- arch/x86/kernel/traps.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index f5a640ba04bc..dbfb80289928 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -292,8 +292,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) tsk->thread.error_code = error_code; tsk->thread.trap_no = 8; - /* This is always a kernel trap and never fixable (and thus must - never return). */ + /* + * This is always a kernel trap and never fixable (and thus must + * never return). + */ for (;;) die(str, regs, error_code); } @@ -524,9 +526,11 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) } #ifdef CONFIG_X86_64 -/* Help handler running on IST stack to switch back to user stack - for scheduling or signal handling. The actual stack switch is done in - entry.S */ +/* + * Help handler running on IST stack to switch back to user stack + * for scheduling or signal handling. The actual stack switch is done in + * entry.S + */ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) { struct pt_regs *regs = eregs; @@ -536,8 +540,10 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) /* Exception from user space */ else if (user_mode(eregs)) regs = task_pt_regs(current); - /* Exception from kernel and interrupts are enabled. Move to - kernel process stack. */ + /* + * Exception from kernel and interrupts are enabled. Move to + * kernel process stack. + */ else if (eregs->flags & X86_EFLAGS_IF) regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); if (eregs != regs) @@ -707,8 +713,10 @@ void math_error(void __user *ip) } else if (err & 0x020) { /* Precision */ info.si_code = FPE_FLTRES; } else { - /* If we're using IRQ 13, or supposedly even some trap 16 - implementations, it's possible we get a spurious trap... */ + /* + * If we're using IRQ 13, or supposedly even some trap 16 + * implementations, it's possible we get a spurious trap... + */ return; /* Spurious trap, no error */ } force_sig_info(SIGFPE, &info, task); -- cgit v1.2.3 From 18eefedfe8ad33e8fc7614c13359e29a9fab4644 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Fri, 26 Dec 2008 12:29:48 +0900 Subject: irq: simplify for_each_irq_desc() usage Impact: cleanup all for_each_irq_desc() usage point have !desc check. 
then its check can move into for_each_irq_desc() macro. Signed-off-by: KOSAKI Motohiro Acked-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 10 ---------- drivers/xen/events.c | 3 --- include/linux/irqnr.h | 8 ++++++-- kernel/irq/autoprobe.c | 15 --------------- kernel/irq/handle.c | 3 --- kernel/irq/spurious.c | 5 ----- 6 files changed, 6 insertions(+), 38 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index a74887b416cc..2fe543f58ac8 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -1345,8 +1345,6 @@ void __setup_vector_irq(int cpu) /* Mark the inuse vectors */ for_each_irq_desc(irq, desc) { - if (!desc) - continue; cfg = desc->chip_data; if (!cpu_isset(cpu, cfg->domain)) continue; @@ -1730,8 +1728,6 @@ __apicdebuginit(void) print_IO_APIC(void) for_each_irq_desc(irq, desc) { struct irq_pin_list *entry; - if (!desc) - continue; cfg = desc->chip_data; entry = cfg->irq_2_pin; if (!entry) @@ -2378,9 +2374,6 @@ static void ir_irq_migration(struct work_struct *work) struct irq_desc *desc; for_each_irq_desc(irq, desc) { - if (!desc) - continue; - if (desc->status & IRQ_MOVE_PENDING) { unsigned long flags; @@ -2671,9 +2664,6 @@ static inline void init_IO_APIC_traps(void) * 0x80, because int 0x80 is hm, kind of importantish. ;) */ for_each_irq_desc(irq, desc) { - if (!desc) - continue; - cfg = desc->chip_data; if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { /* diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 46625cd38743..e26733a9df21 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -142,9 +142,6 @@ static void init_evtchn_cpu_bindings(void) /* By default all event channels notify CPU#0. */ for_each_irq_desc(i, desc) { - if (!desc) - continue; - desc->affinity = cpumask_of_cpu(0); } #endif diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index c4a59c7a478b..5504a5c97836 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -22,10 +22,14 @@ extern struct irq_desc *irq_to_desc(unsigned int irq); # define for_each_irq_desc(irq, desc) \ for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ - irq++, desc = irq_to_desc(irq)) + irq++, desc = irq_to_desc(irq)) \ + if (desc) + + # define for_each_irq_desc_reverse(irq, desc) \ for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \ - irq--, desc = irq_to_desc(irq)) + irq--, desc = irq_to_desc(irq)) \ + if (desc) #endif /* CONFIG_GENERIC_HARDIRQS */ diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 650ce4102a63..cc0f7321b8ce 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -40,9 +40,6 @@ unsigned long probe_irq_on(void) * flush such a longstanding irq before considering it as spurious. 
*/ for_each_irq_desc_reverse(i, desc) { - if (!desc) - continue; - spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { /* @@ -71,9 +68,6 @@ unsigned long probe_irq_on(void) * happened in the previous stage, it may have masked itself) */ for_each_irq_desc_reverse(i, desc) { - if (!desc) - continue; - spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { desc->status |= IRQ_AUTODETECT | IRQ_WAITING; @@ -92,9 +86,6 @@ unsigned long probe_irq_on(void) * Now filter out any obviously spurious interrupts */ for_each_irq_desc(i, desc) { - if (!desc) - continue; - spin_lock_irq(&desc->lock); status = desc->status; @@ -133,9 +124,6 @@ unsigned int probe_irq_mask(unsigned long val) int i; for_each_irq_desc(i, desc) { - if (!desc) - continue; - spin_lock_irq(&desc->lock); status = desc->status; @@ -178,9 +166,6 @@ int probe_irq_off(unsigned long val) unsigned int status; for_each_irq_desc(i, desc) { - if (!desc) - continue; - spin_lock_irq(&desc->lock); status = desc->status; diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 03479dfdebb8..7dbdfe524693 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -437,9 +437,6 @@ void early_init_irq_lock_class(void) int i; for_each_irq_desc(i, desc) { - if (!desc) - continue; - lockdep_set_class(&desc->lock, &irq_desc_lock_class); } } diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 3738107531fd..dd364c11e56e 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -91,9 +91,6 @@ static int misrouted_irq(int irq) int i, ok = 0; for_each_irq_desc(i, desc) { - if (!desc) - continue; - if (!i) continue; @@ -115,8 +112,6 @@ static void poll_spurious_irqs(unsigned long dummy) for_each_irq_desc(i, desc) { unsigned int status; - if (!desc) - continue; if (!i) continue; -- cgit v1.2.3 From 393d68fb9929817cde7ab31c82d66fcb28ad35fc Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 26 Dec 2008 22:23:38 +1030 Subject: cpumask: x86: Introduce cpumask_of_{node,pcibus} to replace {node,pcibus}_to_cpumask Impact: New APIs The old node_to_cpumask/node_to_pcibus returned a cpumask_t: these return a pointer to a struct cpumask. Part of removing cpumasks from the stack. Also makes __pcibus_to_node take a const pointer. 
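[Editor's note, usage sketch only, not taken from the patch: with the pointer-returning accessors a caller keeps a const pointer instead of copying a cpumask_t onto its stack.]

	/* hedged sketch of a converted caller */
	static int example_first_cpu_on_pcibus(const struct pci_bus *bus)
	{
		/* old style: cpumask_t mask = __pcibus_to_cpumask(bus); */
		const struct cpumask *mask = cpumask_of_pcibus(bus);

		return cpumask_first(mask);
	}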
Signed-off-by: Rusty Russell Acked-by: Ingo Molnar --- arch/x86/include/asm/pci.h | 10 ++++++++-- arch/x86/include/asm/topology.h | 35 +++++++++++++++++++++++------------ arch/x86/kernel/setup_percpu.c | 8 ++++---- 3 files changed, 35 insertions(+), 18 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 875b38edf193..52d80d3d94f3 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -98,9 +98,9 @@ static inline void early_quirks(void) { } #ifdef CONFIG_NUMA /* Returns the node based on pci bus */ -static inline int __pcibus_to_node(struct pci_bus *bus) +static inline int __pcibus_to_node(const struct pci_bus *bus) { - struct pci_sysdata *sd = bus->sysdata; + const struct pci_sysdata *sd = bus->sysdata; return sd->node; } @@ -109,6 +109,12 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus) { return node_to_cpumask(__pcibus_to_node(bus)); } + +static inline const struct cpumask * +cpumask_of_pcibus(const struct pci_bus *bus) +{ + return cpumask_of_node(__pcibus_to_node(bus)); +} #endif #endif /* _ASM_X86_PCI_H */ diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index ff386ff50ed7..45da5dc50fc8 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -61,13 +61,19 @@ static inline int cpu_to_node(int cpu) * * Side note: this function creates the returned cpumask on the stack * so with a high NR_CPUS count, excessive stack space is used. The - * node_to_cpumask_ptr function should be used whenever possible. + * cpumask_of_node function should be used whenever possible. */ static inline cpumask_t node_to_cpumask(int node) { return node_to_cpumask_map[node]; } +/* Returns a bitmask of CPUs on Node 'node'. */ +static inline const struct cpumask *cpumask_of_node(int node) +{ + return &node_to_cpumask_map[node]; +} + #else /* CONFIG_X86_64 */ /* Mappings between node number and cpus on that node. */ @@ -82,7 +88,7 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); #ifdef CONFIG_DEBUG_PER_CPU_MAPS extern int cpu_to_node(int cpu); extern int early_cpu_to_node(int cpu); -extern const cpumask_t *_node_to_cpumask_ptr(int node); +extern const cpumask_t *cpumask_of_node(int node); extern cpumask_t node_to_cpumask(int node); #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ @@ -103,7 +109,7 @@ static inline int early_cpu_to_node(int cpu) } /* Returns a pointer to the cpumask of CPUs on Node 'node'. 
*/ -static inline const cpumask_t *_node_to_cpumask_ptr(int node) +static inline const cpumask_t *cpumask_of_node(int node) { return &node_to_cpumask_map[node]; } @@ -116,12 +122,15 @@ static inline cpumask_t node_to_cpumask(int node) #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ -/* Replace default node_to_cpumask_ptr with optimized version */ +/* + * Replace default node_to_cpumask_ptr with optimized version + * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" + */ #define node_to_cpumask_ptr(v, node) \ - const cpumask_t *v = _node_to_cpumask_ptr(node) + const cpumask_t *v = cpumask_of_node(node) #define node_to_cpumask_ptr_next(v, node) \ - v = _node_to_cpumask_ptr(node) + v = cpumask_of_node(node) #endif /* CONFIG_X86_64 */ @@ -187,7 +196,7 @@ extern int __node_distance(int, int); #define cpu_to_node(cpu) 0 #define early_cpu_to_node(cpu) 0 -static inline const cpumask_t *_node_to_cpumask_ptr(int node) +static inline const cpumask_t *cpumask_of_node(int node) { return &cpu_online_map; } @@ -200,12 +209,15 @@ static inline int node_to_first_cpu(int node) return first_cpu(cpu_online_map); } -/* Replace default node_to_cpumask_ptr with optimized version */ +/* + * Replace default node_to_cpumask_ptr with optimized version + * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" + */ #define node_to_cpumask_ptr(v, node) \ - const cpumask_t *v = _node_to_cpumask_ptr(node) + const cpumask_t *v = cpumask_of_node(node) #define node_to_cpumask_ptr_next(v, node) \ - v = _node_to_cpumask_ptr(node) + v = cpumask_of_node(node) #endif #include @@ -214,8 +226,7 @@ static inline int node_to_first_cpu(int node) /* Returns the number of the first CPU on Node 'node'. */ static inline int node_to_first_cpu(int node) { - node_to_cpumask_ptr(mask, node); - return first_cpu(*mask); + return cpumask_first(cpumask_of_node(node)); } #endif diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 1c2084291f97..8e8b1193add5 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -334,25 +334,25 @@ static const cpumask_t cpu_mask_none; /* * Returns a pointer to the bitmask of CPUs on Node 'node'. */ -const cpumask_t *_node_to_cpumask_ptr(int node) +const cpumask_t *cpumask_of_node(int node) { if (node_to_cpumask_map == NULL) { printk(KERN_WARNING - "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n", + "cpumask_of_node(%d): no node_to_cpumask_map!\n", node); dump_stack(); return (const cpumask_t *)&cpu_online_map; } if (node >= nr_node_ids) { printk(KERN_WARNING - "_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n", + "cpumask_of_node(%d): node > nr_node_ids(%d)\n", node, nr_node_ids); dump_stack(); return &cpu_mask_none; } return &node_to_cpumask_map[node]; } -EXPORT_SYMBOL(_node_to_cpumask_ptr); +EXPORT_SYMBOL(cpumask_of_node); /* * Returns a bitmask of CPUs on Node 'node'. -- cgit v1.2.3 From 030bb203e01db12e3f2866799f4f03a114d06349 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 26 Dec 2008 22:23:41 +1030 Subject: cpumask: cpu_coregroup_mask(): x86 Impact: New API Like cpu_coregroup_map, but returns a (const) pointer. 
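[Editor's note, hedged usage sketch, not from the patch: a caller that only needs to test membership can take the pointer form and avoid the structure copy entirely.]

	/* hedged sketch of how the new accessor might be used */
	static int example_same_coregroup(int a, int b)
	{
		return cpumask_test_cpu(b, cpu_coregroup_mask(a));
	}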
Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Cc: Ingo Molnar --- arch/x86/include/asm/topology.h | 1 + arch/x86/kernel/smpboot.c | 11 ++++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 45da5dc50fc8..168203c0c316 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -231,6 +231,7 @@ static inline int node_to_first_cpu(int node) #endif extern cpumask_t cpu_coregroup_map(int cpu); +extern const struct cpumask *cpu_coregroup_mask(int cpu); #ifdef ENABLE_TOPO_DEFINES #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 468c2f9d47ae..d5274b6b088e 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -497,7 +497,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) } /* maps the cpu to the sched domain representing multi-core */ -cpumask_t cpu_coregroup_map(int cpu) +const struct cpumask *cpu_coregroup_mask(int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); /* @@ -505,9 +505,14 @@ cpumask_t cpu_coregroup_map(int cpu) * And for power savings, we return cpu_core_map */ if (sched_mc_power_savings || sched_smt_power_savings) - return per_cpu(cpu_core_map, cpu); + return &per_cpu(cpu_core_map, cpu); else - return c->llc_shared_map; + return &c->llc_shared_map; +} + +cpumask_t cpu_coregroup_map(int cpu) +{ + return *cpu_coregroup_mask(cpu); } static void impress_friends(void) -- cgit v1.2.3 From 0b936bfdeb85784b7df132b2c64fb34ad9b11ffa Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Date: Tue, 23 Dec 2008 21:51:28 +0530 Subject: x86: reboot.c declare port_cf9_safe before they get used Impact: cleanup, avoid sparse warning Include "../pci/pci.h" for port_cf9_safe Fixes this sparse warning: arch/x86/kernel/reboot.c:43:6: warning: symbol 'port_cf9_safe' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Signed-off-by: Ingo Molnar --- arch/x86/kernel/reboot.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 61f718df6eec..b165eb0884ed 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -22,6 +22,7 @@ #endif #include +#include "../pci/pci.h" /* -- cgit v1.2.3 From b6b301aa9fba57b114c3a00f5f43abf672bd4ecd Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Date: Tue, 23 Dec 2008 21:52:33 +0530 Subject: x86: apic.c x2apic_preenabled and disable_x2apic should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: arch/x86/kernel/apic.c:103:5: warning: symbol 'disable_x2apic' was not declared. Should it be static? 
Signed-off-by: Jaswinder Singh Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 2 +- arch/x86/kernel/apic.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 25caa0738af5..e644bf6f90dc 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -93,7 +93,7 @@ static inline u32 native_apic_msr_read(u32 reg) } #ifndef CONFIG_X86_32 -extern int x2apic, x2apic_preenabled; +extern int x2apic; extern void check_x2apic(void); extern void enable_x2apic(void); extern void enable_IR_x2apic(void); diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 7397911f8478..3a961bd9f619 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -97,8 +97,8 @@ __setup("apicpmtimer", setup_apicpmtimer); #ifdef HAVE_X2APIC int x2apic; /* x2apic enabled before OS handover */ -int x2apic_preenabled; -int disable_x2apic; +static int x2apic_preenabled; +static int disable_x2apic; static __init int setup_nox2apic(char *str) { disable_x2apic = 1; -- cgit v1.2.3 From 13a0c3c269b223f60abfac8a9811d77111a8b4ba Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 26 Dec 2008 02:05:47 -0800 Subject: sparseirq: work around compiler optimizing away __weak functions Impact: fix panic on null pointer with sparseirq Some GCC versions seem to inline the weak global function, when that function is empty. Work it around, by making the functions return a (dummy) integer. Signed-off-by: Yinghai Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 8 ++++++-- include/linux/irq.h | 6 +++--- init/main.c | 7 ++++--- kernel/irq/handle.c | 7 ++++--- 4 files changed, 17 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 2fe543f58ac8..976039377846 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -170,7 +170,7 @@ static struct irq_cfg irq_cfgx[NR_IRQS] = { [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, }; -void __init arch_early_irq_init(void) +int __init arch_early_irq_init(void) { struct irq_cfg *cfg; struct irq_desc *desc; @@ -184,6 +184,8 @@ void __init arch_early_irq_init(void) desc = irq_to_desc(i); desc->chip_data = &cfg[i]; } + + return 0; } #ifdef CONFIG_SPARSE_IRQ @@ -212,7 +214,7 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu) return cfg; } -void arch_init_chip_data(struct irq_desc *desc, int cpu) +int arch_init_chip_data(struct irq_desc *desc, int cpu) { struct irq_cfg *cfg; @@ -224,6 +226,8 @@ void arch_init_chip_data(struct irq_desc *desc, int cpu) BUG_ON(1); } } + + return 0; } #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC diff --git a/include/linux/irq.h b/include/linux/irq.h index 69da275c0ebd..0e40af4bac40 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -193,9 +193,9 @@ struct irq_desc { const char *name; } ____cacheline_internodealigned_in_smp; -extern void early_irq_init(void); -extern void arch_early_irq_init(void); -extern void arch_init_chip_data(struct irq_desc *desc, int cpu); +extern int early_irq_init(void); +extern int arch_early_irq_init(void); +extern int arch_init_chip_data(struct irq_desc *desc, int cpu); extern void arch_init_copy_chip_data(struct irq_desc *old_desc, struct irq_desc *desc, int cpu); extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); diff --git a/init/main.c b/init/main.c index c1f999a3cf31..c314aa15370e 100644 --- a/init/main.c +++ b/init/main.c @@ -539,13 +539,14 @@ void 
__init __weak thread_info_cache_init(void) { } -void __init __weak arch_early_irq_init(void) +int __init __weak arch_early_irq_init(void) { + return 0; } -void __init __weak early_irq_init(void) +int __init __weak early_irq_init(void) { - arch_early_irq_init(); + return arch_early_irq_init(); } asmlinkage void __init start_kernel(void) diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 893da67b7781..0bef3ecb7a0e 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -86,8 +86,9 @@ void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) desc->kstat_irqs = (unsigned int *)ptr; } -void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu) +int __weak arch_init_chip_data(struct irq_desc *desc, int cpu) { + return 0; } static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) @@ -132,7 +133,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm /* FIXME: use bootmem alloc ...*/ static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS]; -void __init early_irq_init(void) +int __init early_irq_init(void) { struct irq_desc *desc; int legacy_count; @@ -151,7 +152,7 @@ void __init early_irq_init(void) for (i = legacy_count; i < NR_IRQS; i++) irq_desc_ptrs[i] = NULL; - arch_early_irq_init(); + return arch_early_irq_init(); } struct irq_desc *irq_to_desc(unsigned int irq) -- cgit v1.2.3 From 70a7d3cc1308a55104fbe505d76f2aca8a4cf53e Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Mon, 22 Dec 2008 10:26:05 -0800 Subject: swiotlb: add hwdev to swiotlb_phys_to_bus() / swiotlb_sg_to_bus() Impact: extend functions with a (yet unused) parameter, update callsites Some architectures need it - in preparation for highmem swiotlb. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Ingo Molnar --- arch/x86/kernel/pci-swiotlb_64.c | 2 +- include/linux/swiotlb.h | 3 ++- lib/swiotlb.c | 53 +++++++++++++++++----------------------- 3 files changed, 25 insertions(+), 33 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c index 242c3440687f..6cf8a816dc29 100644 --- a/arch/x86/kernel/pci-swiotlb_64.c +++ b/arch/x86/kernel/pci-swiotlb_64.c @@ -23,7 +23,7 @@ void *swiotlb_alloc(unsigned order, unsigned long nslabs) return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); } -dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr) +dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) { return paddr; } diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 325af1de0351..dedd3c0cfe30 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -27,7 +27,8 @@ swiotlb_init(void); extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs); extern void *swiotlb_alloc(unsigned order, unsigned long nslabs); -extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address); +extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, + phys_addr_t address); extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); diff --git a/lib/swiotlb.c b/lib/swiotlb.c index fa2dc4e5f9ba..3657da8ebbc3 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -126,7 +126,7 @@ void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs) return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); } -dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr) +dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) { return paddr; } @@ -136,9 +136,10 @@ 
phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr) return baddr; } -static dma_addr_t swiotlb_virt_to_bus(volatile void *address) +static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, + volatile void *address) { - return swiotlb_phys_to_bus(virt_to_phys(address)); + return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); } static void *swiotlb_bus_to_virt(dma_addr_t address) @@ -151,35 +152,23 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) return 0; } -static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg) +static dma_addr_t swiotlb_sg_to_bus(struct device *hwdev, struct scatterlist *sg) { - return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset); + return swiotlb_phys_to_bus(hwdev, page_to_phys(sg_page(sg)) + sg->offset); } static void swiotlb_print_info(unsigned long bytes) { phys_addr_t pstart, pend; - dma_addr_t bstart, bend; pstart = virt_to_phys(io_tlb_start); pend = virt_to_phys(io_tlb_end); - bstart = swiotlb_phys_to_bus(pstart); - bend = swiotlb_phys_to_bus(pend); - printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n", bytes >> 20, io_tlb_start, io_tlb_end); - if (pstart != bstart || pend != bend) - printk(KERN_INFO "software IO TLB at phys %#llx - %#llx" - " bus %#llx - %#llx\n", - (unsigned long long)pstart, - (unsigned long long)pend, - (unsigned long long)bstart, - (unsigned long long)bend); - else - printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n", - (unsigned long long)pstart, - (unsigned long long)pend); + printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n", + (unsigned long long)pstart, + (unsigned long long)pend); } /* @@ -406,7 +395,7 @@ map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, i struct swiotlb_phys_addr slot_buf; mask = dma_get_seg_boundary(hwdev); - start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask; + start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask; offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; @@ -585,7 +574,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_mask = hwdev->coherent_dma_mask; ret = (void *)__get_free_pages(flags, order); - if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) { + if (ret && + !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret), + size)) { /* * The allocated memory isn't reachable by the device. * Fall back on swiotlb_map_single(). 
@@ -609,7 +600,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, } memset(ret, 0, size); - dev_addr = swiotlb_virt_to_bus(ret); + dev_addr = swiotlb_virt_to_bus(hwdev, ret); /* Confirm address can be DMA'd by device */ if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) { @@ -669,7 +660,7 @@ dma_addr_t swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, int dir, struct dma_attrs *attrs) { - dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr); + dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr); void *map; struct swiotlb_phys_addr buffer; @@ -694,7 +685,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, map = io_tlb_overflow_buffer; } - dev_addr = swiotlb_virt_to_bus(map); + dev_addr = swiotlb_virt_to_bus(hwdev, map); /* * Ensure that the address returned is DMA'ble @@ -840,7 +831,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - dev_addr = swiotlb_sg_to_bus(sg); + dev_addr = swiotlb_sg_to_bus(hwdev, sg); if (range_needs_mapping(sg_virt(sg), sg->length) || address_needs_mapping(hwdev, dev_addr, sg->length)) { void *map; @@ -856,7 +847,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, sgl[0].dma_length = 0; return 0; } - sg->dma_address = swiotlb_virt_to_bus(map); + sg->dma_address = swiotlb_virt_to_bus(hwdev, map); } else sg->dma_address = dev_addr; sg->dma_length = sg->length; @@ -886,7 +877,7 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - if (sg->dma_address != swiotlb_sg_to_bus(sg)) + if (sg->dma_address != swiotlb_sg_to_bus(hwdev, sg)) unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), sg->dma_length, dir); else if (dir == DMA_FROM_DEVICE) @@ -919,7 +910,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - if (sg->dma_address != swiotlb_sg_to_bus(sg)) + if (sg->dma_address != swiotlb_sg_to_bus(hwdev, sg)) sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), sg->dma_length, dir, target); else if (dir == DMA_FROM_DEVICE) @@ -944,7 +935,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) { - return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer)); + return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer)); } /* @@ -956,7 +947,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) int swiotlb_dma_supported(struct device *hwdev, u64 mask) { - return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask; + return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask; } EXPORT_SYMBOL(swiotlb_map_single); -- cgit v1.2.3 From 1da4f9894c243a9c58c505fd8f3e6cc0709a8825 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Sun, 28 Dec 2008 15:02:05 +0900 Subject: swiotlb: replace architecture-specific swiotlb.h with linux/swiotlb.h Impact: cleanup This replaces architecture-specific swiotlb.h (X86 and IA64) with linux/swiotlb.h. 
Signed-off-by: FUJITA Tomonori Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/swiotlb.h | 39 +-------------------------------------- arch/x86/include/asm/swiotlb.h | 38 +------------------------------------- 2 files changed, 2 insertions(+), 75 deletions(-) (limited to 'arch/x86') diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h index fb79423834d0..dcbaea7ce128 100644 --- a/arch/ia64/include/asm/swiotlb.h +++ b/arch/ia64/include/asm/swiotlb.h @@ -2,44 +2,7 @@ #define ASM_IA64__SWIOTLB_H #include - -/* SWIOTLB interface */ - -extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, - size_t size, int dir); -extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags); -extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir); -extern void swiotlb_sync_single_for_cpu(struct device *hwdev, - dma_addr_t dev_addr, - size_t size, int dir); -extern void swiotlb_sync_single_for_device(struct device *hwdev, - dma_addr_t dev_addr, - size_t size, int dir); -extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, - dma_addr_t dev_addr, - unsigned long offset, - size_t size, int dir); -extern void swiotlb_sync_single_range_for_device(struct device *hwdev, - dma_addr_t dev_addr, - unsigned long offset, - size_t size, int dir); -extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, - struct scatterlist *sg, int nelems, - int dir); -extern void swiotlb_sync_sg_for_device(struct device *hwdev, - struct scatterlist *sg, int nelems, - int dir); -extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); -extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); -extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); -extern void swiotlb_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle); -extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); -extern void swiotlb_init(void); +#include extern int swiotlb_force; diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index 51fb2c76ad74..b9e4e20174fb 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h @@ -1,46 +1,10 @@ #ifndef _ASM_X86_SWIOTLB_H #define _ASM_X86_SWIOTLB_H -#include +#include /* SWIOTLB interface */ -extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, - size_t size, int dir); -extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags); -extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir); -extern void swiotlb_sync_single_for_cpu(struct device *hwdev, - dma_addr_t dev_addr, - size_t size, int dir); -extern void swiotlb_sync_single_for_device(struct device *hwdev, - dma_addr_t dev_addr, - size_t size, int dir); -extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, - dma_addr_t dev_addr, - unsigned long offset, - size_t size, int dir); -extern void swiotlb_sync_single_range_for_device(struct device *hwdev, - dma_addr_t dev_addr, - unsigned long offset, - size_t size, int dir); -extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, - struct scatterlist *sg, int nelems, - int dir); -extern void swiotlb_sync_sg_for_device(struct device *hwdev, - struct scatterlist *sg, int nelems, - int dir); -extern int swiotlb_map_sg(struct device *hwdev, struct 
scatterlist *sg, - int nents, int direction); -extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, - int nents, int direction); -extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); -extern void swiotlb_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle); -extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); -extern void swiotlb_init(void); - extern int swiotlb_force; #ifdef CONFIG_SWIOTLB -- cgit v1.2.3 From 6092848a2a23b660150a38bc06f59d75838d70c8 Mon Sep 17 00:00:00 2001 From: Sergio Luis Date: Sun, 28 Dec 2008 04:12:26 -0300 Subject: x86: mark get_cpu_leaves() with __cpuinit annotation Impact: fix section mismatch warning Commit b2bb85549134c005e997e5a7ed303bda6a1ae738 ("x86: Remove cpumask games in x86/kernel/cpu/intel_cacheinfo.c") introduced get_cpu_leaves(), which references __cpuinit cpuid4_cache_lookup(). Mark get_cpu_leaves() with a __cpuinit annotation. Signed-off-by: Sergio Luis Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel_cacheinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index fb7f946cb65e..7bd00a565672 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -534,7 +534,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) per_cpu(cpuid4_info, cpu) = NULL; } -static void get_cpu_leaves(void *_retval) +static void __cpuinit get_cpu_leaves(void *_retval) { int j, *retval = _retval, cpu = smp_processor_id(); -- cgit v1.2.3 From c805b7300ed20ec4f10ea385988d6d3fa935b26c Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Sat, 27 Dec 2008 17:10:18 +0300 Subject: x86: mach-default setup.c cleanups Impact: cleanup - Break long lines into shorter form. - Use pr_ macros instead of plain printk. Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/mach-default/setup.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c index 37b9ae4d44c5..df167f265622 100644 --- a/arch/x86/mach-default/setup.c +++ b/arch/x86/mach-default/setup.c @@ -133,29 +133,28 @@ void __init time_init_hook(void) **/ void mca_nmi_hook(void) { - /* If I recall correctly, there's a whole bunch of other things that + /* + * If I recall correctly, there's a whole bunch of other things that * we can do to check for NMI problems, but that's all I know about * at the moment. */ - - printk("NMI generated from unknown source!\n"); + pr_warning("NMI generated from unknown source!\n"); } #endif static __init int no_ipi_broadcast(char *str) { get_option(&str, &no_broadcast); - printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" : - "IPI Broadcast"); + pr_info("Using %s mode\n", + no_broadcast ? "No IPI Broadcast" : "IPI Broadcast"); return 1; } - __setup("no_ipi_broadcast=", no_ipi_broadcast); static int __init print_ipi_mode(void) { - printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" : - "Shortcut"); + pr_info("Using IPI %s mode\n", + no_broadcast ? 
"No-Shortcut" : "Shortcut"); return 0; } -- cgit v1.2.3 From 2f06de0671096e19350c9efe21cfdbc0891aab20 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 27 Dec 2008 21:37:10 +0530 Subject: x86: introducing asm/sys_ia32.h Impact: cleanup, avoid 44 sparse warnings, new file asm/sys_ia32.h Fixes following sparse warnings: CHECK arch/x86/ia32/sys_ia32.c arch/x86/ia32/sys_ia32.c:53:17: warning: symbol 'sys32_truncate64' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:60:17: warning: symbol 'sys32_ftruncate64' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:98:17: warning: symbol 'sys32_stat64' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:109:17: warning: symbol 'sys32_lstat64' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:119:17: warning: symbol 'sys32_fstat64' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:128:17: warning: symbol 'sys32_fstatat' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:164:17: warning: symbol 'sys32_mmap' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:195:17: warning: symbol 'sys32_mprotect' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:201:17: warning: symbol 'sys32_pipe' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:215:17: warning: symbol 'sys32_rt_sigaction' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:291:17: warning: symbol 'sys32_sigaction' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:330:17: warning: symbol 'sys32_rt_sigprocmask' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:370:17: warning: symbol 'sys32_alarm' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:383:17: warning: symbol 'sys32_old_select' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:393:17: warning: symbol 'sys32_waitpid' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:401:17: warning: symbol 'sys32_sysfs' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:406:17: warning: symbol 'sys32_sched_rr_get_interval' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:421:17: warning: symbol 'sys32_rt_sigpending' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:445:17: warning: symbol 'sys32_rt_sigqueueinfo' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:472:17: warning: symbol 'sys32_sysctl' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:517:17: warning: symbol 'sys32_pread' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:524:17: warning: symbol 'sys32_pwrite' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:532:17: warning: symbol 'sys32_personality' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:545:17: warning: symbol 'sys32_sendfile' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:565:17: warning: symbol 'sys32_mmap2' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:589:17: warning: symbol 'sys32_olduname' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:626:6: warning: symbol 'sys32_uname' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:641:6: warning: symbol 'sys32_ustat' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:663:17: warning: symbol 'sys32_execve' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:678:17: warning: symbol 'sys32_clone' was not declared. Should it be static? 
arch/x86/ia32/sys_ia32.c:693:6: warning: symbol 'sys32_lseek' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:698:6: warning: symbol 'sys32_kill' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:703:6: warning: symbol 'sys32_fadvise64_64' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:712:6: warning: symbol 'sys32_vm86_warning' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:726:6: warning: symbol 'sys32_lookup_dcookie' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:732:20: warning: symbol 'sys32_readahead' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:738:17: warning: symbol 'sys32_sync_file_range' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:746:17: warning: symbol 'sys32_fadvise64' was not declared. Should it be static? arch/x86/ia32/sys_ia32.c:753:17: warning: symbol 'sys32_fallocate' was not declared. Should it be static? CHECK arch/x86/ia32/ia32_signal.c arch/x86/ia32/ia32_signal.c:126:17: warning: symbol 'sys32_sigsuspend' was not declared. Should it be static? arch/x86/ia32/ia32_signal.c:141:17: warning: symbol 'sys32_sigaltstack' was not declared. Should it be static? arch/x86/ia32/ia32_signal.c:249:17: warning: symbol 'sys32_sigreturn' was not declared. Should it be static? arch/x86/ia32/ia32_signal.c:279:17: warning: symbol 'sys32_rt_sigreturn' was not declared. Should it be static? CHECK arch/x86/ia32/ipc32.c arch/x86/ia32/ipc32.c:12:17: warning: symbol 'sys32_ipc' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/ia32/ia32_signal.c | 3 +- arch/x86/ia32/ipc32.c | 1 + arch/x86/ia32/sys_ia32.c | 2 +- arch/x86/include/asm/sys_ia32.h | 101 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 104 insertions(+), 3 deletions(-) create mode 100644 arch/x86/include/asm/sys_ia32.h (limited to 'arch/x86') diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index b195f85526e3..9dabd00e9805 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -24,15 +24,14 @@ #include #include #include -#include #include #include #include #include #include #include - #include +#include #define DEBUG_SIG 0 diff --git a/arch/x86/ia32/ipc32.c b/arch/x86/ia32/ipc32.c index d21991ce606c..29cdcd02ead3 100644 --- a/arch/x86/ia32/ipc32.c +++ b/arch/x86/ia32/ipc32.c @@ -8,6 +8,7 @@ #include #include #include +#include asmlinkage long sys32_ipc(u32 call, int first, int second, int third, compat_uptr_t ptr, u32 fifth) diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 2e09dcd3c0a6..6c0d7f6231af 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c @@ -44,8 +44,8 @@ #include #include #include -#include #include +#include #define AA(__x) ((unsigned long)(__x)) diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h new file mode 100644 index 000000000000..ffb08be2a530 --- /dev/null +++ b/arch/x86/include/asm/sys_ia32.h @@ -0,0 +1,101 @@ +/* + * sys_ia32.h - Linux ia32 syscall interfaces + * + * Copyright (c) 2008 Jaswinder Singh Rajput + * + * This file is released under the GPLv2. + * See the file COPYING for more details. 
+ */ + +#ifndef _ASM_X86_SYS_IA32_H +#define _ASM_X86_SYS_IA32_H + +#include +#include +#include +#include +#include +#include + +/* ia32/sys_ia32.c */ +asmlinkage long sys32_truncate64(char __user *, unsigned long, unsigned long); +asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long); + +asmlinkage long sys32_stat64(char __user *, struct stat64 __user *); +asmlinkage long sys32_lstat64(char __user *, struct stat64 __user *); +asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *); +asmlinkage long sys32_fstatat(unsigned int, char __user *, + struct stat64 __user *, int); +struct mmap_arg_struct; +asmlinkage long sys32_mmap(struct mmap_arg_struct __user *); +asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long); + +asmlinkage long sys32_pipe(int __user *); +struct sigaction32; +struct old_sigaction32; +asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *, + struct sigaction32 __user *, unsigned int); +asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, + struct old_sigaction32 __user *); +asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *, + compat_sigset_t __user *, unsigned int); +asmlinkage long sys32_alarm(unsigned int); + +struct sel_arg_struct; +asmlinkage long sys32_old_select(struct sel_arg_struct __user *); +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); +asmlinkage long sys32_sysfs(int, u32, u32); + +asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, + struct compat_timespec __user *); +asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); +asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); + +#ifdef CONFIG_SYSCTL_SYSCALL +struct sysctl_ia32; +asmlinkage long sys32_sysctl(struct sysctl_ia32 __user *); +#endif + +asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); +asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); + +asmlinkage long sys32_personality(unsigned long); +asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); + +asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long); + +struct oldold_utsname; +struct old_utsname; +asmlinkage long sys32_olduname(struct oldold_utsname __user *); +long sys32_uname(struct old_utsname __user *); + +long sys32_ustat(unsigned, struct ustat32 __user *); + +asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *, + compat_uptr_t __user *, struct pt_regs *); +asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); + +long sys32_lseek(unsigned int, int, unsigned int); +long sys32_kill(int, int); +long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int); +long sys32_vm86_warning(void); +long sys32_lookup_dcookie(u32, u32, char __user *, size_t); + +asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t); +asmlinkage long sys32_sync_file_range(int, unsigned, unsigned, + unsigned, unsigned, int); +asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int); +asmlinkage long sys32_fallocate(int, int, unsigned, + unsigned, unsigned, unsigned); + +/* ia32/ia32_signal.c */ +asmlinkage long sys32_sigsuspend(int, int, old_sigset_t); +asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *, + stack_ia32_t __user *, struct pt_regs *); +asmlinkage long sys32_sigreturn(struct pt_regs *); +asmlinkage long sys32_rt_sigreturn(struct pt_regs *); + +/* ia32/ipc32.c */ +asmlinkage long sys32_ipc(u32, int, 
int, int, compat_uptr_t, u32); +#endif /* _ASM_X86_SYS_IA32_H */ -- cgit v1.2.3 From 83bd9243956f30d91851b272988a237999b35b10 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 15 Dec 2008 15:09:50 +0100 Subject: x86/oprofile: fix pci_dev use count for AMD northbridge devices This patch fixes the PCI device use count for AMD northbridge devices. In case of an IBS LVT initialization failure, the PCI device is released now by calling pci_dev_put(). If there are no initialization errors, the devices are released in pci_get_device() while iterating. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 98658f25f542..c5b5c7fb3ced 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -409,6 +409,7 @@ static int init_ibs_nmi(void) | IBSCTL_LVTOFFSETVAL); pci_read_config_dword(cpu_cfg, IBSCTL, &value); if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { + pci_dev_put(cpu_cfg); printk(KERN_DEBUG "Failed to setup IBS LVT offset, " "IBSCTL = 0x%08x", value); return 1; -- cgit v1.2.3 From a1ae299dfb6ef219b296b61d1f222732391973b5 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 29 Dec 2008 20:32:52 +0530 Subject: x86: apic.c declare pic_mode before they get used Impact: cleanup, avoid sparse warning In asm/mpspec.h moved out pic_mode from CONFIG_X86_32 as it is common for both 32 and 64 bit. Fixes this sparse warning for x86_64: arch/x86/kernel/apic.c:128:5: warning: symbol 'pic_mode' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 91885c28f66b..62d14ce3cd00 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -6,13 +6,13 @@ #include extern int apic_version[MAX_APICS]; +extern int pic_mode; #ifdef CONFIG_X86_32 #include extern unsigned int def_to_bigsmp; extern u8 apicid_2_node[]; -extern int pic_mode; #ifdef CONFIG_X86_NUMAQ extern int mp_bus_id_to_node[MAX_MP_BUSSES]; -- cgit v1.2.3 From 7f3e632f9d8d234819bcdef7a68fc8b84f7d3d3d Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 29 Dec 2008 20:34:35 +0530 Subject: x86: io_apic.c io_apic_sync should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: arch/x86/kernel/io_apic.c:709:6: warning: symbol 'io_apic_sync' was not declared. Should it be static? 
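The fix follows the usual pattern for this class of sparse warning: a function that is only called from within its own translation unit should carry internal linkage, so sparse stops asking for a missing declaration. A standalone illustration in plain userspace C (made-up names, not kernel code; shown only to demonstrate the warning and its cure):

	/* sparse_static_demo.c -- illustrative only.
	 * Without the 'static' below, running `sparse sparse_static_demo.c`
	 * would be expected to report:
	 *   warning: symbol 'sum' was not declared. Should it be static?
	 * Marking the file-local helper static silences the warning and gives
	 * the compiler room to inline or drop the symbol, which is where the
	 * small size reduction mentioned in these commits comes from. */
	#include <stdio.h>

	static int sum(int a, int b)	/* file-local helper: internal linkage */
	{
		return a + b;
	}

	int main(void)
	{
		printf("%d\n", sum(2, 3));
		return 0;
	}
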
Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 679e7bbbbcd6..b8c8a8e99341 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -481,7 +481,7 @@ static void __unmask_IO_APIC_irq(unsigned int irq) } #ifdef CONFIG_X86_64 -void io_apic_sync(struct irq_pin_list *entry) +static void io_apic_sync(struct irq_pin_list *entry) { /* * Synchronize the IO-APIC and the CPU by doing -- cgit v1.2.3 From cbafbc826bf645f7fbbfbb2ff20138e5ccb4700e Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 29 Dec 2008 20:36:40 +0530 Subject: x86: efi.c declare add_efi_memmap before they get used Impact: cleanup, avoid sparse warning Fixes this sparse warning: arch/x86/kernel/efi.c:67:5: warning: symbol 'add_efi_memmap' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/efi.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index a2e545c91c35..ca5ffb2856b6 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -90,6 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size); #endif /* CONFIG_X86_32 */ +extern int add_efi_memmap; extern void efi_reserve_early(void); extern void efi_call_phys_prelog(void); extern void efi_call_phys_epilog(void); -- cgit v1.2.3 From c854c91979e0717c619bc55e124d41d60d5eb3d6 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 29 Dec 2008 20:38:09 +0530 Subject: x86_64: pci-gart_64.c iommu_fullflush should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: arch/x86/kernel/pci-gart_64.c:55:5: warning: symbol 'iommu_fullflush' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/pci-gart_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index a35eaa379ff6..00c2bcd41463 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -52,7 +52,7 @@ static u32 *iommu_gatt_base; /* Remapping table */ * to trigger bugs with some popular PCI cards, in particular 3ware (but * has been also also seen with Qlogic at least). */ -int iommu_fullflush = 1; +static int iommu_fullflush = 1; /* Allocation bitmap for the remapping area: */ static DEFINE_SPINLOCK(iommu_bitmap_lock); -- cgit v1.2.3 From 824877111cd7f2b4fd2fe6947c5c5cbbb3ac5bd8 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 27 Dec 2008 18:32:28 +0530 Subject: x86, pci: move arch/x86/pci/pci.h to arch/x86/include/asm/pci_x86.h Impact: cleanup Now that arch/x86/pci/pci.h is used in a number of other places as well, move the lowlevel x86 pci definitions into the architecture include files. 
(not to be confused with the existing arch/x86/include/asm/pci.h file, which provides public details about x86 PCI) Tested on: X86_32_UP, X86_32_SMP and X86_64_SMP Signed-off-by: Jaswinder Singh Rajput Acked-by: Jesse Barnes Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pci_x86.h | 165 +++++++++++++++++++++++++++++++++++++ arch/x86/kernel/mmconf-fam10h_64.c | 3 +- arch/x86/kernel/reboot.c | 3 +- arch/x86/pci/acpi.c | 2 +- arch/x86/pci/amd_bus.c | 2 +- arch/x86/pci/common.c | 3 +- arch/x86/pci/direct.c | 2 +- arch/x86/pci/early.c | 2 +- arch/x86/pci/fixup.c | 3 +- arch/x86/pci/i386.c | 2 +- arch/x86/pci/init.c | 2 +- arch/x86/pci/irq.c | 3 +- arch/x86/pci/legacy.c | 2 +- arch/x86/pci/mmconfig-shared.c | 3 +- arch/x86/pci/mmconfig_32.c | 2 +- arch/x86/pci/mmconfig_64.c | 3 +- arch/x86/pci/numaq_32.c | 2 +- arch/x86/pci/olpc.c | 2 +- arch/x86/pci/pcbios.c | 5 +- arch/x86/pci/pci.h | 162 ------------------------------------ arch/x86/pci/visws.c | 3 +- drivers/pci/hotplug/cpqphp_core.c | 2 +- drivers/pci/hotplug/cpqphp_pci.c | 2 +- drivers/pci/hotplug/ibmphp_core.c | 2 +- 24 files changed, 188 insertions(+), 194 deletions(-) create mode 100644 arch/x86/include/asm/pci_x86.h delete mode 100644 arch/x86/pci/pci.h (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h new file mode 100644 index 000000000000..e60fd3e14bdf --- /dev/null +++ b/arch/x86/include/asm/pci_x86.h @@ -0,0 +1,165 @@ +/* + * Low-Level PCI Access for i386 machines. + * + * (c) 1999 Martin Mares + */ + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + +#define PCI_PROBE_BIOS 0x0001 +#define PCI_PROBE_CONF1 0x0002 +#define PCI_PROBE_CONF2 0x0004 +#define PCI_PROBE_MMCONF 0x0008 +#define PCI_PROBE_MASK 0x000f +#define PCI_PROBE_NOEARLY 0x0010 + +#define PCI_NO_CHECKS 0x0400 +#define PCI_USE_PIRQ_MASK 0x0800 +#define PCI_ASSIGN_ROMS 0x1000 +#define PCI_BIOS_IRQ_SCAN 0x2000 +#define PCI_ASSIGN_ALL_BUSSES 0x4000 +#define PCI_CAN_SKIP_ISA_ALIGN 0x8000 +#define PCI_USE__CRS 0x10000 +#define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000 +#define PCI_HAS_IO_ECS 0x40000 +#define PCI_NOASSIGN_ROMS 0x80000 + +extern unsigned int pci_probe; +extern unsigned long pirq_table_addr; + +enum pci_bf_sort_state { + pci_bf_sort_default, + pci_force_nobf, + pci_force_bf, + pci_dmi_bf, +}; + +/* pci-i386.c */ + +extern unsigned int pcibios_max_latency; + +void pcibios_resource_survey(void); + +/* pci-pc.c */ + +extern int pcibios_last_bus; +extern struct pci_bus *pci_root_bus; +extern struct pci_ops pci_root_ops; + +/* pci-irq.c */ + +struct irq_info { + u8 bus, devfn; /* Bus, device and function */ + struct { + u8 link; /* IRQ line ID, chipset dependent, + 0 = not routed */ + u16 bitmap; /* Available IRQs */ + } __attribute__((packed)) irq[4]; + u8 slot; /* Slot number, 0=onboard */ + u8 rfu; +} __attribute__((packed)); + +struct irq_routing_table { + u32 signature; /* PIRQ_SIGNATURE should be here */ + u16 version; /* PIRQ_VERSION */ + u16 size; /* Table size in bytes */ + u8 rtr_bus, rtr_devfn; /* Where the interrupt router lies */ + u16 exclusive_irqs; /* IRQs devoted exclusively to + PCI usage */ + u16 rtr_vendor, rtr_device; /* Vendor and device ID of + interrupt router */ + u32 miniport_data; /* Crap */ + u8 rfu[11]; + u8 checksum; /* Modulo 256 checksum must give 0 */ + struct irq_info slots[0]; +} __attribute__((packed)); + +extern unsigned int pcibios_irq_mask; + +extern int pcibios_scanned; +extern spinlock_t pci_config_lock; + +extern int 
(*pcibios_enable_irq)(struct pci_dev *dev); +extern void (*pcibios_disable_irq)(struct pci_dev *dev); + +struct pci_raw_ops { + int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val); + int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val); +}; + +extern struct pci_raw_ops *raw_pci_ops; +extern struct pci_raw_ops *raw_pci_ext_ops; + +extern struct pci_raw_ops pci_direct_conf1; +extern bool port_cf9_safe; + +/* arch_initcall level */ +extern int pci_direct_probe(void); +extern void pci_direct_init(int type); +extern void pci_pcbios_init(void); +extern int pci_olpc_init(void); +extern void __init dmi_check_pciprobe(void); +extern void __init dmi_check_skip_isa_align(void); + +/* some common used subsys_initcalls */ +extern int __init pci_acpi_init(void); +extern int __init pcibios_irq_init(void); +extern int __init pci_visws_init(void); +extern int __init pci_numaq_init(void); +extern int __init pcibios_init(void); + +/* pci-mmconfig.c */ + +extern int __init pci_mmcfg_arch_init(void); +extern void __init pci_mmcfg_arch_free(void); + +/* + * AMD Fam10h CPUs are buggy, and cannot access MMIO config space + * on their northbrige except through the * %eax register. As such, you MUST + * NOT use normal IOMEM accesses, you need to only use the magic mmio-config + * accessor functions. + * In fact just use pci_config_*, nothing else please. + */ +static inline unsigned char mmio_config_readb(void __iomem *pos) +{ + u8 val; + asm volatile("movb (%1),%%al" : "=a" (val) : "r" (pos)); + return val; +} + +static inline unsigned short mmio_config_readw(void __iomem *pos) +{ + u16 val; + asm volatile("movw (%1),%%ax" : "=a" (val) : "r" (pos)); + return val; +} + +static inline unsigned int mmio_config_readl(void __iomem *pos) +{ + u32 val; + asm volatile("movl (%1),%%eax" : "=a" (val) : "r" (pos)); + return val; +} + +static inline void mmio_config_writeb(void __iomem *pos, u8 val) +{ + asm volatile("movb %%al,(%1)" : : "a" (val), "r" (pos) : "memory"); +} + +static inline void mmio_config_writew(void __iomem *pos, u16 val) +{ + asm volatile("movw %%ax,(%1)" : : "a" (val), "r" (pos) : "memory"); +} + +static inline void mmio_config_writel(void __iomem *pos, u32 val) +{ + asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); +} diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index efc2f361fe85..666e43df51f9 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c @@ -13,8 +13,7 @@ #include #include #include - -#include "../pci/pci.h" +#include struct pci_hostbridge_probe { u32 bus; diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index b165eb0884ed..a90913cccfb7 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -12,6 +12,7 @@ #include #include #include +#include #ifdef CONFIG_X86_32 # include @@ -22,8 +23,6 @@ #endif #include -#include "../pci/pci.h" - /* * Power off function, if any diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 1d88d2b39771..9e5752fe4d15 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -4,7 +4,7 @@ #include #include #include -#include "pci.h" +#include struct pci_root_info { char *name; diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index 22e057665e55..9bb09823b362 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c @@ -2,7 +2,7 @@ #include #include #include -#include "pci.h" +#include #ifdef CONFIG_X86_64 #include diff 
--git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index bb1a01f089e2..62ddb73e09ed 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -14,8 +14,7 @@ #include #include #include - -#include "pci.h" +#include unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | PCI_PROBE_MMCONF; diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c index 9a5af6c8fbe9..bd13c3e4c6db 100644 --- a/arch/x86/pci/direct.c +++ b/arch/x86/pci/direct.c @@ -5,7 +5,7 @@ #include #include #include -#include "pci.h" +#include /* * Functions for accessing PCI base (first 256 bytes) and extended diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c index 86631ccbc25a..f6adf2c6d751 100644 --- a/arch/x86/pci/early.c +++ b/arch/x86/pci/early.c @@ -2,7 +2,7 @@ #include #include #include -#include "pci.h" +#include /* Direct PCI access. This is used for PCI accesses in early boot before the PCI subsystem works. */ diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 2051dc96b8e9..7d388d5cf548 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -6,8 +6,7 @@ #include #include #include -#include "pci.h" - +#include static void __devinit pci_fixup_i450nx(struct pci_dev *d) { diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 844df0cbbd3e..e51bf2cda4b0 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -34,8 +34,8 @@ #include #include +#include -#include "pci.h" static int skip_isa_ioresource_align(struct pci_dev *dev) { diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c index d6c950f81858..bec3b048e72b 100644 --- a/arch/x86/pci/init.c +++ b/arch/x86/pci/init.c @@ -1,6 +1,6 @@ #include #include -#include "pci.h" +#include /* arch_initcall has too random ordering, so call the initializers in the right sequence from here. */ diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index bf69dbe08bff..373b9afe6d44 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -16,8 +16,7 @@ #include #include #include - -#include "pci.h" +#include #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24)) #define PIRQ_VERSION 0x0100 diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c index b722dd481b39..f1065b129e9c 100644 --- a/arch/x86/pci/legacy.c +++ b/arch/x86/pci/legacy.c @@ -3,7 +3,7 @@ */ #include #include -#include "pci.h" +#include /* * Discover remaining PCI buses in case there are peer host bridges. 
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 654a2234f8f3..89bf9242c80a 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -15,8 +15,7 @@ #include #include #include - -#include "pci.h" +#include /* aperture is up to 256MB but BIOS may reserve less */ #define MMCONFIG_APER_MIN (2 * 1024*1024) diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c index f3c761dce695..8b2d561046a3 100644 --- a/arch/x86/pci/mmconfig_32.c +++ b/arch/x86/pci/mmconfig_32.c @@ -13,7 +13,7 @@ #include #include #include -#include "pci.h" +#include /* Assume systems with more busses have correct MCFG */ #define mmcfg_virt_addr ((void __iomem *) fix_to_virt(FIX_PCIE_MCFG)) diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c index a1994163c99d..30007ffc8e11 100644 --- a/arch/x86/pci/mmconfig_64.c +++ b/arch/x86/pci/mmconfig_64.c @@ -10,8 +10,7 @@ #include #include #include - -#include "pci.h" +#include /* Static virtual mapping of the MMCONFIG aperture */ struct mmcfg_virt { diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c index 1177845d3186..2089354968a2 100644 --- a/arch/x86/pci/numaq_32.c +++ b/arch/x86/pci/numaq_32.c @@ -7,7 +7,7 @@ #include #include #include -#include "pci.h" +#include #define XQUAD_PORTIO_BASE 0xfe400000 #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c index e11e9e803d5f..b889d824f7c6 100644 --- a/arch/x86/pci/olpc.c +++ b/arch/x86/pci/olpc.c @@ -29,7 +29,7 @@ #include #include #include -#include "pci.h" +#include /* * In the tables below, the first two line (8 longwords) are the diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index 37472fc6f729..b82cae970dfd 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c @@ -6,9 +6,8 @@ #include #include #include -#include "pci.h" -#include "pci-functions.h" - +#include +#include /* BIOS32 signature: "_32_" */ #define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) diff --git a/arch/x86/pci/pci.h b/arch/x86/pci/pci.h deleted file mode 100644 index 1959018aac02..000000000000 --- a/arch/x86/pci/pci.h +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Low-Level PCI Access for i386 machines. - * - * (c) 1999 Martin Mares - */ - -#undef DEBUG - -#ifdef DEBUG -#define DBG(x...) printk(x) -#else -#define DBG(x...) 
-#endif - -#define PCI_PROBE_BIOS 0x0001 -#define PCI_PROBE_CONF1 0x0002 -#define PCI_PROBE_CONF2 0x0004 -#define PCI_PROBE_MMCONF 0x0008 -#define PCI_PROBE_MASK 0x000f -#define PCI_PROBE_NOEARLY 0x0010 - -#define PCI_NO_CHECKS 0x0400 -#define PCI_USE_PIRQ_MASK 0x0800 -#define PCI_ASSIGN_ROMS 0x1000 -#define PCI_BIOS_IRQ_SCAN 0x2000 -#define PCI_ASSIGN_ALL_BUSSES 0x4000 -#define PCI_CAN_SKIP_ISA_ALIGN 0x8000 -#define PCI_USE__CRS 0x10000 -#define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000 -#define PCI_HAS_IO_ECS 0x40000 -#define PCI_NOASSIGN_ROMS 0x80000 - -extern unsigned int pci_probe; -extern unsigned long pirq_table_addr; - -enum pci_bf_sort_state { - pci_bf_sort_default, - pci_force_nobf, - pci_force_bf, - pci_dmi_bf, -}; - -/* pci-i386.c */ - -extern unsigned int pcibios_max_latency; - -void pcibios_resource_survey(void); - -/* pci-pc.c */ - -extern int pcibios_last_bus; -extern struct pci_bus *pci_root_bus; -extern struct pci_ops pci_root_ops; - -/* pci-irq.c */ - -struct irq_info { - u8 bus, devfn; /* Bus, device and function */ - struct { - u8 link; /* IRQ line ID, chipset dependent, 0=not routed */ - u16 bitmap; /* Available IRQs */ - } __attribute__((packed)) irq[4]; - u8 slot; /* Slot number, 0=onboard */ - u8 rfu; -} __attribute__((packed)); - -struct irq_routing_table { - u32 signature; /* PIRQ_SIGNATURE should be here */ - u16 version; /* PIRQ_VERSION */ - u16 size; /* Table size in bytes */ - u8 rtr_bus, rtr_devfn; /* Where the interrupt router lies */ - u16 exclusive_irqs; /* IRQs devoted exclusively to PCI usage */ - u16 rtr_vendor, rtr_device; /* Vendor and device ID of interrupt router */ - u32 miniport_data; /* Crap */ - u8 rfu[11]; - u8 checksum; /* Modulo 256 checksum must give zero */ - struct irq_info slots[0]; -} __attribute__((packed)); - -extern unsigned int pcibios_irq_mask; - -extern int pcibios_scanned; -extern spinlock_t pci_config_lock; - -extern int (*pcibios_enable_irq)(struct pci_dev *dev); -extern void (*pcibios_disable_irq)(struct pci_dev *dev); - -struct pci_raw_ops { - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, - int reg, int len, u32 *val); - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, - int reg, int len, u32 val); -}; - -extern struct pci_raw_ops *raw_pci_ops; -extern struct pci_raw_ops *raw_pci_ext_ops; - -extern struct pci_raw_ops pci_direct_conf1; -extern bool port_cf9_safe; - -/* arch_initcall level */ -extern int pci_direct_probe(void); -extern void pci_direct_init(int type); -extern void pci_pcbios_init(void); -extern int pci_olpc_init(void); -extern void __init dmi_check_pciprobe(void); -extern void __init dmi_check_skip_isa_align(void); - -/* some common used subsys_initcalls */ -extern int __init pci_acpi_init(void); -extern int __init pcibios_irq_init(void); -extern int __init pci_visws_init(void); -extern int __init pci_numaq_init(void); -extern int __init pcibios_init(void); - -/* pci-mmconfig.c */ - -extern int __init pci_mmcfg_arch_init(void); -extern void __init pci_mmcfg_arch_free(void); - -/* - * AMD Fam10h CPUs are buggy, and cannot access MMIO config space - * on their northbrige except through the * %eax register. As such, you MUST - * NOT use normal IOMEM accesses, you need to only use the magic mmio-config - * accessor functions. - * In fact just use pci_config_*, nothing else please. 
- */ -static inline unsigned char mmio_config_readb(void __iomem *pos) -{ - u8 val; - asm volatile("movb (%1),%%al" : "=a" (val) : "r" (pos)); - return val; -} - -static inline unsigned short mmio_config_readw(void __iomem *pos) -{ - u16 val; - asm volatile("movw (%1),%%ax" : "=a" (val) : "r" (pos)); - return val; -} - -static inline unsigned int mmio_config_readl(void __iomem *pos) -{ - u32 val; - asm volatile("movl (%1),%%eax" : "=a" (val) : "r" (pos)); - return val; -} - -static inline void mmio_config_writeb(void __iomem *pos, u8 val) -{ - asm volatile("movb %%al,(%1)" :: "a" (val), "r" (pos) : "memory"); -} - -static inline void mmio_config_writew(void __iomem *pos, u16 val) -{ - asm volatile("movw %%ax,(%1)" :: "a" (val), "r" (pos) : "memory"); -} - -static inline void mmio_config_writel(void __iomem *pos, u32 val) -{ - asm volatile("movl %%eax,(%1)" :: "a" (val), "r" (pos) : "memory"); -} diff --git a/arch/x86/pci/visws.c b/arch/x86/pci/visws.c index 42f4cb19faca..16d0c0eb0d19 100644 --- a/arch/x86/pci/visws.c +++ b/arch/x86/pci/visws.c @@ -9,11 +9,10 @@ #include #include +#include #include #include -#include "pci.h" - static int pci_visws_enable_irq(struct pci_dev *dev) { return 0; } static void pci_visws_disable_irq(struct pci_dev *dev) { } diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 8514c3a1746a..c2e1bcbb28a7 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -45,7 +45,7 @@ #include "cpqphp.h" #include "cpqphp_nvram.h" -#include "../../../arch/x86/pci/pci.h" /* horrible hack showing how processor dependent we are... */ +#include /* Global variables */ diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c index 09021930589f..df146be9d2e9 100644 --- a/drivers/pci/hotplug/cpqphp_pci.c +++ b/drivers/pci/hotplug/cpqphp_pci.c @@ -37,7 +37,7 @@ #include "../pci.h" #include "cpqphp.h" #include "cpqphp_nvram.h" -#include "../../../arch/x86/pci/pci.h" /* horrible hack showing how processor dependent we are... */ +#include u8 cpqhp_nic_irq; diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 633e743442ac..dd18f857dfb0 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c @@ -35,7 +35,7 @@ #include #include #include "../pci.h" -#include "../../../arch/x86/pci/pci.h" /* for struct irq_routing_table */ +#include /* for struct irq_routing_table */ #include "ibmphp.h" #define attn_on(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON) -- cgit v1.2.3 From 412a1be265b894a45cebbfc2b57eb7a593bf34b2 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 29 Dec 2008 21:44:12 +0530 Subject: x86: amd_iommu_init.c: iommu_enable and iommu_enable_event_logging should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: arch/x86/kernel/amd_iommu_init.c:246:13: warning: symbol 'iommu_enable' was not declared. Should it be static? arch/x86/kernel/amd_iommu_init.c:259:13: warning: symbol 'iommu_enable_event_logging' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/amd_iommu_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index c625800c55ca..fb85e8d466cc 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -243,7 +243,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit) } /* Function to enable the hardware */ -void __init iommu_enable(struct amd_iommu *iommu) +static void __init iommu_enable(struct amd_iommu *iommu) { printk(KERN_INFO "AMD IOMMU: Enabling IOMMU " "at %02x:%02x.%x cap 0x%hx\n", @@ -256,7 +256,7 @@ void __init iommu_enable(struct amd_iommu *iommu) } /* Function to enable IOMMU event logging and event interrupts */ -void __init iommu_enable_event_logging(struct amd_iommu *iommu) +static void __init iommu_enable_event_logging(struct amd_iommu *iommu) { iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); -- cgit v1.2.3 From 557f687c87ddb8adb094b2dad4e1c83c7717982d Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 29 Dec 2008 21:45:22 +0530 Subject: x86: amd_iommu.c: prealloc_protection_domains should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: arch/x86/kernel/amd_iommu.c:1299:6: warning: symbol 'prealloc_protection_domains' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: H. Peter Anvin --- arch/x86/kernel/amd_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 2e2da717b350..658e29e0f49b 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1296,7 +1296,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask) * we don't need to preallocate the protection domains anymore. * For now we have to. */ -void prealloc_protection_domains(void) +static void prealloc_protection_domains(void) { struct pci_dev *dev = NULL; struct dma_ops_domain *dma_dom; -- cgit v1.2.3 From 4d08d97f5262dab4482af5bc91b30af4ca02269e Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 29 Dec 2008 22:11:40 +0530 Subject: x86: genx2apic_phys.c: x2apic_send_IPI_self and init_x2apic_ldr should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warnings Fixes sparse warnings: arch/x86/kernel/genx2apic_phys.c:164:6: warning: symbol 'x2apic_send_IPI_self' was not declared. Should it be static? arch/x86/kernel/genx2apic_phys.c:169:6: warning: symbol 'init_x2apic_ldr' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/genx2apic_phys.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index d042211768b7..a177c7880ab5 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c @@ -123,12 +123,12 @@ static unsigned int phys_pkg_id(int index_msb) return current_cpu_data.initial_apicid >> index_msb; } -void x2apic_send_IPI_self(int vector) +static void x2apic_send_IPI_self(int vector) { apic_write(APIC_SELF_IPI, vector); } -void init_x2apic_ldr(void) +static void init_x2apic_ldr(void) { return; } -- cgit v1.2.3 From c62e9d56ea90ef94f9708ce3f11860c20fa5e135 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 29 Dec 2008 22:12:50 +0530 Subject: x86: bios_uv.c: uv_systab should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: arch/x86/kernel/bios_uv.c:28:18: warning: symbol 'uv_systab' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: H. Peter Anvin --- arch/x86/kernel/bios_uv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c index 2a0a2a3cac26..f63882728d91 100644 --- a/arch/x86/kernel/bios_uv.c +++ b/arch/x86/kernel/bios_uv.c @@ -25,7 +25,7 @@ #include #include -struct uv_systab uv_systab; +static struct uv_systab uv_systab; s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) { -- cgit v1.2.3 From ec8c842a524888fdcccece337d91798e3e8af880 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Tue, 30 Dec 2008 22:46:36 +0530 Subject: x86: apic.c: xapic_icr_read and x2apic_icr_read should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: arch/x86/kernel/apic.c:270:5: warning: symbol 'x2apic_icr_read' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/apic.h | 1 - arch/x86/kernel/apic.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index e644bf6f90dc..ab1d51a8855e 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -54,7 +54,6 @@ extern int disable_apic; extern int is_vsmp_box(void); extern void xapic_wait_icr_idle(void); extern u32 safe_xapic_wait_icr_idle(void); -extern u64 xapic_icr_read(void); extern void xapic_icr_write(u32, u32); extern int setup_profiling_timer(unsigned int); diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index c67722f010bd..66198cbe464d 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -228,7 +228,7 @@ void xapic_icr_write(u32 low, u32 id) apic_write(APIC_ICR, low); } -u64 xapic_icr_read(void) +static u64 xapic_icr_read(void) { u32 icr1, icr2; @@ -268,7 +268,7 @@ void x2apic_icr_write(u32 low, u32 id) wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); } -u64 x2apic_icr_read(void) +static u64 x2apic_icr_read(void) { unsigned long val; -- cgit v1.2.3 From fa95826fe0ddbc2a55373134d8d1a21b49d13434 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Tue, 30 Dec 2008 20:13:49 +0530 Subject: x86: uv_bau.h: fix dubious bitfield Impact: cleanup, avoid sparse warnings declare bitfield as unsigned to avoid dubious bitfield issue CHECK arch/x86/kernel/tlb_64.c arch/x86/include/asm/uv/uv_bau.h:136:22: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:138:25: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:140:15: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:143:14: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:146:14: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:149:18: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:151:18: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:155:14: error: dubious one-bit signed bitfield arch/x86/include/asm/uv/uv_bau.h:159:18: error: dubious one-bit signed bitfield arch/x86/include/asm/uv/uv_bau.h:173:19: error: dubious one-bit signed bitfield arch/x86/include/asm/uv/uv_bau.h:181:16: error: dubious one-bit signed bitfield arch/x86/include/asm/uv/uv_bau.h:185:18: error: dubious one-bit signed bitfield arch/x86/include/asm/uv/uv_bau.h:188:16: error: dubious one-bit signed bitfield CHECK arch/x86/kernel/tlb_uv.c arch/x86/include/asm/uv/uv_bau.h:136:22: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:138:25: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:140:15: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:143:14: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:146:14: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:149:18: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:151:18: warning: dubious bitfield without explicit `signed' or `unsigned' arch/x86/include/asm/uv/uv_bau.h:155:14: error: dubious one-bit signed 
bitfield arch/x86/include/asm/uv/uv_bau.h:159:18: error: dubious one-bit signed bitfield arch/x86/include/asm/uv/uv_bau.h:173:19: error: dubious one-bit signed bitfield arch/x86/include/asm/uv/uv_bau.h:181:16: error: dubious one-bit signed bitfield arch/x86/include/asm/uv/uv_bau.h:185:18: error: dubious one-bit signed bitfield arch/x86/include/asm/uv/uv_bau.h:188:16: error: dubious one-bit signed bitfield Signed-off-by: Jaswinder Singh Rajput Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/uv/uv_bau.h | 46 ++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 23 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index e2363253bbbf..50423c7b56b2 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -133,61 +133,61 @@ struct bau_msg_payload { * see table 4.2.3.0.1 in broacast_assist spec. */ struct bau_msg_header { - int dest_subnodeid:6; /* must be zero */ + unsigned int dest_subnodeid:6; /* must be zero */ /* bits 5:0 */ - int base_dest_nodeid:15; /* nasid>>1 (pnode) of first bit in node_map */ - /* bits 20:6 */ - int command:8; /* message type */ + unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */ + /* bits 20:6 */ /* first bit in node_map */ + unsigned int command:8; /* message type */ /* bits 28:21 */ /* 0x38: SN3net EndPoint Message */ - int rsvd_1:3; /* must be zero */ + unsigned int rsvd_1:3; /* must be zero */ /* bits 31:29 */ /* int will align on 32 bits */ - int rsvd_2:9; /* must be zero */ + unsigned int rsvd_2:9; /* must be zero */ /* bits 40:32 */ /* Suppl_A is 56-41 */ - int payload_2a:8; /* becomes byte 16 of msg */ + unsigned int payload_2a:8;/* becomes byte 16 of msg */ /* bits 48:41 */ /* not currently using */ - int payload_2b:8; /* becomes byte 17 of msg */ + unsigned int payload_2b:8;/* becomes byte 17 of msg */ /* bits 56:49 */ /* not currently using */ /* Address field (96:57) is never used as an address (these are address bits 42:3) */ - int rsvd_3:1; /* must be zero */ + unsigned int rsvd_3:1; /* must be zero */ /* bit 57 */ /* address bits 27:4 are payload */ /* these 24 bits become bytes 12-14 of msg */ - int replied_to:1; /* sent as 0 by the source to byte 12 */ + unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */ /* bit 58 */ - int payload_1a:5; /* not currently used */ + unsigned int payload_1a:5;/* not currently used */ /* bits 63:59 */ - int payload_1b:8; /* not currently used */ + unsigned int payload_1b:8;/* not currently used */ /* bits 71:64 */ - int payload_1c:8; /* not currently used */ + unsigned int payload_1c:8;/* not currently used */ /* bits 79:72 */ - int payload_1d:2; /* not currently used */ + unsigned int payload_1d:2;/* not currently used */ /* bits 81:80 */ - int rsvd_4:7; /* must be zero */ + unsigned int rsvd_4:7; /* must be zero */ /* bits 88:82 */ - int sw_ack_flag:1; /* software acknowledge flag */ + unsigned int sw_ack_flag:1;/* software acknowledge flag */ /* bit 89 */ /* INTD trasactions at destination are to wait for software acknowledge */ - int rsvd_5:6; /* must be zero */ + unsigned int rsvd_5:6; /* must be zero */ /* bits 95:90 */ - int rsvd_6:5; /* must be zero */ + unsigned int rsvd_6:5; /* must be zero */ /* bits 100:96 */ - int int_both:1; /* if 1, interrupt both sockets on the blade */ + unsigned int int_both:1;/* if 1, interrupt both sockets on the blade */ /* bit 101*/ - int fairness:3; /* usually zero */ + unsigned int fairness:3;/* usually zero */ /* bits 104:102 */ - 
int multilevel:1; /* multi-level multicast format */ + unsigned int multilevel:1; /* multi-level multicast format */ /* bit 105 */ /* 0 for TLB: endpoint multi-unicast messages */ - int chaining:1; /* next descriptor is part of this activation*/ + unsigned int chaining:1;/* next descriptor is part of this activation*/ /* bit 106 */ - int rsvd_7:21; /* must be zero */ + unsigned int rsvd_7:21; /* must be zero */ /* bits 127:107 */ }; -- cgit v1.2.3 From 7820b75643a763abf595c99fab963000ffc8b5f0 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Tue, 30 Dec 2008 22:05:55 +0530 Subject: x86: xsave.c: restore_user_xstate should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: arch/x86/kernel/xsave.c:162:5: warning: symbol 'restore_user_xstate' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: H. Peter Anvin --- arch/x86/kernel/xsave.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 15c3e6999182..2b54fe002e94 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -159,7 +159,7 @@ int save_i387_xstate(void __user *buf) * Restore the extended state if present. Otherwise, restore the FP/SSE * state. */ -int restore_user_xstate(void __user *buf) +static int restore_user_xstate(void __user *buf) { struct _fpx_sw_bytes fx_sw_user; u64 mask; -- cgit v1.2.3 From 79741dd35713ff4f6fd0eafd59fa94e8a4ba922d Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 31 Dec 2008 15:11:38 +0100 Subject: [PATCH] idle cputime accounting The cpu time spent by the idle process actually doing something is currently accounted as idle time. This is plain wrong, the architectures that support VIRT_CPU_ACCOUNTING=y can do better: distinguish between the time spent doing nothing and the time spent by idle doing work. The first is accounted with account_idle_time and the second with account_system_time. The architectures that use the account_xxx_time interface directly and not the account_xxx_ticks interface now need to do the check for the idle process in their arch code. In particular to improve the system vs true idle time accounting the arch code needs to measure the true idle time instead of just testing for the idle process. To improve the tick based accounting as well we would need an architecture primitive that can tell us if the pt_regs of the interrupted context points to the magic instruction that halts the cpu. In addition idle time is no more added to the stime of the idle process. This field now contains the system time of the idle process as it should be. On systems without VIRT_CPU_ACCOUNTING this will always be zero as every tick that occurs while idle is running will be accounted as idle time. This patch contains the necessary common code changes to be able to distinguish idle system time and true idle time. The architectures with support for VIRT_CPU_ACCOUNTING need some changes to exploit this. 
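To make the new split concrete, here is a minimal sketch of the check the arch hunks below add to their vtime hooks (tsk and delta stand for the accounted task and the measured cputime; the exact variable names differ per architecture and are not part of the patch):

	/* interrupt time and real work stay system time, the rest is idle */
	if (in_irq() || idle_task(smp_processor_id()) != tsk)
		account_system_time(tsk, 0, delta, delta);
	else
		account_idle_time(delta);

Stolen time gets the same treatment, split between account_steal_time() and the new account_idle_time(), as the powerpc and s390 hunks below show.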
Signed-off-by: Martin Schwidefsky --- arch/ia64/kernel/time.c | 10 ++++-- arch/powerpc/kernel/process.c | 1 + arch/powerpc/kernel/time.c | 13 +++++-- arch/s390/kernel/vtime.c | 20 ++++++++--- arch/x86/xen/time.c | 10 +++--- include/linux/kernel_stat.h | 7 +++- include/linux/sched.h | 1 - kernel/sched.c | 80 ++++++++++++++++++++++++++++++++++--------- kernel/time/tick-sched.c | 13 ++++--- kernel/timer.c | 13 ------- 10 files changed, 114 insertions(+), 54 deletions(-) (limited to 'arch/x86') diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 4ee367817049..f0ebb342409d 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -93,7 +93,10 @@ void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next) now = ia64_get_itc(); delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp)); - account_system_time(prev, 0, delta_stime, delta_stime); + if (idle_task(smp_processor_id()) != prev) + account_system_time(prev, 0, delta_stime, delta_stime); + else + account_idle_time(delta_stime); if (pi->ac_utime) { delta_utime = cycle_to_cputime(pi->ac_utime); @@ -120,7 +123,10 @@ void account_system_vtime(struct task_struct *tsk) now = ia64_get_itc(); delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp)); - account_system_time(tsk, 0, delta_stime, delta_stime); + if (irq_count() || idle_task(smp_processor_id()) != tsk) + account_system_time(tsk, 0, delta_stime, delta_stime); + else + account_idle_time(delta_stime); ti->ac_stime = 0; ti->ac_stamp = now; diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 51b201ddf9a1..fb7049c054c0 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 92650ccad2e1..3be355c1cfa7 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -256,7 +256,10 @@ void account_system_vtime(struct task_struct *tsk) delta += sys_time; get_paca()->system_time = 0; } - account_system_time(tsk, 0, delta, deltascaled); + if (in_irq() || idle_task(smp_processor_id()) != tsk) + account_system_time(tsk, 0, delta, deltascaled); + else + account_idle_time(delta); per_cpu(cputime_last_delta, smp_processor_id()) = delta; per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; local_irq_restore(flags); @@ -335,8 +338,12 @@ void calculate_steal_time(void) tb = mftb(); purr = mfspr(SPRN_PURR); stolen = (tb - pme->tb) - (purr - pme->purr); - if (stolen > 0) - account_steal_time(current, stolen); + if (stolen > 0) { + if (idle_task(smp_processor_id()) != current) + account_steal_time(stolen); + else + account_idle_time(stolen); + } pme->tb = tb; pme->purr = purr; } diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 07283aea2e56..4a4a34caec55 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -55,13 +55,19 @@ void account_process_tick(struct task_struct *tsk, int user_tick) cputime = S390_lowcore.system_timer >> 12; S390_lowcore.system_timer -= cputime << 12; S390_lowcore.steal_clock -= cputime << 12; - account_system_time(tsk, HARDIRQ_OFFSET, cputime, cputime); + if (idle_task(smp_processor_id()) != current) + account_system_time(tsk, HARDIRQ_OFFSET, cputime, cputime); + else + account_idle_time(cputime); cputime = S390_lowcore.steal_clock; if ((__s64) cputime > 0) { cputime >>= 12; S390_lowcore.steal_clock -= cputime << 12; - account_steal_time(tsk, cputime); + 
if (idle_task(smp_processor_id()) != current) + account_steal_time(cputime); + else + account_idle_time(cputime); } } @@ -87,7 +93,10 @@ void account_vtime(struct task_struct *tsk) cputime = S390_lowcore.system_timer >> 12; S390_lowcore.system_timer -= cputime << 12; S390_lowcore.steal_clock -= cputime << 12; - account_system_time(tsk, 0, cputime, cputime); + if (idle_task(smp_processor_id()) != current) + account_system_time(tsk, 0, cputime, cputime); + else + account_idle_time(cputime); } /* @@ -107,7 +116,10 @@ void account_system_vtime(struct task_struct *tsk) cputime = S390_lowcore.system_timer >> 12; S390_lowcore.system_timer -= cputime << 12; S390_lowcore.steal_clock -= cputime << 12; - account_system_time(tsk, 0, cputime, cputime); + if (in_irq() || idle_task(smp_processor_id()) != current) + account_system_time(tsk, 0, cputime, cputime); + else + account_idle_time(cputime); } EXPORT_SYMBOL_GPL(account_system_vtime); diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index c9f7cda48ed7..732e52dc991a 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -132,8 +132,7 @@ static void do_stolen_accounting(void) *snap = state; /* Add the appropriate number of ticks of stolen time, - including any left-overs from last time. Passing NULL to - account_steal_time accounts the time as stolen. */ + including any left-overs from last time. */ stolen = runnable + offline + __get_cpu_var(residual_stolen); if (stolen < 0) @@ -141,11 +140,10 @@ static void do_stolen_accounting(void) ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); __get_cpu_var(residual_stolen) = stolen; - account_steal_time(NULL, ticks); + account_steal_ticks(ticks); /* Add the appropriate number of ticks of blocked time, - including any left-overs from last time. Passing idle to - account_steal_time accounts the time as idle/wait. */ + including any left-overs from last time. 
*/ blocked += __get_cpu_var(residual_blocked); if (blocked < 0) @@ -153,7 +151,7 @@ static void do_stolen_accounting(void) ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); __get_cpu_var(residual_blocked) = blocked; - account_steal_time(idle_task(smp_processor_id()), ticks); + account_idle_ticks(ticks); } /* diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index c78a459662a6..570d20413119 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -81,6 +81,11 @@ static inline unsigned int kstat_irqs(unsigned int irq) extern unsigned long long task_delta_exec(struct task_struct *); extern void account_user_time(struct task_struct *, cputime_t, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); -extern void account_steal_time(struct task_struct *, cputime_t); +extern void account_steal_time(cputime_t); +extern void account_idle_time(cputime_t); + +extern void account_process_tick(struct task_struct *, int user); +extern void account_steal_ticks(unsigned long ticks); +extern void account_idle_ticks(unsigned long ticks); #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 8395e715809d..b475d4db8053 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -284,7 +284,6 @@ long io_schedule_timeout(long timeout); extern void cpu_init (void); extern void trap_init(void); -extern void account_process_tick(struct task_struct *task, int user); extern void update_process_times(int user); extern void scheduler_tick(void); diff --git a/kernel/sched.c b/kernel/sched.c index 5b03679ff712..635eaffe1e4c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4139,7 +4139,6 @@ void account_system_time(struct task_struct *p, int hardirq_offset, cputime_t cputime, cputime_t cputime_scaled) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - struct rq *rq = this_rq(); cputime64_t tmp; if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { @@ -4158,37 +4157,84 @@ void account_system_time(struct task_struct *p, int hardirq_offset, cpustat->irq = cputime64_add(cpustat->irq, tmp); else if (softirq_count()) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); - else if (p != rq->idle) - cpustat->system = cputime64_add(cpustat->system, tmp); - else if (atomic_read(&rq->nr_iowait) > 0) - cpustat->iowait = cputime64_add(cpustat->iowait, tmp); else - cpustat->idle = cputime64_add(cpustat->idle, tmp); + cpustat->system = cputime64_add(cpustat->system, tmp); + /* Account for system time used */ acct_update_integrals(p); } /* * Account for involuntary wait time. - * @p: the process from which the cpu time has been stolen * @steal: the cpu time spent in involuntary wait */ -void account_steal_time(struct task_struct *p, cputime_t steal) +void account_steal_time(cputime_t cputime) +{ + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + cputime64_t cputime64 = cputime_to_cputime64(cputime); + + cpustat->steal = cputime64_add(cpustat->steal, cputime64); +} + +/* + * Account for idle time. 
+ * @cputime: the cpu time spent in idle wait + */ +void account_idle_time(cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - cputime64_t tmp = cputime_to_cputime64(steal); + cputime64_t cputime64 = cputime_to_cputime64(cputime); struct rq *rq = this_rq(); - if (p == rq->idle) { - p->stime = cputime_add(p->stime, steal); - if (atomic_read(&rq->nr_iowait) > 0) - cpustat->iowait = cputime64_add(cpustat->iowait, tmp); - else - cpustat->idle = cputime64_add(cpustat->idle, tmp); - } else - cpustat->steal = cputime64_add(cpustat->steal, tmp); + if (atomic_read(&rq->nr_iowait) > 0) + cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); + else + cpustat->idle = cputime64_add(cpustat->idle, cputime64); +} + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING + +/* + * Account a single tick of cpu time. + * @p: the process that the cpu time gets accounted to + * @user_tick: indicates if the tick is a user or a system tick + */ +void account_process_tick(struct task_struct *p, int user_tick) +{ + cputime_t one_jiffy = jiffies_to_cputime(1); + cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy); + struct rq *rq = this_rq(); + + if (user_tick) + account_user_time(p, one_jiffy, one_jiffy_scaled); + else if (p != rq->idle) + account_system_time(p, HARDIRQ_OFFSET, one_jiffy, + one_jiffy_scaled); + else + account_idle_time(one_jiffy); +} + +/* + * Account multiple ticks of steal time. + * @p: the process from which the cpu time has been stolen + * @ticks: number of stolen ticks + */ +void account_steal_ticks(unsigned long ticks) +{ + account_steal_time(jiffies_to_cputime(ticks)); +} + +/* + * Account multiple ticks of idle time. + * @ticks: number of stolen ticks + */ +void account_idle_ticks(unsigned long ticks) +{ + account_idle_time(jiffies_to_cputime(ticks)); } +#endif + /* * Use precise platform statistics if available: */ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1f2fce2479fe..611fa4c0baab 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -419,8 +419,9 @@ void tick_nohz_restart_sched_tick(void) { int cpu = smp_processor_id(); struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); +#ifndef CONFIG_VIRT_CPU_ACCOUNTING unsigned long ticks; - cputime_t cputime; +#endif ktime_t now; local_irq_disable(); @@ -442,6 +443,7 @@ void tick_nohz_restart_sched_tick(void) tick_do_update_jiffies64(now); cpu_clear(cpu, nohz_cpu_mask); +#ifndef CONFIG_VIRT_CPU_ACCOUNTING /* * We stopped the tick in idle. Update process times would miss the * time we slept as update_process_times does only a 1 tick @@ -451,12 +453,9 @@ void tick_nohz_restart_sched_tick(void) /* * We might be one off. Do not randomly account a huge number of ticks! 
*/ - if (ticks && ticks < LONG_MAX) { - add_preempt_count(HARDIRQ_OFFSET); - cputime = jiffies_to_cputime(ticks); - account_system_time(current, HARDIRQ_OFFSET, cputime, cputime); - sub_preempt_count(HARDIRQ_OFFSET); - } + if (ticks && ticks < LONG_MAX) + account_idle_ticks(ticks); +#endif touch_softlockup_watchdog(); /* diff --git a/kernel/timer.c b/kernel/timer.c index b5efb528aa1d..dee3f641a7a7 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1018,19 +1018,6 @@ unsigned long get_next_timer_interrupt(unsigned long now) } #endif -#ifndef CONFIG_VIRT_CPU_ACCOUNTING -void account_process_tick(struct task_struct *p, int user_tick) -{ - cputime_t one_jiffy = jiffies_to_cputime(1); - - if (user_tick) - account_user_time(p, one_jiffy, cputime_to_scaled(one_jiffy)); - else - account_system_time(p, HARDIRQ_OFFSET, one_jiffy, - cputime_to_scaled(one_jiffy)); -} -#endif - /* * Called from the timer interrupt handler to charge one tick to the current * process. user_tick is 1 if the tick is user time, 0 for system. -- cgit v1.2.3 From 2786b014ec893c301ea52ef9962e7cc60f89f9b3 Mon Sep 17 00:00:00 2001 From: Guillaume Thouvenin Date: Mon, 22 Sep 2008 16:08:06 +0200 Subject: KVM: x86 emulator: consolidate push reg This patch consolidate the emulation of push reg instruction. Signed-off-by: Guillaume Thouvenin Signed-off-by: Laurent Vivier Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index ea051173b0da..a391e213fe61 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1415,13 +1415,7 @@ special_insn: emulate_1op("dec", c->dst, ctxt->eflags); break; case 0x50 ... 0x57: /* push reg */ - c->dst.type = OP_MEM; - c->dst.bytes = c->op_bytes; - c->dst.val = c->src.val; - register_address_increment(c, &c->regs[VCPU_REGS_RSP], - -c->op_bytes); - c->dst.ptr = (void *) register_address( - c, ss_base(ctxt), c->regs[VCPU_REGS_RSP]); + emulate_push(ctxt); break; case 0x58 ... 0x5f: /* pop reg */ pop_instruction: -- cgit v1.2.3 From a26bf12afb608eb5a96192eaee35fc08ffbf85aa Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:45 +0200 Subject: KVM: VMX: include all IRQ window exits in statistics irq_window_exits only tracks IRQ window exits due to user space requests, nmi_window_exits include all exits. The latter makes more sense, so let's adjust irq_window_exits accounting. 
Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a4018b01e1f9..ac3453799c17 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2767,6 +2767,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu, vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); KVMTRACE_0D(PEND_INTR, vcpu, handler); + ++vcpu->stat.irq_window_exits; /* * If the user space waits to inject interrupts, exit as soon as @@ -2775,7 +2776,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu, if (kvm_run->request_interrupt_window && !vcpu->arch.irq_summary) { kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; - ++vcpu->stat.irq_window_exits; return 0; } return 1; -- cgit v1.2.3 From e4a41889ece6c95f390a7fa3a94255ab62470968 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:46 +0200 Subject: KVM: VMX: Use INTR_TYPE_NMI_INTR instead of magic value Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ac3453799c17..81cf12b8d12a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2492,7 +2492,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary); } - if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */ + if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) return 1; /* already handled by vmx_vcpu_run() */ if (is_no_device(intr_info)) { @@ -3337,7 +3337,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) intr_info = vmcs_read32(VM_EXIT_INTR_INFO); /* We need to handle NMIs before interrupts are enabled */ - if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200 && + if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && (intr_info & INTR_INFO_VALID_MASK)) { KVMTRACE_0D(NMI, vcpu, handler); asm("int $2"); -- cgit v1.2.3 From 60637aacfd95c368e1fbc2157275d1b621b5dcdd Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:47 +0200 Subject: KVM: VMX: Support for NMI task gates Properly set GUEST_INTR_STATE_NMI and reset nmi_injected when a task-switch vmexit happened due to a task gate being used for handling NMIs. Also avoid the false warning about valid vectoring info in kvm_handle_exit. Based on original patch by Gleb Natapov. 
Signed-off-by: Gleb Natapov Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 81cf12b8d12a..8d0fc68fd4ec 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2832,6 +2832,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { + struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qualification; u16 tss_selector; int reason; @@ -2839,6 +2840,15 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) exit_qualification = vmcs_readl(EXIT_QUALIFICATION); reason = (u32)exit_qualification >> 30; + if (reason == TASK_SWITCH_GATE && vmx->vcpu.arch.nmi_injected && + (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && + (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK) + == INTR_TYPE_NMI_INTR) { + vcpu->arch.nmi_injected = false; + if (cpu_has_virtual_nmis()) + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, + GUEST_INTR_STATE_NMI); + } tss_selector = exit_qualification; return kvm_task_switch(vcpu, tss_selector, reason); @@ -3012,9 +3022,11 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) if ((vectoring_info & VECTORING_INFO_VALID_MASK) && (exit_reason != EXIT_REASON_EXCEPTION_NMI && - exit_reason != EXIT_REASON_EPT_VIOLATION)) - printk(KERN_WARNING "%s: unexpected, valid vectoring info and " - "exit reason is 0x%x\n", __func__, exit_reason); + exit_reason != EXIT_REASON_EPT_VIOLATION && + exit_reason != EXIT_REASON_TASK_SWITCH)) + printk(KERN_WARNING "%s: unexpected, valid vectoring info " + "(0x%x) and exit reason is 0x%x\n", + __func__, vectoring_info, exit_reason); if (exit_reason < kvm_vmx_max_exit_handlers && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run); -- cgit v1.2.3 From 448fa4a9c5dbc6941dd19ed09692c588d815bb06 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:48 +0200 Subject: KVM: x86: Reset pending/inject NMI state on CPU reset CPU reset invalidates pending or already injected NMIs, therefore reset the related state variables. Based on original patch by Gleb Natapov. Signed-off-by: Gleb Natapov Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f1f8ff2f1fa2..1a71f6735593 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3925,6 +3925,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) { + vcpu->arch.nmi_pending = false; + vcpu->arch.nmi_injected = false; + return kvm_x86_ops->vcpu_reset(vcpu); } -- cgit v1.2.3 From 33f089ca5a61f7aead26e8e1866dfc961dd88a9e Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:49 +0200 Subject: KVM: VMX: refactor/fix IRQ and NMI injectability determination There are currently two ways in VMX to check if an IRQ or NMI can be injected: - vmx_{nmi|irq}_enabled and - vcpu.arch.{nmi|interrupt}_window_open. Even worse, one test (at the end of vmx_vcpu_run) uses an inconsistent, likely incorrect logic. This patch consolidates and unifies the tests over {nmi|interrupt}_window_open as cache + vmx_update_window_states for updating the cache content. 
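The consumer side of the new cache, condensed from the vmx.c hunks below (not a complete function; surrounding control flow is elided):

	vmx_update_window_states(vcpu);        /* one VMCS read, refresh both flags */

	if (vcpu->arch.nmi_window_open) {      /* replaces vmx_nmi_enabled()        */
		vcpu->arch.nmi_pending = false;
		vcpu->arch.nmi_injected = true;
	}
	if (vcpu->arch.interrupt_window_open)  /* replaces vmx_irq_enabled()        */
		kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));

The interruptibility state and RFLAGS are read once per exit in vmx_update_window_states(); every later test works on the cached vcpu->arch fields.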
Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/vmx.c | 46 +++++++++++++++++++---------------------- 2 files changed, 22 insertions(+), 25 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8346be87cfa1..bfbbdea869bf 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -327,6 +327,7 @@ struct kvm_vcpu_arch { bool nmi_pending; bool nmi_injected; + bool nmi_window_open; u64 mtrr[0x100]; }; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8d0fc68fd4ec..f0866e1d20ee 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2362,6 +2362,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); } +static void vmx_update_window_states(struct kvm_vcpu *vcpu) +{ + u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); + + vcpu->arch.nmi_window_open = + !(guest_intr & (GUEST_INTR_STATE_STI | + GUEST_INTR_STATE_MOV_SS | + GUEST_INTR_STATE_NMI)); + + vcpu->arch.interrupt_window_open = + ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && + !(guest_intr & (GUEST_INTR_STATE_STI | + GUEST_INTR_STATE_MOV_SS))); +} + static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) { int word_index = __ffs(vcpu->arch.irq_summary); @@ -2374,15 +2389,12 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) kvm_queue_interrupt(vcpu, irq); } - static void do_interrupt_requests(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { u32 cpu_based_vm_exec_control; - vcpu->arch.interrupt_window_open = - ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && - (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); + vmx_update_window_states(vcpu); if (vcpu->arch.interrupt_window_open && vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) @@ -3075,22 +3087,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu) vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); } -static int vmx_nmi_enabled(struct kvm_vcpu *vcpu) -{ - u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); - return !(guest_intr & (GUEST_INTR_STATE_NMI | - GUEST_INTR_STATE_MOV_SS | - GUEST_INTR_STATE_STI)); -} - -static int vmx_irq_enabled(struct kvm_vcpu *vcpu) -{ - u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); - return (!(guest_intr & (GUEST_INTR_STATE_MOV_SS | - GUEST_INTR_STATE_STI)) && - (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)); -} - static void enable_intr_window(struct kvm_vcpu *vcpu) { if (vcpu->arch.nmi_pending) @@ -3159,11 +3155,13 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) { update_tpr_threshold(vcpu); + vmx_update_window_states(vcpu); + if (cpu_has_virtual_nmis()) { if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { if (vcpu->arch.interrupt.pending) { enable_nmi_window(vcpu); - } else if (vmx_nmi_enabled(vcpu)) { + } else if (vcpu->arch.nmi_window_open) { vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = true; } else { @@ -3178,7 +3176,7 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) } } if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) { - if (vmx_irq_enabled(vcpu)) + if (vcpu->arch.interrupt_window_open) kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu)); else enable_irq_window(vcpu); @@ -3339,9 +3337,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (vmx->rmode.irq.pending) fixup_rmode_irq(vmx); - vcpu->arch.interrupt_window_open = - (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & - (GUEST_INTR_STATE_STI | 
GUEST_INTR_STATE_MOV_SS)) == 0; + vmx_update_window_states(vcpu); asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); vmx->launched = 1; -- cgit v1.2.3 From f460ee43e250b675376246b1c4c9fe9b9af4ab16 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:50 +0200 Subject: KVM: VMX: refactor IRQ and NMI window enabling do_interrupt_requests and vmx_intr_assist go different way for achieving the same: enabling the nmi/irq window start notification. Unify their code over enable_{irq|nmi}_window, get rid of a redundant call to enable_intr_window instead of direct enable_nmi_window invocation and unroll enable_intr_window for both in-kernel and user space irq injection accordingly. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 78 ++++++++++++++++++++++-------------------------------- 1 file changed, 32 insertions(+), 46 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f0866e1d20ee..440f56cd4bdd 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2389,30 +2389,42 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) kvm_queue_interrupt(vcpu, irq); } -static void do_interrupt_requests(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) +static void enable_irq_window(struct kvm_vcpu *vcpu) { u32 cpu_based_vm_exec_control; - vmx_update_window_states(vcpu); + cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); + cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); +} - if (vcpu->arch.interrupt_window_open && - vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) - kvm_do_inject_irq(vcpu); +static void enable_nmi_window(struct kvm_vcpu *vcpu) +{ + u32 cpu_based_vm_exec_control; - if (vcpu->arch.interrupt_window_open && vcpu->arch.interrupt.pending) - vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); + if (!cpu_has_virtual_nmis()) + return; cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); + cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); +} + +static void do_interrupt_requests(struct kvm_vcpu *vcpu, + struct kvm_run *kvm_run) +{ + vmx_update_window_states(vcpu); + + if (vcpu->arch.interrupt_window_open) { + if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) + kvm_do_inject_irq(vcpu); + + if (vcpu->arch.interrupt.pending) + vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); + } if (!vcpu->arch.interrupt_window_open && (vcpu->arch.irq_summary || kvm_run->request_interrupt_window)) - /* - * Interrupts blocked. Wait for unblock. - */ - cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; - else - cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); + enable_irq_window(vcpu); } static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) @@ -3066,35 +3078,6 @@ static void update_tpr_threshold(struct kvm_vcpu *vcpu) vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? 
tpr >> 4 : max_irr >> 4); } -static void enable_irq_window(struct kvm_vcpu *vcpu) -{ - u32 cpu_based_vm_exec_control; - - cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); - cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); -} - -static void enable_nmi_window(struct kvm_vcpu *vcpu) -{ - u32 cpu_based_vm_exec_control; - - if (!cpu_has_virtual_nmis()) - return; - - cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); - cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); -} - -static void enable_intr_window(struct kvm_vcpu *vcpu) -{ - if (vcpu->arch.nmi_pending) - enable_nmi_window(vcpu); - else if (kvm_cpu_has_interrupt(vcpu)) - enable_irq_window(vcpu); -} - static void vmx_complete_interrupts(struct vcpu_vmx *vmx) { u32 exit_intr_info; @@ -3165,13 +3148,16 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = true; } else { - enable_intr_window(vcpu); + enable_nmi_window(vcpu); return; } } if (vcpu->arch.nmi_injected) { vmx_inject_nmi(vcpu); - enable_intr_window(vcpu); + if (vcpu->arch.nmi_pending) + enable_nmi_window(vcpu); + else if (kvm_cpu_has_interrupt(vcpu)) + enable_irq_window(vcpu); return; } } -- cgit v1.2.3 From 66a5a347c2690db4c0756524a8eb5a05e0437aa8 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:51 +0200 Subject: KVM: VMX: fix real-mode NMI support Fix NMI injection in real-mode with the same pattern we perform IRQ injection. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 440f56cd4bdd..38d138566617 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2358,6 +2358,19 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (vcpu->arch.rmode.active) { + vmx->rmode.irq.pending = true; + vmx->rmode.irq.vector = NMI_VECTOR; + vmx->rmode.irq.rip = kvm_rip_read(vcpu); + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, + NMI_VECTOR | INTR_TYPE_SOFT_INTR | + INTR_INFO_VALID_MASK); + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); + kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); + return; + } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); } -- cgit v1.2.3 From 23930f9521c9c4d4aa96cdb9d1e1703f3782bb94 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:52 +0200 Subject: KVM: x86: Enable NMI Watchdog via in-kernel PIT source LINT0 of the LAPIC can be used to route PIT events as NMI watchdog ticks into the guest. This patch aligns the in-kernel irqchip emulation with the user space irqchip with already supports this feature. The trick is to route PIT interrupts to all LAPIC's LVT0 lines. Rebased and slightly polished patch originally posted by Sheng Yang. 
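For context, the guest side of this contract (illustrative, not part of the patch): the I/O APIC flavour of the NMI watchdog programs LVT0 for NMI delivery, which is exactly what the apic_mmio_write() hunk below now watches for and what kvm_apic_local_deliver() replays on every PIT tick:

	apic_write(APIC_LVT0, APIC_DM_NMI);	/* guest NMI watchdog setup */

With the in-kernel irqchip, booting the guest with nmi_watchdog=1 should therefore behave as it already did with the user space irqchip.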
Signed-off-by: Jan Kiszka Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/i8254.c | 15 +++++++++++++++ arch/x86/kvm/irq.h | 1 + arch/x86/kvm/lapic.c | 34 +++++++++++++++++++++++++++++----- 3 files changed, 45 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 59ebd37ad79e..580cc1d01c7d 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -603,10 +603,25 @@ void kvm_free_pit(struct kvm *kvm) static void __inject_pit_timer_intr(struct kvm *kvm) { + struct kvm_vcpu *vcpu; + int i; + mutex_lock(&kvm->lock); kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1); kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0); mutex_unlock(&kvm->lock); + + /* + * Provides NMI watchdog support in IOAPIC mode. + * The route is: PIT -> PIC -> LVT0 in NMI mode, + * timer IRQs will continue to flow through the IOAPIC. + */ + for (i = 0; i < KVM_MAX_VCPUS; ++i) { + vcpu = kvm->vcpus[i]; + if (!vcpu) + continue; + kvm_apic_local_deliver(vcpu, APIC_LVT0); + } } void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index f17c8f5bbf31..71e37a530cf7 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -87,6 +87,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s); void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec); void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); +int kvm_apic_local_deliver(struct kvm_vcpu *vcpu, int lvt_type); void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu); void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu); void __kvm_migrate_timers(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 0fc3cab48943..206cc11a1c97 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -380,6 +380,14 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, } break; + case APIC_DM_EXTINT: + /* + * Should only be called by kvm_apic_local_deliver() with LVT0, + * before NMI watchdog was enabled. Already handled by + * kvm_apic_accept_pic_intr(). 
+ */ + break; + default: printk(KERN_ERR "TODO: unsupported delivery mode %x\n", delivery_mode); @@ -743,10 +751,13 @@ static void apic_mmio_write(struct kvm_io_device *this, apic_set_reg(apic, APIC_ICR2, val & 0xff000000); break; + case APIC_LVT0: + if (val == APIC_DM_NMI) + apic_debug("Receive NMI setting on APIC_LVT0 " + "for cpu %d\n", apic->vcpu->vcpu_id); case APIC_LVTT: case APIC_LVTTHMR: case APIC_LVTPC: - case APIC_LVT0: case APIC_LVT1: case APIC_LVTERR: /* TODO: Check vector */ @@ -961,12 +972,25 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu) return 0; } -static int __inject_apic_timer_irq(struct kvm_lapic *apic) +int kvm_apic_local_deliver(struct kvm_vcpu *vcpu, int lvt_type) { - int vector; + struct kvm_lapic *apic = vcpu->arch.apic; + int vector, mode, trig_mode; + u32 reg; + + if (apic && apic_enabled(apic)) { + reg = apic_get_reg(apic, lvt_type); + vector = reg & APIC_VECTOR_MASK; + mode = reg & APIC_MODE_MASK; + trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; + return __apic_accept_irq(apic, mode, vector, 1, trig_mode); + } + return 0; +} - vector = apic_lvt_vector(apic, APIC_LVTT); - return __apic_accept_irq(apic, APIC_DM_FIXED, vector, 1, 0); +static inline int __inject_apic_timer_irq(struct kvm_lapic *apic) +{ + return kvm_apic_local_deliver(apic->vcpu, APIC_LVTT); } static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) -- cgit v1.2.3 From 0496fbb973ccc9477082e859ed0faab5acb805ba Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:53 +0200 Subject: KVM: x86: VCPU with pending NMI is runnabled Ensure that a VCPU with pending NMIs is considered runnable. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1a71f6735593..1fa9a6db633d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4130,7 +4130,8 @@ void kvm_arch_flush_shadow(struct kvm *kvm) int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE - || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED; + || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED + || vcpu->arch.nmi_pending; } static void vcpu_kick_intr(void *info) -- cgit v1.2.3 From 26df99c6c5807115f06d4e1abae397b7f5f3e00c Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:54 +0200 Subject: KVM: Kick NMI receiving VCPU Kick the NMI receiving VCPU in case the triggering caller runs in a different context. 
Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/lapic.c | 1 + virt/kvm/ioapic.c | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 206cc11a1c97..304f9ddbdd51 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -354,6 +354,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, case APIC_DM_NMI: kvm_inject_nmi(vcpu); + kvm_vcpu_kick(vcpu); break; case APIC_DM_INIT: diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 53772bb46320..c8f939c55075 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -150,6 +150,7 @@ static int ioapic_inj_irq(struct kvm_ioapic *ioapic, static void ioapic_inj_nmi(struct kvm_vcpu *vcpu) { kvm_inject_nmi(vcpu); + kvm_vcpu_kick(vcpu); } static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, -- cgit v1.2.3 From c4abb7c9cde24b7351a47328ef866e6a2bbb1ad0 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:55 +0200 Subject: KVM: x86: Support for user space injected NMIs Introduces the KVM_NMI IOCTL to the generic x86 part of KVM for injecting NMIs from user space and also extends the statistic report accordingly. Based on the original patch by Sheng Yang. Signed-off-by: Jan Kiszka Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/x86.c | 46 +++++++++++++++++++++++++++++++++++++++-- include/linux/kvm.h | 11 ++++++++-- 3 files changed, 55 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bfbbdea869bf..a40fa8478920 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -398,6 +398,7 @@ struct kvm_vcpu_stat { u32 halt_exits; u32 halt_wakeup; u32 request_irq_exits; + u32 request_nmi_exits; u32 irq_exits; u32 host_state_reload; u32 efer_reload; @@ -406,6 +407,7 @@ struct kvm_vcpu_stat { u32 insn_emulation_fail; u32 hypercalls; u32 irq_injections; + u32 nmi_injections; }; struct descriptor_table { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1fa9a6db633d..07971451b947 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, + { "request_nmi", VCPU_STAT(request_nmi_exits) }, { "irq_exits", VCPU_STAT(irq_exits) }, { "host_state_reload", VCPU_STAT(host_state_reload) }, { "efer_reload", VCPU_STAT(efer_reload) }, @@ -93,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "insn_emulation", VCPU_STAT(insn_emulation) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, { "irq_injections", VCPU_STAT(irq_injections) }, + { "nmi_injections", VCPU_STAT(nmi_injections) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, @@ -1318,6 +1320,15 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, return 0; } +static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) +{ + vcpu_load(vcpu); + kvm_inject_nmi(vcpu); + vcpu_put(vcpu); + + return 0; +} + static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, struct kvm_tpr_access_ctl *tac) { @@ -1377,6 +1388,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = 0; break; } + case KVM_NMI: { + r = kvm_vcpu_ioctl_nmi(vcpu); + if (r) + goto out; + r = 0; + 
break; + } case KVM_SET_CPUID: { struct kvm_cpuid __user *cpuid_arg = argp; struct kvm_cpuid cpuid; @@ -2812,18 +2830,37 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu, (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF)); } +/* + * Check if userspace requested a NMI window, and that the NMI window + * is open. + * + * No need to exit to userspace if we already have a NMI queued. + */ +static int dm_request_for_nmi_injection(struct kvm_vcpu *vcpu, + struct kvm_run *kvm_run) +{ + return (!vcpu->arch.nmi_pending && + kvm_run->request_nmi_window && + vcpu->arch.nmi_window_open); +} + static void post_kvm_run_save(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); - if (irqchip_in_kernel(vcpu->kvm)) + if (irqchip_in_kernel(vcpu->kvm)) { kvm_run->ready_for_interrupt_injection = 1; - else + kvm_run->ready_for_nmi_injection = 1; + } else { kvm_run->ready_for_interrupt_injection = (vcpu->arch.interrupt_window_open && vcpu->arch.irq_summary == 0); + kvm_run->ready_for_nmi_injection = + (vcpu->arch.nmi_window_open && + vcpu->arch.nmi_pending == 0); + } } static void vapic_enter(struct kvm_vcpu *vcpu) @@ -2999,6 +3036,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } if (r > 0) { + if (dm_request_for_nmi_injection(vcpu, kvm_run)) { + r = -EINTR; + kvm_run->exit_reason = KVM_EXIT_NMI; + ++vcpu->stat.request_nmi_exits; + } if (dm_request_for_irq_injection(vcpu, kvm_run)) { r = -EINTR; kvm_run->exit_reason = KVM_EXIT_INTR; diff --git a/include/linux/kvm.h b/include/linux/kvm.h index f18b86fa8655..44fd7fa0af2b 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -83,18 +83,22 @@ struct kvm_irqchip { #define KVM_EXIT_S390_SIEIC 13 #define KVM_EXIT_S390_RESET 14 #define KVM_EXIT_DCR 15 +#define KVM_EXIT_NMI 16 +#define KVM_EXIT_NMI_WINDOW_OPEN 17 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { /* in */ __u8 request_interrupt_window; - __u8 padding1[7]; + __u8 request_nmi_window; + __u8 padding1[6]; /* out */ __u32 exit_reason; __u8 ready_for_interrupt_injection; __u8 if_flag; - __u8 padding2[2]; + __u8 ready_for_nmi_injection; + __u8 padding2; /* in (pre_kvm_run), out (post_kvm_run) */ __u64 cr8; @@ -387,6 +391,7 @@ struct kvm_trace_rec { #define KVM_CAP_DEVICE_ASSIGNMENT 17 #endif #define KVM_CAP_IOMMU 18 +#define KVM_CAP_NMI 19 /* * ioctls for VM fds @@ -458,6 +463,8 @@ struct kvm_trace_rec { #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) +/* Available with KVM_CAP_NMI */ +#define KVM_NMI _IO(KVMIO, 0x9a) #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) -- cgit v1.2.3 From 487b391d6ea9b1d0e2e0440466fb3130e78c98d9 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:56 +0200 Subject: KVM: VMX: Provide support for user space injected NMIs This patch adds the required bits to the VMX side for user space injected NMIs. As with the preexisting in-kernel irqchip support, the CPU must provide the "virtual NMI" feature for proper tracking of the NMI blocking state. Based on the original patch by Sheng Yang. 
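A user space sketch of the new interface (illustrative; file descriptor setup and error handling are abbreviated): probe KVM_CAP_NMI, then issue the KVM_NMI ioctl defined above against a vCPU fd.

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int inject_guest_nmi(int kvm_fd, int vcpu_fd)
	{
		/* KVM_CAP_NMI is probed on the /dev/kvm fd */
		if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NMI) <= 0)
			return -1;		/* kernel has no NMI injection support */
		/* KVM_NMI takes no argument; it queues one NMI for this vCPU */
		return ioctl(vcpu_fd, KVM_NMI);
	}

Coordination with the guest's NMI-blocked state goes through the new request_nmi_window and ready_for_nmi_injection fields of struct kvm_run shown above.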
Signed-off-by: Jan Kiszka Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 38d138566617..f16a62c79267 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2360,6 +2360,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + ++vcpu->stat.nmi_injections; if (vcpu->arch.rmode.active) { vmx->rmode.irq.pending = true; vmx->rmode.irq.vector = NMI_VECTOR; @@ -2428,6 +2429,30 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, { vmx_update_window_states(vcpu); + if (cpu_has_virtual_nmis()) { + if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { + if (vcpu->arch.nmi_window_open) { + vcpu->arch.nmi_pending = false; + vcpu->arch.nmi_injected = true; + } else { + enable_nmi_window(vcpu); + return; + } + } + if (vcpu->arch.nmi_injected) { + vmx_inject_nmi(vcpu); + if (vcpu->arch.nmi_pending + || kvm_run->request_nmi_window) + enable_nmi_window(vcpu); + else if (vcpu->arch.irq_summary + || kvm_run->request_interrupt_window) + enable_irq_window(vcpu); + return; + } + if (!vcpu->arch.nmi_window_open || kvm_run->request_nmi_window) + enable_nmi_window(vcpu); + } + if (vcpu->arch.interrupt_window_open) { if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) kvm_do_inject_irq(vcpu); @@ -2959,6 +2984,14 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); ++vcpu->stat.nmi_window_exits; + /* + * If the user space waits to inject a NMI, exit as soon as possible + */ + if (kvm_run->request_nmi_window && !vcpu->arch.nmi_pending) { + kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN; + return 0; + } + return 1; } -- cgit v1.2.3 From 3b86cd9967242f3f3d775ee015fb814a349ed5e6 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 26 Sep 2008 09:30:57 +0200 Subject: KVM: VMX: work around lacking VNMI support Older VMX supporting CPUs do not provide the "Virtual NMI" feature for tracking the NMI-blocked state after injecting such events. For now KVM is unable to inject NMIs on those CPUs. Derived from Sheng Yang's suggestion to use the IRQ window notification for detecting the end of NMI handlers, this patch implements virtual NMI support without impact on the host's ability to receive real NMIs. The downside is that the given approach requires some heuristics that can cause NMI nesting in vary rare corner cases. The approach works as follows: - inject NMI and set a software-based NMI-blocked flag - arm the IRQ window start notification whenever an NMI window is requested - if the guest exits due to an opening IRQ window, clear the emulated NMI-blocked flag - if the guest net execution time with NMI-blocked but without an IRQ window exceeds 1 second, force NMI-blocked reset and inject anyway This approach covers most practical scenarios: - succeeding NMIs are seperated by at least one open IRQ window - the guest may spin with IRQs disabled (e.g. 
due to a bug), but leaving the NMI handler takes much less time than one second - the guest does not rely on strict ordering or timing of NMIs (would be problematic in virtualized environments anyway) Successfully tested with the 'nmi n' monitor command, the kgdbts testsuite on smp guests (additional patches required to add debug register support to kvm) + the kernel's nmi_watchdog=1, and a Siemens- specific board emulation (+ guest) that comes with its own NMI watchdog mechanism. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 174 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 115 insertions(+), 59 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f16a62c79267..2180109d794c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -90,6 +90,11 @@ struct vcpu_vmx { } rmode; int vpid; bool emulation_required; + + /* Support for vnmi-less CPUs */ + int soft_vnmi_blocked; + ktime_t entry_time; + s64 vnmi_blocked_time; }; static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) @@ -2230,6 +2235,8 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmx->vcpu.arch.rmode.active = 0; + vmx->soft_vnmi_blocked = 0; + vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(&vmx->vcpu, 0); msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; @@ -2335,6 +2342,29 @@ out: return ret; } +static void enable_irq_window(struct kvm_vcpu *vcpu) +{ + u32 cpu_based_vm_exec_control; + + cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); + cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); +} + +static void enable_nmi_window(struct kvm_vcpu *vcpu) +{ + u32 cpu_based_vm_exec_control; + + if (!cpu_has_virtual_nmis()) { + enable_irq_window(vcpu); + return; + } + + cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); + cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); +} + static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -2360,6 +2390,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + if (!cpu_has_virtual_nmis()) { + /* + * Tracking the NMI-blocked state in software is built upon + * finding the next open IRQ window. This, in turn, depends on + * well-behaving guests: They have to keep IRQs disabled at + * least as long as the NMI handler runs. Otherwise we may + * cause NMI nesting, maybe breaking the guest. But as this is + * highly unlikely, we can live with the residual risk. 
+ */ + vmx->soft_vnmi_blocked = 1; + vmx->vnmi_blocked_time = 0; + } + ++vcpu->stat.nmi_injections; if (vcpu->arch.rmode.active) { vmx->rmode.irq.pending = true; @@ -2384,6 +2427,8 @@ static void vmx_update_window_states(struct kvm_vcpu *vcpu) !(guest_intr & (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI)); + if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) + vcpu->arch.nmi_window_open = 0; vcpu->arch.interrupt_window_open = ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && @@ -2403,55 +2448,31 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) kvm_queue_interrupt(vcpu, irq); } -static void enable_irq_window(struct kvm_vcpu *vcpu) -{ - u32 cpu_based_vm_exec_control; - - cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); - cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); -} - -static void enable_nmi_window(struct kvm_vcpu *vcpu) -{ - u32 cpu_based_vm_exec_control; - - if (!cpu_has_virtual_nmis()) - return; - - cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); - cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); -} - static void do_interrupt_requests(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { vmx_update_window_states(vcpu); - if (cpu_has_virtual_nmis()) { - if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { - if (vcpu->arch.nmi_window_open) { - vcpu->arch.nmi_pending = false; - vcpu->arch.nmi_injected = true; - } else { - enable_nmi_window(vcpu); - return; - } - } - if (vcpu->arch.nmi_injected) { - vmx_inject_nmi(vcpu); - if (vcpu->arch.nmi_pending - || kvm_run->request_nmi_window) - enable_nmi_window(vcpu); - else if (vcpu->arch.irq_summary - || kvm_run->request_interrupt_window) - enable_irq_window(vcpu); + if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { + if (vcpu->arch.nmi_window_open) { + vcpu->arch.nmi_pending = false; + vcpu->arch.nmi_injected = true; + } else { + enable_nmi_window(vcpu); return; } - if (!vcpu->arch.nmi_window_open || kvm_run->request_nmi_window) + } + if (vcpu->arch.nmi_injected) { + vmx_inject_nmi(vcpu); + if (vcpu->arch.nmi_pending || kvm_run->request_nmi_window) enable_nmi_window(vcpu); + else if (vcpu->arch.irq_summary + || kvm_run->request_interrupt_window) + enable_irq_window(vcpu); + return; } + if (!vcpu->arch.nmi_window_open || kvm_run->request_nmi_window) + enable_nmi_window(vcpu); if (vcpu->arch.interrupt_window_open) { if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) @@ -3097,6 +3118,37 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) printk(KERN_WARNING "%s: unexpected, valid vectoring info " "(0x%x) and exit reason is 0x%x\n", __func__, vectoring_info, exit_reason); + + if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) { + if (vcpu->arch.interrupt_window_open) { + vmx->soft_vnmi_blocked = 0; + vcpu->arch.nmi_window_open = 1; + } else if (vmx->vnmi_blocked_time > 1000000000LL && + (kvm_run->request_nmi_window || vcpu->arch.nmi_pending)) { + /* + * This CPU don't support us in finding the end of an + * NMI-blocked window if the guest runs with IRQs + * disabled. So we pull the trigger after 1 s of + * futile waiting, but inform the user about this. 
+ */ + printk(KERN_WARNING "%s: Breaking out of NMI-blocked " + "state on VCPU %d after 1 s timeout\n", + __func__, vcpu->vcpu_id); + vmx->soft_vnmi_blocked = 0; + vmx->vcpu.arch.nmi_window_open = 1; + } + + /* + * If the user space waits to inject an NNI, exit ASAP + */ + if (vcpu->arch.nmi_window_open && kvm_run->request_nmi_window + && !vcpu->arch.nmi_pending) { + kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN; + ++vcpu->stat.nmi_window_exits; + return 0; + } + } + if (exit_reason < kvm_vmx_max_exit_handlers && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run); @@ -3146,7 +3198,9 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx) if (unblock_nmi && vector != DF_VECTOR) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); - } + } else if (unlikely(vmx->soft_vnmi_blocked)) + vmx->vnmi_blocked_time += + ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); idt_vectoring_info = vmx->idt_vectoring_info; idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; @@ -3186,27 +3240,25 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) vmx_update_window_states(vcpu); - if (cpu_has_virtual_nmis()) { - if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { - if (vcpu->arch.interrupt.pending) { - enable_nmi_window(vcpu); - } else if (vcpu->arch.nmi_window_open) { - vcpu->arch.nmi_pending = false; - vcpu->arch.nmi_injected = true; - } else { - enable_nmi_window(vcpu); - return; - } - } - if (vcpu->arch.nmi_injected) { - vmx_inject_nmi(vcpu); - if (vcpu->arch.nmi_pending) - enable_nmi_window(vcpu); - else if (kvm_cpu_has_interrupt(vcpu)) - enable_irq_window(vcpu); + if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { + if (vcpu->arch.interrupt.pending) { + enable_nmi_window(vcpu); + } else if (vcpu->arch.nmi_window_open) { + vcpu->arch.nmi_pending = false; + vcpu->arch.nmi_injected = true; + } else { + enable_nmi_window(vcpu); return; } } + if (vcpu->arch.nmi_injected) { + vmx_inject_nmi(vcpu); + if (vcpu->arch.nmi_pending) + enable_nmi_window(vcpu); + else if (kvm_cpu_has_interrupt(vcpu)) + enable_irq_window(vcpu); + return; + } if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) { if (vcpu->arch.interrupt_window_open) kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu)); @@ -3255,6 +3307,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) struct vcpu_vmx *vmx = to_vmx(vcpu); u32 intr_info; + /* Record the guest's net vcpu time for enforced NMI injections. */ + if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) + vmx->entry_time = ktime_get(); + /* Handle invalid guest state instead of entering VMX */ if (vmx->emulation_required && emulate_invalid_guest_state) { handle_invalid_guest_state(vcpu, kvm_run); -- cgit v1.2.3 From 5f179287fa02723215eecf681d812b303c243973 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Tue, 7 Oct 2008 15:42:33 +0200 Subject: KVM: call kvm_arch_vcpu_reset() instead of the kvm_x86_ops callback Call kvm_arch_vcpu_reset() instead of directly using arch callback. The function does additional things. 
Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 07971451b947..a2c4b5594555 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3010,7 +3010,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) pr_debug("vcpu %d received sipi with vector # %x\n", vcpu->vcpu_id, vcpu->arch.sipi_vector); kvm_lapic_reset(vcpu); - r = kvm_x86_ops->vcpu_reset(vcpu); + r = kvm_arch_vcpu_reset(vcpu); if (r) return r; vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; -- cgit v1.2.3 From b558bc0a25c82ef2a9d2683b0beb3e4b87cea20b Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 9 Oct 2008 16:01:52 +0800 Subject: x86: Rename mtrr_state struct and macro names Prepare for exporting them. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kernel/cpu/mtrr/generic.c | 8 ++++---- arch/x86/kernel/cpu/mtrr/main.c | 4 ++-- arch/x86/kernel/cpu/mtrr/mtrr.h | 7 ++++--- 3 files changed, 10 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 4e8d77f01eeb..90db91e15931 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -14,9 +14,9 @@ #include #include "mtrr.h" -struct mtrr_state { - struct mtrr_var_range var_ranges[MAX_VAR_RANGES]; - mtrr_type fixed_ranges[NUM_FIXED_RANGES]; +struct mtrr_state_type { + struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES]; + mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES]; unsigned char enabled; unsigned char have_fixed; mtrr_type def_type; @@ -35,7 +35,7 @@ static struct fixed_range_block fixed_range_blocks[] = { }; static unsigned long smp_changes_mask; -static struct mtrr_state mtrr_state = {}; +static struct mtrr_state_type mtrr_state = {}; static int mtrr_state_set; u64 mtrr_tom2; diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 1159e269e596..d6ec7ec30274 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -49,7 +49,7 @@ u32 num_var_ranges = 0; -unsigned int mtrr_usage_table[MAX_VAR_RANGES]; +unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; static DEFINE_MUTEX(mtrr_mutex); u64 size_or_mask, size_and_mask; @@ -574,7 +574,7 @@ struct mtrr_value { unsigned long lsize; }; -static struct mtrr_value mtrr_state[MAX_VAR_RANGES]; +static struct mtrr_value mtrr_state[MTRR_MAX_VAR_RANGES]; static int mtrr_save(struct sys_device * sysdev, pm_message_t state) { diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h index 2dc4ec656b23..988538202ddc 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h @@ -11,8 +11,9 @@ #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg)) #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1) -#define NUM_FIXED_RANGES 88 -#define MAX_VAR_RANGES 256 +#define MTRR_NUM_FIXED_RANGES 88 +#define MTRR_MAX_VAR_RANGES 256 + #define MTRRfix64K_00000_MSR 0x250 #define MTRRfix16K_80000_MSR 0x258 #define MTRRfix16K_A0000_MSR 0x259 @@ -33,7 +34,7 @@ an 8 bit field: */ typedef u8 mtrr_type; -extern unsigned int mtrr_usage_table[MAX_VAR_RANGES]; +extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; struct mtrr_ops { u32 vendor; -- cgit v1.2.3 From 932d27a7913fc6b3c64c6e6082628b0a1561dec9 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 9 Oct 2008 16:01:53 +0800 Subject: x86: Export some definition of MTRR For KVM can reuse the 
type define, and need them to support shadow MTRR. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/mtrr.h | 25 +++++++++++++++++++++++++ arch/x86/kernel/cpu/mtrr/generic.c | 12 +++--------- arch/x86/kernel/cpu/mtrr/mtrr.h | 17 ----------------- 3 files changed, 28 insertions(+), 26 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h index 7c1e4258b31e..cb988aab716d 100644 --- a/arch/x86/include/asm/mtrr.h +++ b/arch/x86/include/asm/mtrr.h @@ -57,6 +57,31 @@ struct mtrr_gentry { }; #endif /* !__i386__ */ +struct mtrr_var_range { + u32 base_lo; + u32 base_hi; + u32 mask_lo; + u32 mask_hi; +}; + +/* In the Intel processor's MTRR interface, the MTRR type is always held in + an 8 bit field: */ +typedef u8 mtrr_type; + +#define MTRR_NUM_FIXED_RANGES 88 +#define MTRR_MAX_VAR_RANGES 256 + +struct mtrr_state_type { + struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES]; + mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES]; + unsigned char enabled; + unsigned char have_fixed; + mtrr_type def_type; +}; + +#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg)) +#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1) + /* These are the various ioctls */ #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 90db91e15931..b59ddcc88cd8 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -14,14 +14,6 @@ #include #include "mtrr.h" -struct mtrr_state_type { - struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES]; - mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES]; - unsigned char enabled; - unsigned char have_fixed; - mtrr_type def_type; -}; - struct fixed_range_block { int base_msr; /* start address of an MTRR block */ int ranges; /* number of MTRRs in this block */ @@ -35,10 +27,12 @@ static struct fixed_range_block fixed_range_blocks[] = { }; static unsigned long smp_changes_mask; -static struct mtrr_state_type mtrr_state = {}; static int mtrr_state_set; u64 mtrr_tom2; +struct mtrr_state_type mtrr_state = {}; +EXPORT_SYMBOL_GPL(mtrr_state); + #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "mtrr." 
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h index 988538202ddc..ffd60409cc6d 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h @@ -8,12 +8,6 @@ #define MTRRcap_MSR 0x0fe #define MTRRdefType_MSR 0x2ff -#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg)) -#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1) - -#define MTRR_NUM_FIXED_RANGES 88 -#define MTRR_MAX_VAR_RANGES 256 - #define MTRRfix64K_00000_MSR 0x250 #define MTRRfix16K_80000_MSR 0x258 #define MTRRfix16K_A0000_MSR 0x259 @@ -30,10 +24,6 @@ #define MTRR_CHANGE_MASK_VARIABLE 0x02 #define MTRR_CHANGE_MASK_DEFTYPE 0x04 -/* In the Intel processor's MTRR interface, the MTRR type is always held in - an 8 bit field: */ -typedef u8 mtrr_type; - extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; struct mtrr_ops { @@ -71,13 +61,6 @@ struct set_mtrr_context { u32 ccr3; }; -struct mtrr_var_range { - u32 base_lo; - u32 base_hi; - u32 mask_lo; - u32 mask_hi; -}; - void set_mtrr_done(struct set_mtrr_context *ctxt); void set_mtrr_cache_disable(struct set_mtrr_context *ctxt); void set_mtrr_prepare_save(struct set_mtrr_context *ctxt); -- cgit v1.2.3 From 0bed3b568b68e5835ef5da888a372b9beabf7544 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 9 Oct 2008 16:01:54 +0800 Subject: KVM: Improve MTRR structure As well as reset mmu context when set MTRR. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 5 +++- arch/x86/kvm/x86.c | 61 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 63 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a40fa8478920..8082e87f628d 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -21,6 +21,7 @@ #include #include +#include #define KVM_MAX_VCPUS 16 #define KVM_MEMORY_SLOTS 32 @@ -86,6 +87,7 @@ #define KVM_MIN_FREE_MMU_PAGES 5 #define KVM_REFILL_PAGES 25 #define KVM_MAX_CPUID_ENTRIES 40 +#define KVM_NR_FIXED_MTRR_REGION 88 #define KVM_NR_VAR_MTRR 8 extern spinlock_t kvm_lock; @@ -329,7 +331,8 @@ struct kvm_vcpu_arch { bool nmi_injected; bool nmi_window_open; - u64 mtrr[0x100]; + struct mtrr_state_type mtrr_state; + u32 pat; }; struct kvm_mem_alias { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a2c4b5594555..f5b2334c6bda 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -39,6 +39,7 @@ #include #include #include +#include #define MAX_IO_MSRS 256 #define CR0_RESERVED_BITS \ @@ -650,10 +651,38 @@ static bool msr_mtrr_valid(unsigned msr) static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { + u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; + if (!msr_mtrr_valid(msr)) return 1; - vcpu->arch.mtrr[msr - 0x200] = data; + if (msr == MSR_MTRRdefType) { + vcpu->arch.mtrr_state.def_type = data; + vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; + } else if (msr == MSR_MTRRfix64K_00000) + p[0] = data; + else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) + p[1 + msr - MSR_MTRRfix16K_80000] = data; + else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) + p[3 + msr - MSR_MTRRfix4K_C0000] = data; + else if (msr == MSR_IA32_CR_PAT) + vcpu->arch.pat = data; + else { /* Variable MTRRs */ + int idx, is_mtrr_mask; + u64 *pt; + + idx = (msr - 0x200) / 2; + is_mtrr_mask = msr - 0x200 - 2 * idx; + if (!is_mtrr_mask) + pt = + (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; + else + pt = + (u64 
*)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; + *pt = data; + } + + kvm_mmu_reset_context(vcpu); return 0; } @@ -749,10 +778,37 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { + u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; + if (!msr_mtrr_valid(msr)) return 1; - *pdata = vcpu->arch.mtrr[msr - 0x200]; + if (msr == MSR_MTRRdefType) + *pdata = vcpu->arch.mtrr_state.def_type + + (vcpu->arch.mtrr_state.enabled << 10); + else if (msr == MSR_MTRRfix64K_00000) + *pdata = p[0]; + else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) + *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; + else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) + *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; + else if (msr == MSR_IA32_CR_PAT) + *pdata = vcpu->arch.pat; + else { /* Variable MTRRs */ + int idx, is_mtrr_mask; + u64 *pt; + + idx = (msr - 0x200) / 2; + is_mtrr_mask = msr - 0x200 - 2 * idx; + if (!is_mtrr_mask) + pt = + (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; + else + pt = + (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; + *pdata = *pt; + } + return 0; } @@ -3942,6 +3998,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) /* We do fxsave: this must be aligned. */ BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF); + vcpu->arch.mtrr_state.have_fixed = 1; vcpu_load(vcpu); r = kvm_arch_vcpu_reset(vcpu); if (r == 0) -- cgit v1.2.3 From 468d472f3f65100d5fb88c8d45043c85b874c294 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 9 Oct 2008 16:01:55 +0800 Subject: KVM: VMX: Add PAT support for EPT GUEST_PAT support is a new feature introduced by Intel Core i7 architecture. With this, cpu would save/load guest and host PAT automatically, for EPT memory type in guest depends on MSR_IA32_CR_PAT. Also add save/restore for MSR_IA32_CR_PAT. 
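A minimal sketch of the save path, reusing the names from the hunk below: the PAT MSR is 64 bits wide, so the two 32-bit halves returned by rdmsr() are recombined before being written into the VMCS, and only when the corresponding VM-exit/VM-entry control is actually available.

u32 msr_low, msr_high;
u64 host_pat;

if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
        rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);      /* read the host PAT MSR */
        host_pat = msr_low | ((u64)msr_high << 32);     /* rebuild the 64-bit value */
        vmcs_write64(HOST_IA32_PAT, host_pat);          /* reloaded on every VM exit */
}
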
Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 29 ++++++++++++++++++++++++++--- arch/x86/kvm/vmx.h | 7 +++++++ arch/x86/kvm/x86.c | 2 +- 3 files changed, 34 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2180109d794c..b4c95a501cca 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -962,6 +962,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data); break; + case MSR_IA32_CR_PAT: + if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { + vmcs_write64(GUEST_IA32_PAT, data); + vcpu->arch.pat = data; + break; + } + /* Otherwise falls through to kvm_set_msr_common */ default: vmx_load_host_state(vmx); msr = find_msr_entry(vmx, msr_index); @@ -1181,12 +1188,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) #ifdef CONFIG_X86_64 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; #endif - opt = 0; + opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, &_vmexit_control) < 0) return -EIO; - min = opt = 0; + min = 0; + opt = VM_ENTRY_LOAD_IA32_PAT; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, &_vmentry_control) < 0) return -EIO; @@ -2092,8 +2100,9 @@ static void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr) */ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) { - u32 host_sysenter_cs; + u32 host_sysenter_cs, msr_low, msr_high; u32 junk; + u64 host_pat; unsigned long a; struct descriptor_table dt; int i; @@ -2181,6 +2190,20 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) rdmsrl(MSR_IA32_SYSENTER_EIP, a); vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ + if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { + rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); + host_pat = msr_low | ((u64) msr_high << 32); + vmcs_write64(HOST_IA32_PAT, host_pat); + } + if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { + rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); + host_pat = msr_low | ((u64) msr_high << 32); + /* Write the default value follow host pat */ + vmcs_write64(GUEST_IA32_PAT, host_pat); + /* Keep arch.pat sync with GUEST_IA32_PAT */ + vmx->vcpu.arch.pat = host_pat; + } + for (i = 0; i < NR_VMX_MSR; ++i) { u32 index = vmx_msr_index[i]; u32 data_low, data_high; diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h index ec5edc339da6..18598afe52eb 100644 --- a/arch/x86/kvm/vmx.h +++ b/arch/x86/kvm/vmx.h @@ -63,10 +63,13 @@ #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 +#define VM_EXIT_SAVE_IA32_PAT 0x00040000 +#define VM_EXIT_LOAD_IA32_PAT 0x00080000 #define VM_ENTRY_IA32E_MODE 0x00000200 #define VM_ENTRY_SMM 0x00000400 #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 +#define VM_ENTRY_LOAD_IA32_PAT 0x00004000 /* VMCS Encodings */ enum vmcs_field { @@ -112,6 +115,8 @@ enum vmcs_field { VMCS_LINK_POINTER_HIGH = 0x00002801, GUEST_IA32_DEBUGCTL = 0x00002802, GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, + GUEST_IA32_PAT = 0x00002804, + GUEST_IA32_PAT_HIGH = 0x00002805, GUEST_PDPTR0 = 0x0000280a, GUEST_PDPTR0_HIGH = 0x0000280b, GUEST_PDPTR1 = 0x0000280c, @@ -120,6 +125,8 @@ enum vmcs_field { GUEST_PDPTR2_HIGH = 0x0000280f, GUEST_PDPTR3 = 0x00002810, GUEST_PDPTR3_HIGH = 0x00002811, + HOST_IA32_PAT = 0x00002c00, + HOST_IA32_PAT_HIGH = 0x00002c01, PIN_BASED_VM_EXEC_CONTROL = 0x00004000, CPU_BASED_VM_EXEC_CONTROL = 0x00004002, EXCEPTION_BITMAP = 0x00004004, diff --git 
a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f5b2334c6bda..0edf75339f3a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -452,7 +452,7 @@ static u32 msrs_to_save[] = { MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, - MSR_IA32_PERF_STATUS, + MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT }; static unsigned num_msrs_to_save; -- cgit v1.2.3 From 74be52e3e6285fc6e872a2a7baea544106f399ea Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 9 Oct 2008 16:01:56 +0800 Subject: KVM: Add local get_mtrr_type() to support MTRR For EPT memory type support. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 104 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 410ddbc1aa2e..ac2304fd173e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1393,6 +1393,110 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva) return page; } +/* + * The function is based on mtrr_type_lookup() in + * arch/x86/kernel/cpu/mtrr/generic.c + */ +static int get_mtrr_type(struct mtrr_state_type *mtrr_state, + u64 start, u64 end) +{ + int i; + u64 base, mask; + u8 prev_match, curr_match; + int num_var_ranges = KVM_NR_VAR_MTRR; + + if (!mtrr_state->enabled) + return 0xFF; + + /* Make end inclusive end, instead of exclusive */ + end--; + + /* Look in fixed ranges. Just return the type as per start */ + if (mtrr_state->have_fixed && (start < 0x100000)) { + int idx; + + if (start < 0x80000) { + idx = 0; + idx += (start >> 16); + return mtrr_state->fixed_ranges[idx]; + } else if (start < 0xC0000) { + idx = 1 * 8; + idx += ((start - 0x80000) >> 14); + return mtrr_state->fixed_ranges[idx]; + } else if (start < 0x1000000) { + idx = 3 * 8; + idx += ((start - 0xC0000) >> 12); + return mtrr_state->fixed_ranges[idx]; + } + } + + /* + * Look in variable ranges + * Look of multiple ranges matching this address and pick type + * as per MTRR precedence + */ + if (!(mtrr_state->enabled & 2)) + return mtrr_state->def_type; + + prev_match = 0xFF; + for (i = 0; i < num_var_ranges; ++i) { + unsigned short start_state, end_state; + + if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11))) + continue; + + base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) + + (mtrr_state->var_ranges[i].base_lo & PAGE_MASK); + mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) + + (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK); + + start_state = ((start & mask) == (base & mask)); + end_state = ((end & mask) == (base & mask)); + if (start_state != end_state) + return 0xFE; + + if ((start & mask) != (base & mask)) + continue; + + curr_match = mtrr_state->var_ranges[i].base_lo & 0xff; + if (prev_match == 0xFF) { + prev_match = curr_match; + continue; + } + + if (prev_match == MTRR_TYPE_UNCACHABLE || + curr_match == MTRR_TYPE_UNCACHABLE) + return MTRR_TYPE_UNCACHABLE; + + if ((prev_match == MTRR_TYPE_WRBACK && + curr_match == MTRR_TYPE_WRTHROUGH) || + (prev_match == MTRR_TYPE_WRTHROUGH && + curr_match == MTRR_TYPE_WRBACK)) { + prev_match = MTRR_TYPE_WRTHROUGH; + curr_match = MTRR_TYPE_WRTHROUGH; + } + + if (prev_match != curr_match) + return MTRR_TYPE_UNCACHABLE; + } + + if (prev_match != 0xFF) + return prev_match; + + return mtrr_state->def_type; +} + +static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) +{ + u8 mtrr; + + mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT, + (gfn << 
PAGE_SHIFT) + PAGE_SIZE); + if (mtrr == 0xfe || mtrr == 0xff) + mtrr = MTRR_TYPE_WRBACK; + return mtrr; +} + static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { unsigned index; -- cgit v1.2.3 From 64d4d521757117aa5c1cfe79d3baa6cf57703f81 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 9 Oct 2008 16:01:57 +0800 Subject: KVM: Enable MTRR for EPT The effective memory type of EPT is the mixture of MSR_IA32_CR_PAT and memory type field of EPT entry. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/mmu.c | 11 ++++++++++- arch/x86/kvm/svm.c | 6 ++++++ arch/x86/kvm/vmx.c | 10 ++++++++-- arch/x86/kvm/x86.c | 2 +- 5 files changed, 27 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8082e87f628d..93040b5eed96 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -483,6 +483,7 @@ struct kvm_x86_ops { int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); int (*get_tdp_level)(void); + int (*get_mt_mask_shift)(void); }; extern struct kvm_x86_ops *kvm_x86_ops; @@ -496,7 +497,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu); void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); void kvm_mmu_set_base_ptes(u64 base_pte); void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, - u64 dirty_mask, u64 nx_mask, u64 x_mask); + u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask); int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ac2304fd173e..09d05f57bf66 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -168,6 +168,7 @@ static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ static u64 __read_mostly shadow_user_mask; static u64 __read_mostly shadow_accessed_mask; static u64 __read_mostly shadow_dirty_mask; +static u64 __read_mostly shadow_mt_mask; void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte) { @@ -183,13 +184,14 @@ void kvm_mmu_set_base_ptes(u64 base_pte) EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes); void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, - u64 dirty_mask, u64 nx_mask, u64 x_mask) + u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask) { shadow_user_mask = user_mask; shadow_accessed_mask = accessed_mask; shadow_dirty_mask = dirty_mask; shadow_nx_mask = nx_mask; shadow_x_mask = x_mask; + shadow_mt_mask = mt_mask; } EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); @@ -1546,6 +1548,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, { u64 spte; int ret = 0; + u64 mt_mask = shadow_mt_mask; + /* * We don't set the accessed bit, since we sometimes want to see * whether the guest actually used the pte (in order to detect @@ -1564,6 +1568,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, spte |= shadow_user_mask; if (largepage) spte |= PT_PAGE_SIZE_MASK; + if (mt_mask) { + mt_mask = get_memory_type(vcpu, gfn) << + kvm_x86_ops->get_mt_mask_shift(); + spte |= mt_mask; + } spte |= (u64)pfn << PAGE_SHIFT; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 9c4ce657d963..05efc4ef75a6 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1912,6 +1912,11 @@ static int get_npt_level(void) #endif } +static int svm_get_mt_mask_shift(void) +{ + return 0; +} + static struct kvm_x86_ops svm_x86_ops = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, @@ -1967,6 +1972,7 @@ 
static struct kvm_x86_ops svm_x86_ops = { .set_tss_addr = svm_set_tss_addr, .get_tdp_level = get_npt_level, + .get_mt_mask_shift = svm_get_mt_mask_shift, }; static int __init svm_init(void) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b4c95a501cca..dae134fa09e7 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3574,6 +3574,11 @@ static int get_ept_level(void) return VMX_EPT_DEFAULT_GAW + 1; } +static int vmx_get_mt_mask_shift(void) +{ + return VMX_EPT_MT_EPTE_SHIFT; +} + static struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, @@ -3629,6 +3634,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .set_tss_addr = vmx_set_tss_addr, .get_tdp_level = get_ept_level, + .get_mt_mask_shift = vmx_get_mt_mask_shift, }; static int __init vmx_init(void) @@ -3685,10 +3691,10 @@ static int __init vmx_init(void) bypass_guest_pf = 0; kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | VMX_EPT_WRITABLE_MASK | - VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT | VMX_EPT_IGMT_BIT); kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, - VMX_EPT_EXECUTABLE_MASK); + VMX_EPT_EXECUTABLE_MASK, + VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); kvm_enable_tdp(); } else kvm_disable_tdp(); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0edf75339f3a..f175b796c2a6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2615,7 +2615,7 @@ int kvm_arch_init(void *opaque) kvm_mmu_set_nonpresent_ptes(0ull, 0ull); kvm_mmu_set_base_ptes(PT_PRESENT_MASK); kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, - PT_DIRTY_MASK, PT64_NX_MASK, 0); + PT_DIRTY_MASK, PT64_NX_MASK, 0, 0); return 0; out: -- cgit v1.2.3 From d73fa29a9b75b2af7f69dae276d2c602a23b329b Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Tue, 14 Oct 2008 15:59:10 +0800 Subject: KVM: Clean up kvm_x86_emulate.h Remove one left improper comment of removed CR2. Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_x86_emulate.h | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_x86_emulate.h index 25179a29f208..16a002655f31 100644 --- a/arch/x86/include/asm/kvm_x86_emulate.h +++ b/arch/x86/include/asm/kvm_x86_emulate.h @@ -146,22 +146,18 @@ struct x86_emulate_ctxt { /* Register state before/after emulation. */ struct kvm_vcpu *vcpu; - /* Linear faulting address (if emulating a page-faulting instruction) */ unsigned long eflags; - /* Emulated execution mode, represented by an X86EMUL_MODE value. */ int mode; - u32 cs_base; /* decode cache */ - struct decode_cache decode; }; /* Repeat String Operation Prefix */ -#define REPE_PREFIX 1 -#define REPNE_PREFIX 2 +#define REPE_PREFIX 1 +#define REPNE_PREFIX 2 /* Execution mode, passed to the emulator. */ #define X86EMUL_MODE_REAL 0 /* Real mode. */ @@ -170,7 +166,7 @@ struct x86_emulate_ctxt { #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ /* Host execution mode. */ -#if defined(__i386__) +#if defined(CONFIG_X86_32) #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32 #elif defined(CONFIG_X86_64) #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 -- cgit v1.2.3 From 291f26bc0f89518ad7ee3207c09eb8a743ac8fcc Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 16 Oct 2008 17:30:57 +0800 Subject: KVM: MMU: Extend kvm_mmu_page->slot_bitmap size Otherwise set_bit() for private memory slot(above KVM_MEMORY_SLOTS) would corrupted memory in 32bit host. 
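To make the corruption concrete: kvm_host.h defines KVM_MEMORY_SLOTS as 32 and the private slots sit above that, so a slot index of 32 or more no longer fits into the single unsigned long used before (only 32 bits on a 32-bit host). A hedged before/after sketch, with 33 as an illustrative private-slot index:

/* Before: one machine word; on a 32-bit host only bits 0..31 exist. */
unsigned long slot_bitmap;
__set_bit(33, &slot_bitmap);    /* bit 33 lands outside the word -> memory corruption */

/* After: sized for every slot, private ones included, on any word size. */
DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
bitmap_zero(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
__set_bit(33, slot_bitmap);     /* stays inside the bitmap */
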
Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 8 +++++--- arch/x86/kvm/mmu.c | 6 +++--- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 93040b5eed96..59c3ae10de6c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -192,9 +192,11 @@ struct kvm_mmu_page { u64 *spt; /* hold the gfn of each spte inside spt */ gfn_t *gfns; - unsigned long slot_bitmap; /* One bit set per slot which has memory - * in this shadow page. - */ + /* + * One bit set per slot which has memory + * in this shadow page. + */ + DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); int multimapped; /* More than one parent_pte? */ int root_count; /* Currently serving as active root */ bool unsync; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 09d05f57bf66..8687758b5295 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -789,7 +789,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, set_page_private(virt_to_page(sp->spt), (unsigned long)sp); list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); ASSERT(is_empty_shadow_page(sp->spt)); - sp->slot_bitmap = 0; + bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); sp->multimapped = 0; sp->parent_pte = parent_pte; --vcpu->kvm->arch.n_free_mmu_pages; @@ -1364,7 +1364,7 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn)); struct kvm_mmu_page *sp = page_header(__pa(pte)); - __set_bit(slot, &sp->slot_bitmap); + __set_bit(slot, sp->slot_bitmap); } static void mmu_convert_notrap(struct kvm_mmu_page *sp) @@ -2564,7 +2564,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) int i; u64 *pt; - if (!test_bit(slot, &sp->slot_bitmap)) + if (!test_bit(slot, sp->slot_bitmap)) continue; pt = sp->spt; -- cgit v1.2.3 From 6fe639792c7b8e462baeaac39ecc33541fd5da6e Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 16 Oct 2008 17:30:58 +0800 Subject: KVM: VMX: Move private memory slot position PCI device assignment would map guest MMIO spaces as separate slot, so it is possible that the device has more than 2 MMIO spaces and overwrite current private memslot. The patch move private memory slot to the top of userspace visible memory slots. 
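Put differently, the private slots simply move from fixed low indices (8, 9, 10) to indices above everything userspace can create, so an assigned device with many MMIO BARs can never overwrite them. A sketch of the resulting layout, using the constants defined in the hunk below:

/*
 * slot 0 .. KVM_MEMORY_SLOTS-1       userspace-visible (RAM, assigned-device MMIO, ...)
 * KVM_MEMORY_SLOTS + 0               TSS_PRIVATE_MEMSLOT
 * KVM_MEMORY_SLOTS + 1               APIC_ACCESS_PAGE_PRIVATE_MEMSLOT
 * KVM_MEMORY_SLOTS + 2               IDENTITY_PAGETABLE_PRIVATE_MEMSLOT
 */
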
Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 +- arch/x86/kvm/vmx.h | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index dae134fa09e7..7623eb7b68d5 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2513,7 +2513,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) { int ret; struct kvm_userspace_memory_region tss_mem = { - .slot = 8, + .slot = TSS_PRIVATE_MEMSLOT, .guest_phys_addr = addr, .memory_size = PAGE_SIZE * 3, .flags = 0, diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h index 18598afe52eb..3db236c26fa4 100644 --- a/arch/x86/kvm/vmx.h +++ b/arch/x86/kvm/vmx.h @@ -338,8 +338,9 @@ enum vmcs_field { #define AR_RESERVD_MASK 0xfffe0f00 -#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT 9 -#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT 10 +#define TSS_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 0) +#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 1) +#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 2) #define VMX_NR_VPIDS (1 << 16) #define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 -- cgit v1.2.3 From 291fd39bfc2089c2dae79cf2d7cfca81b14ca769 Mon Sep 17 00:00:00 2001 From: Guillaume Thouvenin Date: Mon, 20 Oct 2008 13:11:58 +0200 Subject: KVM: x86 emulator: Add decode entries for 0x04 and 0x05 opcodes (add acc, imm) Add decode entries for 0x04 and 0x05 (ADD) opcodes, execution is already implemented. Signed-off-by: Guillaume Thouvenin Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index a391e213fe61..57d7cc45be44 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -80,7 +80,7 @@ static u16 opcode_table[256] = { /* 0x00 - 0x07 */ ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, - 0, 0, 0, 0, + ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0, /* 0x08 - 0x0F */ ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, -- cgit v1.2.3 From 8fdb2351d51b040146f10a624387bbd102d851c0 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 20 Oct 2008 10:20:02 +0200 Subject: KVM: x86: Fix and refactor NMI watchdog emulation This patch refactors the NMI watchdog delivery patch, consolidating tests and providing a proper API for delivering watchdog events. An included micro-optimization is to check only for apic_hw_enabled in kvm_apic_local_deliver (the test for LVT mask is covering the soft-disabled case already). Signed-off-by: Jan Kiszka Acked-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/i8254.c | 15 +++++++++------ arch/x86/kvm/irq.h | 2 +- arch/x86/kvm/lapic.c | 20 ++++++++++---------- 3 files changed, 20 insertions(+), 17 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 580cc1d01c7d..b6fcf5a9e502 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -612,15 +612,18 @@ static void __inject_pit_timer_intr(struct kvm *kvm) mutex_unlock(&kvm->lock); /* - * Provides NMI watchdog support in IOAPIC mode. - * The route is: PIT -> PIC -> LVT0 in NMI mode, - * timer IRQs will continue to flow through the IOAPIC. + * Provides NMI watchdog support via Virtual Wire mode. + * The route is: PIT -> PIC -> LVT0 in NMI mode. 
+ * + * Note: Our Virtual Wire implementation is simplified, only + * propagating PIT interrupts to all VCPUs when they have set + * LVT0 to NMI delivery. Other PIC interrupts are just sent to + * VCPU0, and only if its LVT0 is in EXTINT mode. */ for (i = 0; i < KVM_MAX_VCPUS; ++i) { vcpu = kvm->vcpus[i]; - if (!vcpu) - continue; - kvm_apic_local_deliver(vcpu, APIC_LVT0); + if (vcpu) + kvm_apic_nmi_wd_deliver(vcpu); } } diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index 71e37a530cf7..b9e9051650ea 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -87,7 +87,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s); void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec); void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); -int kvm_apic_local_deliver(struct kvm_vcpu *vcpu, int lvt_type); +void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu); void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu); void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu); void __kvm_migrate_timers(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 304f9ddbdd51..0b0d413f0af5 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -973,14 +973,12 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu) return 0; } -int kvm_apic_local_deliver(struct kvm_vcpu *vcpu, int lvt_type) +static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type) { - struct kvm_lapic *apic = vcpu->arch.apic; + u32 reg = apic_get_reg(apic, lvt_type); int vector, mode, trig_mode; - u32 reg; - if (apic && apic_enabled(apic)) { - reg = apic_get_reg(apic, lvt_type); + if (apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) { vector = reg & APIC_VECTOR_MASK; mode = reg & APIC_MODE_MASK; trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; @@ -989,9 +987,12 @@ int kvm_apic_local_deliver(struct kvm_vcpu *vcpu, int lvt_type) return 0; } -static inline int __inject_apic_timer_irq(struct kvm_lapic *apic) +void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu) { - return kvm_apic_local_deliver(apic->vcpu, APIC_LVTT); + struct kvm_lapic *apic = vcpu->arch.apic; + + if (apic) + kvm_apic_local_deliver(apic, APIC_LVT0); } static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) @@ -1086,9 +1087,8 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; - if (apic && apic_lvt_enabled(apic, APIC_LVTT) && - atomic_read(&apic->timer.pending) > 0) { - if (__inject_apic_timer_irq(apic)) + if (apic && atomic_read(&apic->timer.pending) > 0) { + if (kvm_apic_local_deliver(apic, APIC_LVTT)) atomic_dec(&apic->timer.pending); } } -- cgit v1.2.3 From cc6e462cd54e64858ea25816df87d033229efe56 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 20 Oct 2008 10:20:03 +0200 Subject: KVM: x86: Optimize NMI watchdog delivery As suggested by Avi, this patch introduces a counter of VCPUs that have LVT0 set to NMI mode. Only if the counter > 0, we push the PIT ticks via all LAPIC LVT0 lines to enable NMI watchdog support. 
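Condensed, the bookkeeping looks as follows (helper and field names as in the patch; the delivery loop is abbreviated to a comment):

/* APIC_LVT0 write path: count transitions into and out of NMI mode. */
if (apic_lvt_nmi_mode(lvt0_val) && !nmi_wd_enabled)
        apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
else if (!apic_lvt_nmi_mode(lvt0_val) && nmi_wd_enabled)
        apic->vcpu->kvm->arch.vapics_in_nmi_mode--;

/* PIT tick path: skip the whole VCPU walk when nobody uses the watchdog. */
if (kvm->arch.vapics_in_nmi_mode > 0) {
        /* for each existing vcpu: kvm_apic_nmi_wd_deliver(vcpu); */
}
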
Signed-off-by: Jan Kiszka Acked-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/i8254.c | 11 ++++++----- arch/x86/kvm/lapic.c | 23 ++++++++++++++++++++--- 3 files changed, 27 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 59c3ae10de6c..09e6c56572cb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -361,6 +361,7 @@ struct kvm_arch{ struct kvm_ioapic *vioapic; struct kvm_pit *vpit; struct hlist_head irq_ack_notifier_list; + int vapics_in_nmi_mode; int round_robin_prev_vcpu; unsigned int tss_addr; diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index b6fcf5a9e502..e665d1c623ca 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -620,11 +620,12 @@ static void __inject_pit_timer_intr(struct kvm *kvm) * LVT0 to NMI delivery. Other PIC interrupts are just sent to * VCPU0, and only if its LVT0 is in EXTINT mode. */ - for (i = 0; i < KVM_MAX_VCPUS; ++i) { - vcpu = kvm->vcpus[i]; - if (vcpu) - kvm_apic_nmi_wd_deliver(vcpu); - } + if (kvm->arch.vapics_in_nmi_mode > 0) + for (i = 0; i < KVM_MAX_VCPUS; ++i) { + vcpu = kvm->vcpus[i]; + if (vcpu) + kvm_apic_nmi_wd_deliver(vcpu); + } } void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 0b0d413f0af5..afac68c0815c 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -130,6 +130,11 @@ static inline int apic_lvtt_period(struct kvm_lapic *apic) return apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC; } +static inline int apic_lvt_nmi_mode(u32 lvt_val) +{ + return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI; +} + static unsigned int apic_lvt_mask[APIC_LVT_NUM] = { LVT_MASK | APIC_LVT_TIMER_PERIODIC, /* LVTT */ LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */ @@ -672,6 +677,20 @@ static void start_apic_timer(struct kvm_lapic *apic) apic->timer.period))); } +static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) +{ + int nmi_wd_enabled = apic_lvt_nmi_mode(apic_get_reg(apic, APIC_LVT0)); + + if (apic_lvt_nmi_mode(lvt0_val)) { + if (!nmi_wd_enabled) { + apic_debug("Receive NMI setting on APIC_LVT0 " + "for cpu %d\n", apic->vcpu->vcpu_id); + apic->vcpu->kvm->arch.vapics_in_nmi_mode++; + } + } else if (nmi_wd_enabled) + apic->vcpu->kvm->arch.vapics_in_nmi_mode--; +} + static void apic_mmio_write(struct kvm_io_device *this, gpa_t address, int len, const void *data) { @@ -753,9 +772,7 @@ static void apic_mmio_write(struct kvm_io_device *this, break; case APIC_LVT0: - if (val == APIC_DM_NMI) - apic_debug("Receive NMI setting on APIC_LVT0 " - "for cpu %d\n", apic->vcpu->vcpu_id); + apic_manage_nmi_watchdog(apic, val); case APIC_LVTT: case APIC_LVTTHMR: case APIC_LVTPC: -- cgit v1.2.3 From b8222ad2e52fd2c0c4e5e1c53e65d131f911b767 Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Wed, 22 Oct 2008 16:39:47 +0530 Subject: KVM: x86: Fix typo in function name get_segment_descritptor_dtable() contains an obvious type. 
Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f175b796c2a6..ceeac8897143 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3373,9 +3373,9 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector, kvm_desct->padding = 0; } -static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu, - u16 selector, - struct descriptor_table *dtable) +static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu, + u16 selector, + struct descriptor_table *dtable) { if (selector & 1 << 2) { struct kvm_segment kvm_seg; @@ -3400,7 +3400,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, struct descriptor_table dtable; u16 index = selector >> 3; - get_segment_descritptor_dtable(vcpu, selector, &dtable); + get_segment_descriptor_dtable(vcpu, selector, &dtable); if (dtable.limit < index * 8 + 7) { kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); @@ -3419,7 +3419,7 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, struct descriptor_table dtable; u16 index = selector >> 3; - get_segment_descritptor_dtable(vcpu, selector, &dtable); + get_segment_descriptor_dtable(vcpu, selector, &dtable); if (dtable.limit < index * 8 + 7) return 1; -- cgit v1.2.3 From 25022acc3dd5f0b54071c7ba7c371860f2971b52 Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Mon, 27 Oct 2008 09:04:17 +0000 Subject: KVM: SVM: Set the 'g' bit of the cs selector for cross-vendor migration The hardware does not set the 'g' bit of the cs selector and this breaks migration from amd hosts to intel hosts. Set this bit if the segment limit is beyond 1 MB. Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 05efc4ef75a6..665008d97856 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -772,6 +772,15 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; + + /* + * SVM always stores 0 for the 'G' bit in the CS selector in + * the VMCB on a VMEXIT. This hurts cross-vendor migration: + * Intel's VMENTRY has a check on the 'G' bit. + */ + if (seg == VCPU_SREG_CS) + var->g = s->limit > 0xfffff; + var->unusable = !var->present; } -- cgit v1.2.3 From c0d09828c870f90c6bc72070ada281568f89c63b Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Mon, 27 Oct 2008 09:04:18 +0000 Subject: KVM: SVM: Set the 'busy' flag of the TR selector The busy flag of the TR selector is not set by the hardware. This breaks migration from amd hosts to intel hosts. 
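Both this and the preceding 'g'-bit change are small attribute reconstructions in svm_get_segment(); condensed for comparison (the full hunks follow):

/* CS: SVM leaves G clear on #VMEXIT; Intel's VM-entry checks expect G=1
 * whenever the limit is page-granular, i.e. larger than 1 MB. */
if (seg == VCPU_SREG_CS)
        var->g = s->limit > 0xfffff;

/* TR: the busy bit of the TSS type (11 = busy, 9 = available) is likewise
 * not reported, so set it back before the state is consumed elsewhere. */
if (seg == VCPU_SREG_TR)
        var->type |= 0x2;
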
Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 665008d97856..743aebd7bfcc 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -781,6 +781,13 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, if (seg == VCPU_SREG_CS) var->g = s->limit > 0xfffff; + /* + * Work around a bug where the busy flag in the tr selector + * isn't exposed + */ + if (seg == VCPU_SREG_TR) + var->type |= 0x2; + var->unusable = !var->present; } -- cgit v1.2.3 From e93f36bcfaa9e899c595e1c446c784a69021854a Mon Sep 17 00:00:00 2001 From: Guillaume Thouvenin Date: Tue, 28 Oct 2008 10:51:30 +0100 Subject: KVM: allow emulator to adjust rip for emulated pio instructions If we call the emulator we shouldn't call skip_emulated_instruction() in the first place, since the emulator already computes the next rip for us. Thus we move ->skip_emulated_instruction() out of kvm_emulate_pio() and into handle_io() (and the svm equivalent). We also replaced "return 0" by "break" in the "do_io:" case because now the shadow register state needs to be committed. Otherwise eip will never be updated. Signed-off-by: Guillaume Thouvenin Signed-off-by: Avi Kivity --- arch/x86/kvm/svm.c | 1 + arch/x86/kvm/vmx.c | 1 + arch/x86/kvm/x86.c | 2 -- arch/x86/kvm/x86_emulate.c | 2 +- 4 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 743aebd7bfcc..f0ad4d4217e4 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1115,6 +1115,7 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) rep = (io_info & SVM_IOIO_REP_MASK) != 0; down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0; + skip_emulated_instruction(&svm->vcpu); return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port); } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7623eb7b68d5..816d23185fb8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2687,6 +2687,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) rep = (exit_qualification & 32) != 0; port = exit_qualification >> 16; + skip_emulated_instruction(vcpu); return kvm_emulate_pio(vcpu, kvm_run, in, size, port); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ceeac8897143..38f79b6aaf1e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2478,8 +2478,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, val = kvm_register_read(vcpu, VCPU_REGS_RAX); memcpy(vcpu->arch.pio_data, &val, 4); - kvm_x86_ops->skip_emulated_instruction(vcpu); - pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in); if (pio_dev) { kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data); diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 57d7cc45be44..8f60ace13874 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1772,7 +1772,7 @@ special_insn: c->eip = saved_eip; goto cannot_emulate; } - return 0; + break; case 0xf4: /* hlt */ ctxt->vcpu->arch.halt_request = 1; break; -- cgit v1.2.3 From 1d5a4d9b92028d9fe77da34037bd5a1ebfecc733 Mon Sep 17 00:00:00 2001 From: Guillaume Thouvenin Date: Wed, 29 Oct 2008 09:39:42 +0100 Subject: KVM: VMX: Handle mmio emulation when guest state is invalid If emulate_invalid_guest_state is enabled, the emulator is called when guest state is invalid. Until now, we reported an mmio failure when emulate_instruction() returned EMULATE_DO_MMIO. 
This patch adds the case where emulate_instruction() failed and an MMIO emulation is needed. Signed-off-by: Guillaume Thouvenin Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 816d23185fb8..427dbc14fae9 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3052,16 +3052,12 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, while (!guest_state_valid(vcpu)) { err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); - switch (err) { - case EMULATE_DONE: - break; - case EMULATE_DO_MMIO: - kvm_report_emulation_failure(vcpu, "mmio"); - /* TODO: Handle MMIO */ - return; - default: - kvm_report_emulation_failure(vcpu, "emulation failure"); - return; + if (err == EMULATE_DO_MMIO) + break; + + if (err != EMULATE_DONE) { + kvm_report_emulation_failure(vcpu, "emulation failure"); + return; } if (signal_pending(current)) @@ -3073,8 +3069,10 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, local_irq_disable(); preempt_disable(); - /* Guest state should be valid now, no more emulation should be needed */ - vmx->emulation_required = 0; + /* Guest state should be valid now except if we need to + * emulate an MMIO */ + if (guest_state_valid(vcpu)) + vmx->emulation_required = 0; } /* @@ -3121,6 +3119,11 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu), (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit); + /* If we need to emulate an MMIO from handle_invalid_guest_state + * we just return 0 */ + if (vmx->emulation_required && emulate_invalid_guest_state) + return 0; + /* Access CR3 don't cause VMExit in paging mode, so we need * to sync with guest real CR3. */ if (vm_need_ept() && is_paging(vcpu)) { -- cgit v1.2.3 From 6eb55818c043b097c83828da8430fcb9a02fdb89 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Fri, 31 Oct 2008 12:37:41 +0800 Subject: KVM: Enable Function Level Reset for assigned device Ideally, every assigned device should in a clear condition before and after assignment, so that the former state of device won't affect later work. Some devices provide a mechanism named Function Level Reset, which is defined in PCI/PCI-e document. We should execute it before and after device assignment. (But sadly, the feature is new, and most device on the market now don't support it. We are considering using D0/D3hot transmit to emulate it later, but not that elegant and reliable as FLR itself.) [Update: Reminded by Xiantao, execute FLR after we ensure that the device can be assigned to the guest.] 
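For orientation, the reset itself is one call into the PCI core at each end of the device's lifetime; a sketch of the two call sites the patch adds (variable names as in virt/kvm/kvm_main.c):

/* Assignment: reset only after enable + region claiming succeed, so a
 * device that cannot be assigned is never touched. */
pci_reset_function(dev);

/* Teardown: reset before releasing regions and disabling, so the function
 * is clean for its next user (host driver or another guest). */
pci_reset_function(assigned_dev->dev);
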
Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 2 +- virt/kvm/kvm_main.c | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 38f79b6aaf1e..9a4a39cfe6ef 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4148,8 +4148,8 @@ static void kvm_free_vcpus(struct kvm *kvm) void kvm_arch_destroy_vm(struct kvm *kvm) { - kvm_iommu_unmap_guest(kvm); kvm_free_all_assigned_devices(kvm); + kvm_iommu_unmap_guest(kvm); kvm_free_pit(kvm); kfree(kvm->arch.vpic); kfree(kvm->arch.vioapic); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4f43abe198e4..1838052f3c9e 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -152,6 +152,8 @@ static void kvm_free_assigned_device(struct kvm *kvm, */ kvm_put_kvm(kvm); + pci_reset_function(assigned_dev->dev); + pci_release_regions(assigned_dev->dev); pci_disable_device(assigned_dev->dev); pci_dev_put(assigned_dev->dev); @@ -283,6 +285,9 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, __func__); goto out_disable; } + + pci_reset_function(dev); + match->assigned_dev_id = assigned_dev->assigned_dev_id; match->host_busnr = assigned_dev->busnr; match->host_devfn = assigned_dev->devfn; -- cgit v1.2.3 From 2843099fee32a6020e1caa95c6026f28b5d43bff Mon Sep 17 00:00:00 2001 From: Izik Eidus Date: Fri, 3 Oct 2008 17:40:32 +0300 Subject: KVM: MMU: Fix aliased gfns treated as unaliased Some areas of kvm x86 mmu are using gfn offset inside a slot without unaliasing the gfn first. This patch makes sure that the gfn will be unaliased and add gfn_to_memslot_unaliased() to save the calculating of the gfn unaliasing in case we have it unaliased already. Signed-off-by: Izik Eidus Acked-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/mmu.c | 14 ++++++++++---- virt/kvm/kvm_main.c | 9 +++++---- 3 files changed, 17 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 09e6c56572cb..99e3cc149d21 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -617,6 +617,8 @@ void kvm_disable_tdp(void); int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); int complete_pio(struct kvm_vcpu *vcpu); +struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); + static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) { struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8687758b5295..8904e8ada978 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -386,7 +386,9 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn) { int *write_count; - write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn)); + gfn = unalias_gfn(kvm, gfn); + write_count = slot_largepage_idx(gfn, + gfn_to_memslot_unaliased(kvm, gfn)); *write_count += 1; } @@ -394,16 +396,20 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) { int *write_count; - write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn)); + gfn = unalias_gfn(kvm, gfn); + write_count = slot_largepage_idx(gfn, + gfn_to_memslot_unaliased(kvm, gfn)); *write_count -= 1; WARN_ON(*write_count < 0); } static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn) { - struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); + struct kvm_memory_slot *slot; int *largepage_idx; + gfn = unalias_gfn(kvm, gfn); + slot = 
gfn_to_memslot_unaliased(kvm, gfn); if (slot) { largepage_idx = slot_largepage_idx(gfn, slot); return *largepage_idx; @@ -2973,8 +2979,8 @@ static void audit_write_protection(struct kvm_vcpu *vcpu) if (sp->role.metaphysical) continue; - slot = gfn_to_memslot(vcpu->kvm, sp->gfn); gfn = unalias_gfn(vcpu->kvm, sp->gfn); + slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn); rmapp = &slot->rmap[gfn - slot->base_gfn]; if (*rmapp) printk(KERN_ERR "%s: (%s) shadow page has writable" diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1838052f3c9e..a65baa9039d5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -923,7 +923,7 @@ int kvm_is_error_hva(unsigned long addr) } EXPORT_SYMBOL_GPL(kvm_is_error_hva); -static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn) +struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) { int i; @@ -936,11 +936,12 @@ static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn) } return NULL; } +EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) { gfn = unalias_gfn(kvm, gfn); - return __gfn_to_memslot(kvm, gfn); + return gfn_to_memslot_unaliased(kvm, gfn); } int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) @@ -964,7 +965,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) struct kvm_memory_slot *slot; gfn = unalias_gfn(kvm, gfn); - slot = __gfn_to_memslot(kvm, gfn); + slot = gfn_to_memslot_unaliased(kvm, gfn); if (!slot) return bad_hva(); return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE); @@ -1215,7 +1216,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn) struct kvm_memory_slot *memslot; gfn = unalias_gfn(kvm, gfn); - memslot = __gfn_to_memslot(kvm, gfn); + memslot = gfn_to_memslot_unaliased(kvm, gfn); if (memslot && memslot->dirty_bitmap) { unsigned long rel_gfn = gfn - memslot->base_gfn; -- cgit v1.2.3 From 0853d2c1d849ef69884d2447d90d04007590b72b Mon Sep 17 00:00:00 2001 From: Nitin A Kamble Date: Wed, 5 Nov 2008 15:37:36 -0800 Subject: KVM: Fix cpuid leaf 0xb loop termination For cpuid leaf 0xb the bits 8-15 in ECX register define the end of counting leaf. The previous code was using bits 0-7 for this purpose, which is a bug. Signed-off-by: Nitin A Kamble Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9a4a39cfe6ef..2889a0f359ea 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1276,7 +1276,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; /* read more entries until level_type is zero */ for (i = 1; *nent < maxnent; ++i) { - level_type = entry[i - 1].ecx & 0xff; + level_type = entry[i - 1].ecx & 0xff00; if (!level_type) break; do_cpuid_1_ent(&entry[i], function, i); -- cgit v1.2.3 From 0fdf8e59faa5c60e9d77c8e14abe3a0f8bfcf586 Mon Sep 17 00:00:00 2001 From: Nitin A Kamble Date: Wed, 5 Nov 2008 15:56:21 -0800 Subject: KVM: Fix cpuid iteration on multiple leaves per eac The code to traverse the cpuid data array list for counting type of leaves is currently broken. This patches fixes the 2 things in it. 1. Set the 1st counting entry's flag KVM_CPUID_FLAG_STATE_READ_NEXT. Without it the code will never find a valid entry. 2. Also the stop condition in the for loop while looking for the next unflaged entry is broken. 
It needs to stop when it find one matching entry; and in the case of count of 1, it will be the same entry found in this iteration. Signed-Off-By: Nitin A Kamble Signed-off-by: Avi Kivity --- arch/x86/kvm/x86.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2889a0f359ea..7a2aeba0bfbd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1246,6 +1246,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, int t, times = entry->eax & 0xff; entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; + entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; for (t = 1; t < times && *nent < maxnent; ++t) { do_cpuid_1_ent(&entry[t], function, 0); entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; @@ -2801,7 +2802,7 @@ static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; /* when no next entry is found, the current entry[i] is reselected */ - for (j = i + 1; j == i; j = (j + 1) % nent) { + for (j = i + 1; ; j = (j + 1) % nent) { struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; if (ej->function == e->function) { ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; -- cgit v1.2.3 From 13673a90f1cf88296f726265cc7cf3ec76ecba30 Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:13 -0200 Subject: KVM: VMX: move vmx.h to include/asm vmx.h will be used by core code that is independent of KVM, so I am moving it outside the arch/x86/kvm directory. Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/include/asm/vmx.h | 367 +++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/mmu.c | 2 +- arch/x86/kvm/vmx.c | 2 +- arch/x86/kvm/vmx.h | 367 --------------------------------------------- 4 files changed, 369 insertions(+), 369 deletions(-) create mode 100644 arch/x86/include/asm/vmx.h delete mode 100644 arch/x86/kvm/vmx.h (limited to 'arch/x86') diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h new file mode 100644 index 000000000000..3db236c26fa4 --- /dev/null +++ b/arch/x86/include/asm/vmx.h @@ -0,0 +1,367 @@ +#ifndef VMX_H +#define VMX_H + +/* + * vmx.h: VMX Architecture related definitions + * Copyright (c) 2004, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + * A few random additions are: + * Copyright (C) 2006 Qumranet + * Avi Kivity + * Yaniv Kamay + * + */ + +/* + * Definitions of Primary Processor-Based VM-Execution Controls. 
+ */ +#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 +#define CPU_BASED_USE_TSC_OFFSETING 0x00000008 +#define CPU_BASED_HLT_EXITING 0x00000080 +#define CPU_BASED_INVLPG_EXITING 0x00000200 +#define CPU_BASED_MWAIT_EXITING 0x00000400 +#define CPU_BASED_RDPMC_EXITING 0x00000800 +#define CPU_BASED_RDTSC_EXITING 0x00001000 +#define CPU_BASED_CR3_LOAD_EXITING 0x00008000 +#define CPU_BASED_CR3_STORE_EXITING 0x00010000 +#define CPU_BASED_CR8_LOAD_EXITING 0x00080000 +#define CPU_BASED_CR8_STORE_EXITING 0x00100000 +#define CPU_BASED_TPR_SHADOW 0x00200000 +#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 +#define CPU_BASED_MOV_DR_EXITING 0x00800000 +#define CPU_BASED_UNCOND_IO_EXITING 0x01000000 +#define CPU_BASED_USE_IO_BITMAPS 0x02000000 +#define CPU_BASED_USE_MSR_BITMAPS 0x10000000 +#define CPU_BASED_MONITOR_EXITING 0x20000000 +#define CPU_BASED_PAUSE_EXITING 0x40000000 +#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 +/* + * Definitions of Secondary Processor-Based VM-Execution Controls. + */ +#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 +#define SECONDARY_EXEC_ENABLE_EPT 0x00000002 +#define SECONDARY_EXEC_ENABLE_VPID 0x00000020 +#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 + + +#define PIN_BASED_EXT_INTR_MASK 0x00000001 +#define PIN_BASED_NMI_EXITING 0x00000008 +#define PIN_BASED_VIRTUAL_NMIS 0x00000020 + +#define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 +#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 +#define VM_EXIT_SAVE_IA32_PAT 0x00040000 +#define VM_EXIT_LOAD_IA32_PAT 0x00080000 + +#define VM_ENTRY_IA32E_MODE 0x00000200 +#define VM_ENTRY_SMM 0x00000400 +#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 +#define VM_ENTRY_LOAD_IA32_PAT 0x00004000 + +/* VMCS Encodings */ +enum vmcs_field { + VIRTUAL_PROCESSOR_ID = 0x00000000, + GUEST_ES_SELECTOR = 0x00000800, + GUEST_CS_SELECTOR = 0x00000802, + GUEST_SS_SELECTOR = 0x00000804, + GUEST_DS_SELECTOR = 0x00000806, + GUEST_FS_SELECTOR = 0x00000808, + GUEST_GS_SELECTOR = 0x0000080a, + GUEST_LDTR_SELECTOR = 0x0000080c, + GUEST_TR_SELECTOR = 0x0000080e, + HOST_ES_SELECTOR = 0x00000c00, + HOST_CS_SELECTOR = 0x00000c02, + HOST_SS_SELECTOR = 0x00000c04, + HOST_DS_SELECTOR = 0x00000c06, + HOST_FS_SELECTOR = 0x00000c08, + HOST_GS_SELECTOR = 0x00000c0a, + HOST_TR_SELECTOR = 0x00000c0c, + IO_BITMAP_A = 0x00002000, + IO_BITMAP_A_HIGH = 0x00002001, + IO_BITMAP_B = 0x00002002, + IO_BITMAP_B_HIGH = 0x00002003, + MSR_BITMAP = 0x00002004, + MSR_BITMAP_HIGH = 0x00002005, + VM_EXIT_MSR_STORE_ADDR = 0x00002006, + VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, + VM_EXIT_MSR_LOAD_ADDR = 0x00002008, + VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, + VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, + VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, + TSC_OFFSET = 0x00002010, + TSC_OFFSET_HIGH = 0x00002011, + VIRTUAL_APIC_PAGE_ADDR = 0x00002012, + VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, + APIC_ACCESS_ADDR = 0x00002014, + APIC_ACCESS_ADDR_HIGH = 0x00002015, + EPT_POINTER = 0x0000201a, + EPT_POINTER_HIGH = 0x0000201b, + GUEST_PHYSICAL_ADDRESS = 0x00002400, + GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, + VMCS_LINK_POINTER = 0x00002800, + VMCS_LINK_POINTER_HIGH = 0x00002801, + GUEST_IA32_DEBUGCTL = 0x00002802, + GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, + GUEST_IA32_PAT = 0x00002804, + GUEST_IA32_PAT_HIGH = 0x00002805, + GUEST_PDPTR0 = 0x0000280a, + GUEST_PDPTR0_HIGH = 0x0000280b, + GUEST_PDPTR1 = 0x0000280c, + GUEST_PDPTR1_HIGH = 0x0000280d, + GUEST_PDPTR2 = 0x0000280e, + GUEST_PDPTR2_HIGH = 0x0000280f, + GUEST_PDPTR3 = 0x00002810, + GUEST_PDPTR3_HIGH = 0x00002811, + HOST_IA32_PAT = 
0x00002c00, + HOST_IA32_PAT_HIGH = 0x00002c01, + PIN_BASED_VM_EXEC_CONTROL = 0x00004000, + CPU_BASED_VM_EXEC_CONTROL = 0x00004002, + EXCEPTION_BITMAP = 0x00004004, + PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, + PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, + CR3_TARGET_COUNT = 0x0000400a, + VM_EXIT_CONTROLS = 0x0000400c, + VM_EXIT_MSR_STORE_COUNT = 0x0000400e, + VM_EXIT_MSR_LOAD_COUNT = 0x00004010, + VM_ENTRY_CONTROLS = 0x00004012, + VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, + VM_ENTRY_INTR_INFO_FIELD = 0x00004016, + VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, + VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, + TPR_THRESHOLD = 0x0000401c, + SECONDARY_VM_EXEC_CONTROL = 0x0000401e, + VM_INSTRUCTION_ERROR = 0x00004400, + VM_EXIT_REASON = 0x00004402, + VM_EXIT_INTR_INFO = 0x00004404, + VM_EXIT_INTR_ERROR_CODE = 0x00004406, + IDT_VECTORING_INFO_FIELD = 0x00004408, + IDT_VECTORING_ERROR_CODE = 0x0000440a, + VM_EXIT_INSTRUCTION_LEN = 0x0000440c, + VMX_INSTRUCTION_INFO = 0x0000440e, + GUEST_ES_LIMIT = 0x00004800, + GUEST_CS_LIMIT = 0x00004802, + GUEST_SS_LIMIT = 0x00004804, + GUEST_DS_LIMIT = 0x00004806, + GUEST_FS_LIMIT = 0x00004808, + GUEST_GS_LIMIT = 0x0000480a, + GUEST_LDTR_LIMIT = 0x0000480c, + GUEST_TR_LIMIT = 0x0000480e, + GUEST_GDTR_LIMIT = 0x00004810, + GUEST_IDTR_LIMIT = 0x00004812, + GUEST_ES_AR_BYTES = 0x00004814, + GUEST_CS_AR_BYTES = 0x00004816, + GUEST_SS_AR_BYTES = 0x00004818, + GUEST_DS_AR_BYTES = 0x0000481a, + GUEST_FS_AR_BYTES = 0x0000481c, + GUEST_GS_AR_BYTES = 0x0000481e, + GUEST_LDTR_AR_BYTES = 0x00004820, + GUEST_TR_AR_BYTES = 0x00004822, + GUEST_INTERRUPTIBILITY_INFO = 0x00004824, + GUEST_ACTIVITY_STATE = 0X00004826, + GUEST_SYSENTER_CS = 0x0000482A, + HOST_IA32_SYSENTER_CS = 0x00004c00, + CR0_GUEST_HOST_MASK = 0x00006000, + CR4_GUEST_HOST_MASK = 0x00006002, + CR0_READ_SHADOW = 0x00006004, + CR4_READ_SHADOW = 0x00006006, + CR3_TARGET_VALUE0 = 0x00006008, + CR3_TARGET_VALUE1 = 0x0000600a, + CR3_TARGET_VALUE2 = 0x0000600c, + CR3_TARGET_VALUE3 = 0x0000600e, + EXIT_QUALIFICATION = 0x00006400, + GUEST_LINEAR_ADDRESS = 0x0000640a, + GUEST_CR0 = 0x00006800, + GUEST_CR3 = 0x00006802, + GUEST_CR4 = 0x00006804, + GUEST_ES_BASE = 0x00006806, + GUEST_CS_BASE = 0x00006808, + GUEST_SS_BASE = 0x0000680a, + GUEST_DS_BASE = 0x0000680c, + GUEST_FS_BASE = 0x0000680e, + GUEST_GS_BASE = 0x00006810, + GUEST_LDTR_BASE = 0x00006812, + GUEST_TR_BASE = 0x00006814, + GUEST_GDTR_BASE = 0x00006816, + GUEST_IDTR_BASE = 0x00006818, + GUEST_DR7 = 0x0000681a, + GUEST_RSP = 0x0000681c, + GUEST_RIP = 0x0000681e, + GUEST_RFLAGS = 0x00006820, + GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, + GUEST_SYSENTER_ESP = 0x00006824, + GUEST_SYSENTER_EIP = 0x00006826, + HOST_CR0 = 0x00006c00, + HOST_CR3 = 0x00006c02, + HOST_CR4 = 0x00006c04, + HOST_FS_BASE = 0x00006c06, + HOST_GS_BASE = 0x00006c08, + HOST_TR_BASE = 0x00006c0a, + HOST_GDTR_BASE = 0x00006c0c, + HOST_IDTR_BASE = 0x00006c0e, + HOST_IA32_SYSENTER_ESP = 0x00006c10, + HOST_IA32_SYSENTER_EIP = 0x00006c12, + HOST_RSP = 0x00006c14, + HOST_RIP = 0x00006c16, +}; + +#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 + +#define EXIT_REASON_EXCEPTION_NMI 0 +#define EXIT_REASON_EXTERNAL_INTERRUPT 1 +#define EXIT_REASON_TRIPLE_FAULT 2 + +#define EXIT_REASON_PENDING_INTERRUPT 7 +#define EXIT_REASON_NMI_WINDOW 8 +#define EXIT_REASON_TASK_SWITCH 9 +#define EXIT_REASON_CPUID 10 +#define EXIT_REASON_HLT 12 +#define EXIT_REASON_INVLPG 14 +#define EXIT_REASON_RDPMC 15 +#define EXIT_REASON_RDTSC 16 +#define EXIT_REASON_VMCALL 18 +#define EXIT_REASON_VMCLEAR 19 +#define EXIT_REASON_VMLAUNCH 20 
+#define EXIT_REASON_VMPTRLD 21 +#define EXIT_REASON_VMPTRST 22 +#define EXIT_REASON_VMREAD 23 +#define EXIT_REASON_VMRESUME 24 +#define EXIT_REASON_VMWRITE 25 +#define EXIT_REASON_VMOFF 26 +#define EXIT_REASON_VMON 27 +#define EXIT_REASON_CR_ACCESS 28 +#define EXIT_REASON_DR_ACCESS 29 +#define EXIT_REASON_IO_INSTRUCTION 30 +#define EXIT_REASON_MSR_READ 31 +#define EXIT_REASON_MSR_WRITE 32 +#define EXIT_REASON_MWAIT_INSTRUCTION 36 +#define EXIT_REASON_TPR_BELOW_THRESHOLD 43 +#define EXIT_REASON_APIC_ACCESS 44 +#define EXIT_REASON_EPT_VIOLATION 48 +#define EXIT_REASON_EPT_MISCONFIG 49 +#define EXIT_REASON_WBINVD 54 + +/* + * Interruption-information format + */ +#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */ +#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */ +#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */ +#define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */ +#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */ +#define INTR_INFO_RESVD_BITS_MASK 0x7ffff000 + +#define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK +#define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK +#define VECTORING_INFO_DELIVER_CODE_MASK INTR_INFO_DELIVER_CODE_MASK +#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK + +#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ +#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ +#define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ +#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ + +/* GUEST_INTERRUPTIBILITY_INFO flags. */ +#define GUEST_INTR_STATE_STI 0x00000001 +#define GUEST_INTR_STATE_MOV_SS 0x00000002 +#define GUEST_INTR_STATE_SMI 0x00000004 +#define GUEST_INTR_STATE_NMI 0x00000008 + +/* + * Exit Qualifications for MOV for Control Register Access + */ +#define CONTROL_REG_ACCESS_NUM 0x7 /* 2:0, number of control reg.*/ +#define CONTROL_REG_ACCESS_TYPE 0x30 /* 5:4, access type */ +#define CONTROL_REG_ACCESS_REG 0xf00 /* 10:8, general purpose reg. */ +#define LMSW_SOURCE_DATA_SHIFT 16 +#define LMSW_SOURCE_DATA (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */ +#define REG_EAX (0 << 8) +#define REG_ECX (1 << 8) +#define REG_EDX (2 << 8) +#define REG_EBX (3 << 8) +#define REG_ESP (4 << 8) +#define REG_EBP (5 << 8) +#define REG_ESI (6 << 8) +#define REG_EDI (7 << 8) +#define REG_R8 (8 << 8) +#define REG_R9 (9 << 8) +#define REG_R10 (10 << 8) +#define REG_R11 (11 << 8) +#define REG_R12 (12 << 8) +#define REG_R13 (13 << 8) +#define REG_R14 (14 << 8) +#define REG_R15 (15 << 8) + +/* + * Exit Qualifications for MOV for Debug Register Access + */ +#define DEBUG_REG_ACCESS_NUM 0x7 /* 2:0, number of debug reg. */ +#define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */ +#define TYPE_MOV_TO_DR (0 << 4) +#define TYPE_MOV_FROM_DR (1 << 4) +#define DEBUG_REG_ACCESS_REG 0xf00 /* 11:8, general purpose reg. 
*/ + + +/* segment AR */ +#define SEGMENT_AR_L_MASK (1 << 13) + +#define AR_TYPE_ACCESSES_MASK 1 +#define AR_TYPE_READABLE_MASK (1 << 1) +#define AR_TYPE_WRITEABLE_MASK (1 << 2) +#define AR_TYPE_CODE_MASK (1 << 3) +#define AR_TYPE_MASK 0x0f +#define AR_TYPE_BUSY_64_TSS 11 +#define AR_TYPE_BUSY_32_TSS 11 +#define AR_TYPE_BUSY_16_TSS 3 +#define AR_TYPE_LDT 2 + +#define AR_UNUSABLE_MASK (1 << 16) +#define AR_S_MASK (1 << 4) +#define AR_P_MASK (1 << 7) +#define AR_L_MASK (1 << 13) +#define AR_DB_MASK (1 << 14) +#define AR_G_MASK (1 << 15) +#define AR_DPL_SHIFT 5 +#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3) + +#define AR_RESERVD_MASK 0xfffe0f00 + +#define TSS_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 0) +#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 1) +#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 2) + +#define VMX_NR_VPIDS (1 << 16) +#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 +#define VMX_VPID_EXTENT_ALL_CONTEXT 2 + +#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0 +#define VMX_EPT_EXTENT_CONTEXT 1 +#define VMX_EPT_EXTENT_GLOBAL 2 +#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) +#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) +#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) +#define VMX_EPT_DEFAULT_GAW 3 +#define VMX_EPT_MAX_GAW 0x4 +#define VMX_EPT_MT_EPTE_SHIFT 3 +#define VMX_EPT_GAW_EPTP_SHIFT 3 +#define VMX_EPT_DEFAULT_MT 0x6ull +#define VMX_EPT_READABLE_MASK 0x1ull +#define VMX_EPT_WRITABLE_MASK 0x2ull +#define VMX_EPT_EXECUTABLE_MASK 0x4ull +#define VMX_EPT_IGMT_BIT (1ull << 6) + +#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul + +#endif diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8904e8ada978..fa3486d64078 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -17,7 +17,6 @@ * */ -#include "vmx.h" #include "mmu.h" #include @@ -33,6 +32,7 @@ #include #include #include +#include /* * When setting this variable to true it enables Two-Dimensional-Paging diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 427dbc14fae9..ec71f6464cfa 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -16,7 +16,6 @@ */ #include "irq.h" -#include "vmx.h" #include "mmu.h" #include @@ -31,6 +30,7 @@ #include #include +#include #define __ex(x) __kvm_handle_fault_on_reboot(x) diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h deleted file mode 100644 index 3db236c26fa4..000000000000 --- a/arch/x86/kvm/vmx.h +++ /dev/null @@ -1,367 +0,0 @@ -#ifndef VMX_H -#define VMX_H - -/* - * vmx.h: VMX Architecture related definitions - * Copyright (c) 2004, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - * A few random additions are: - * Copyright (C) 2006 Qumranet - * Avi Kivity - * Yaniv Kamay - * - */ - -/* - * Definitions of Primary Processor-Based VM-Execution Controls. 
- */ -#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004 -#define CPU_BASED_USE_TSC_OFFSETING 0x00000008 -#define CPU_BASED_HLT_EXITING 0x00000080 -#define CPU_BASED_INVLPG_EXITING 0x00000200 -#define CPU_BASED_MWAIT_EXITING 0x00000400 -#define CPU_BASED_RDPMC_EXITING 0x00000800 -#define CPU_BASED_RDTSC_EXITING 0x00001000 -#define CPU_BASED_CR3_LOAD_EXITING 0x00008000 -#define CPU_BASED_CR3_STORE_EXITING 0x00010000 -#define CPU_BASED_CR8_LOAD_EXITING 0x00080000 -#define CPU_BASED_CR8_STORE_EXITING 0x00100000 -#define CPU_BASED_TPR_SHADOW 0x00200000 -#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000 -#define CPU_BASED_MOV_DR_EXITING 0x00800000 -#define CPU_BASED_UNCOND_IO_EXITING 0x01000000 -#define CPU_BASED_USE_IO_BITMAPS 0x02000000 -#define CPU_BASED_USE_MSR_BITMAPS 0x10000000 -#define CPU_BASED_MONITOR_EXITING 0x20000000 -#define CPU_BASED_PAUSE_EXITING 0x40000000 -#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 -/* - * Definitions of Secondary Processor-Based VM-Execution Controls. - */ -#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 -#define SECONDARY_EXEC_ENABLE_EPT 0x00000002 -#define SECONDARY_EXEC_ENABLE_VPID 0x00000020 -#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 - - -#define PIN_BASED_EXT_INTR_MASK 0x00000001 -#define PIN_BASED_NMI_EXITING 0x00000008 -#define PIN_BASED_VIRTUAL_NMIS 0x00000020 - -#define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 -#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 -#define VM_EXIT_SAVE_IA32_PAT 0x00040000 -#define VM_EXIT_LOAD_IA32_PAT 0x00080000 - -#define VM_ENTRY_IA32E_MODE 0x00000200 -#define VM_ENTRY_SMM 0x00000400 -#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 -#define VM_ENTRY_LOAD_IA32_PAT 0x00004000 - -/* VMCS Encodings */ -enum vmcs_field { - VIRTUAL_PROCESSOR_ID = 0x00000000, - GUEST_ES_SELECTOR = 0x00000800, - GUEST_CS_SELECTOR = 0x00000802, - GUEST_SS_SELECTOR = 0x00000804, - GUEST_DS_SELECTOR = 0x00000806, - GUEST_FS_SELECTOR = 0x00000808, - GUEST_GS_SELECTOR = 0x0000080a, - GUEST_LDTR_SELECTOR = 0x0000080c, - GUEST_TR_SELECTOR = 0x0000080e, - HOST_ES_SELECTOR = 0x00000c00, - HOST_CS_SELECTOR = 0x00000c02, - HOST_SS_SELECTOR = 0x00000c04, - HOST_DS_SELECTOR = 0x00000c06, - HOST_FS_SELECTOR = 0x00000c08, - HOST_GS_SELECTOR = 0x00000c0a, - HOST_TR_SELECTOR = 0x00000c0c, - IO_BITMAP_A = 0x00002000, - IO_BITMAP_A_HIGH = 0x00002001, - IO_BITMAP_B = 0x00002002, - IO_BITMAP_B_HIGH = 0x00002003, - MSR_BITMAP = 0x00002004, - MSR_BITMAP_HIGH = 0x00002005, - VM_EXIT_MSR_STORE_ADDR = 0x00002006, - VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, - VM_EXIT_MSR_LOAD_ADDR = 0x00002008, - VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, - VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, - VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, - TSC_OFFSET = 0x00002010, - TSC_OFFSET_HIGH = 0x00002011, - VIRTUAL_APIC_PAGE_ADDR = 0x00002012, - VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, - APIC_ACCESS_ADDR = 0x00002014, - APIC_ACCESS_ADDR_HIGH = 0x00002015, - EPT_POINTER = 0x0000201a, - EPT_POINTER_HIGH = 0x0000201b, - GUEST_PHYSICAL_ADDRESS = 0x00002400, - GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, - VMCS_LINK_POINTER = 0x00002800, - VMCS_LINK_POINTER_HIGH = 0x00002801, - GUEST_IA32_DEBUGCTL = 0x00002802, - GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, - GUEST_IA32_PAT = 0x00002804, - GUEST_IA32_PAT_HIGH = 0x00002805, - GUEST_PDPTR0 = 0x0000280a, - GUEST_PDPTR0_HIGH = 0x0000280b, - GUEST_PDPTR1 = 0x0000280c, - GUEST_PDPTR1_HIGH = 0x0000280d, - GUEST_PDPTR2 = 0x0000280e, - GUEST_PDPTR2_HIGH = 0x0000280f, - GUEST_PDPTR3 = 0x00002810, - GUEST_PDPTR3_HIGH = 0x00002811, - HOST_IA32_PAT = 
0x00002c00, - HOST_IA32_PAT_HIGH = 0x00002c01, - PIN_BASED_VM_EXEC_CONTROL = 0x00004000, - CPU_BASED_VM_EXEC_CONTROL = 0x00004002, - EXCEPTION_BITMAP = 0x00004004, - PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, - PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, - CR3_TARGET_COUNT = 0x0000400a, - VM_EXIT_CONTROLS = 0x0000400c, - VM_EXIT_MSR_STORE_COUNT = 0x0000400e, - VM_EXIT_MSR_LOAD_COUNT = 0x00004010, - VM_ENTRY_CONTROLS = 0x00004012, - VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, - VM_ENTRY_INTR_INFO_FIELD = 0x00004016, - VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, - VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, - TPR_THRESHOLD = 0x0000401c, - SECONDARY_VM_EXEC_CONTROL = 0x0000401e, - VM_INSTRUCTION_ERROR = 0x00004400, - VM_EXIT_REASON = 0x00004402, - VM_EXIT_INTR_INFO = 0x00004404, - VM_EXIT_INTR_ERROR_CODE = 0x00004406, - IDT_VECTORING_INFO_FIELD = 0x00004408, - IDT_VECTORING_ERROR_CODE = 0x0000440a, - VM_EXIT_INSTRUCTION_LEN = 0x0000440c, - VMX_INSTRUCTION_INFO = 0x0000440e, - GUEST_ES_LIMIT = 0x00004800, - GUEST_CS_LIMIT = 0x00004802, - GUEST_SS_LIMIT = 0x00004804, - GUEST_DS_LIMIT = 0x00004806, - GUEST_FS_LIMIT = 0x00004808, - GUEST_GS_LIMIT = 0x0000480a, - GUEST_LDTR_LIMIT = 0x0000480c, - GUEST_TR_LIMIT = 0x0000480e, - GUEST_GDTR_LIMIT = 0x00004810, - GUEST_IDTR_LIMIT = 0x00004812, - GUEST_ES_AR_BYTES = 0x00004814, - GUEST_CS_AR_BYTES = 0x00004816, - GUEST_SS_AR_BYTES = 0x00004818, - GUEST_DS_AR_BYTES = 0x0000481a, - GUEST_FS_AR_BYTES = 0x0000481c, - GUEST_GS_AR_BYTES = 0x0000481e, - GUEST_LDTR_AR_BYTES = 0x00004820, - GUEST_TR_AR_BYTES = 0x00004822, - GUEST_INTERRUPTIBILITY_INFO = 0x00004824, - GUEST_ACTIVITY_STATE = 0X00004826, - GUEST_SYSENTER_CS = 0x0000482A, - HOST_IA32_SYSENTER_CS = 0x00004c00, - CR0_GUEST_HOST_MASK = 0x00006000, - CR4_GUEST_HOST_MASK = 0x00006002, - CR0_READ_SHADOW = 0x00006004, - CR4_READ_SHADOW = 0x00006006, - CR3_TARGET_VALUE0 = 0x00006008, - CR3_TARGET_VALUE1 = 0x0000600a, - CR3_TARGET_VALUE2 = 0x0000600c, - CR3_TARGET_VALUE3 = 0x0000600e, - EXIT_QUALIFICATION = 0x00006400, - GUEST_LINEAR_ADDRESS = 0x0000640a, - GUEST_CR0 = 0x00006800, - GUEST_CR3 = 0x00006802, - GUEST_CR4 = 0x00006804, - GUEST_ES_BASE = 0x00006806, - GUEST_CS_BASE = 0x00006808, - GUEST_SS_BASE = 0x0000680a, - GUEST_DS_BASE = 0x0000680c, - GUEST_FS_BASE = 0x0000680e, - GUEST_GS_BASE = 0x00006810, - GUEST_LDTR_BASE = 0x00006812, - GUEST_TR_BASE = 0x00006814, - GUEST_GDTR_BASE = 0x00006816, - GUEST_IDTR_BASE = 0x00006818, - GUEST_DR7 = 0x0000681a, - GUEST_RSP = 0x0000681c, - GUEST_RIP = 0x0000681e, - GUEST_RFLAGS = 0x00006820, - GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, - GUEST_SYSENTER_ESP = 0x00006824, - GUEST_SYSENTER_EIP = 0x00006826, - HOST_CR0 = 0x00006c00, - HOST_CR3 = 0x00006c02, - HOST_CR4 = 0x00006c04, - HOST_FS_BASE = 0x00006c06, - HOST_GS_BASE = 0x00006c08, - HOST_TR_BASE = 0x00006c0a, - HOST_GDTR_BASE = 0x00006c0c, - HOST_IDTR_BASE = 0x00006c0e, - HOST_IA32_SYSENTER_ESP = 0x00006c10, - HOST_IA32_SYSENTER_EIP = 0x00006c12, - HOST_RSP = 0x00006c14, - HOST_RIP = 0x00006c16, -}; - -#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 - -#define EXIT_REASON_EXCEPTION_NMI 0 -#define EXIT_REASON_EXTERNAL_INTERRUPT 1 -#define EXIT_REASON_TRIPLE_FAULT 2 - -#define EXIT_REASON_PENDING_INTERRUPT 7 -#define EXIT_REASON_NMI_WINDOW 8 -#define EXIT_REASON_TASK_SWITCH 9 -#define EXIT_REASON_CPUID 10 -#define EXIT_REASON_HLT 12 -#define EXIT_REASON_INVLPG 14 -#define EXIT_REASON_RDPMC 15 -#define EXIT_REASON_RDTSC 16 -#define EXIT_REASON_VMCALL 18 -#define EXIT_REASON_VMCLEAR 19 -#define EXIT_REASON_VMLAUNCH 20 
-#define EXIT_REASON_VMPTRLD 21 -#define EXIT_REASON_VMPTRST 22 -#define EXIT_REASON_VMREAD 23 -#define EXIT_REASON_VMRESUME 24 -#define EXIT_REASON_VMWRITE 25 -#define EXIT_REASON_VMOFF 26 -#define EXIT_REASON_VMON 27 -#define EXIT_REASON_CR_ACCESS 28 -#define EXIT_REASON_DR_ACCESS 29 -#define EXIT_REASON_IO_INSTRUCTION 30 -#define EXIT_REASON_MSR_READ 31 -#define EXIT_REASON_MSR_WRITE 32 -#define EXIT_REASON_MWAIT_INSTRUCTION 36 -#define EXIT_REASON_TPR_BELOW_THRESHOLD 43 -#define EXIT_REASON_APIC_ACCESS 44 -#define EXIT_REASON_EPT_VIOLATION 48 -#define EXIT_REASON_EPT_MISCONFIG 49 -#define EXIT_REASON_WBINVD 54 - -/* - * Interruption-information format - */ -#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */ -#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */ -#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */ -#define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */ -#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */ -#define INTR_INFO_RESVD_BITS_MASK 0x7ffff000 - -#define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK -#define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK -#define VECTORING_INFO_DELIVER_CODE_MASK INTR_INFO_DELIVER_CODE_MASK -#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK - -#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */ -#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ -#define INTR_TYPE_EXCEPTION (3 << 8) /* processor exception */ -#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ - -/* GUEST_INTERRUPTIBILITY_INFO flags. */ -#define GUEST_INTR_STATE_STI 0x00000001 -#define GUEST_INTR_STATE_MOV_SS 0x00000002 -#define GUEST_INTR_STATE_SMI 0x00000004 -#define GUEST_INTR_STATE_NMI 0x00000008 - -/* - * Exit Qualifications for MOV for Control Register Access - */ -#define CONTROL_REG_ACCESS_NUM 0x7 /* 2:0, number of control reg.*/ -#define CONTROL_REG_ACCESS_TYPE 0x30 /* 5:4, access type */ -#define CONTROL_REG_ACCESS_REG 0xf00 /* 10:8, general purpose reg. */ -#define LMSW_SOURCE_DATA_SHIFT 16 -#define LMSW_SOURCE_DATA (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */ -#define REG_EAX (0 << 8) -#define REG_ECX (1 << 8) -#define REG_EDX (2 << 8) -#define REG_EBX (3 << 8) -#define REG_ESP (4 << 8) -#define REG_EBP (5 << 8) -#define REG_ESI (6 << 8) -#define REG_EDI (7 << 8) -#define REG_R8 (8 << 8) -#define REG_R9 (9 << 8) -#define REG_R10 (10 << 8) -#define REG_R11 (11 << 8) -#define REG_R12 (12 << 8) -#define REG_R13 (13 << 8) -#define REG_R14 (14 << 8) -#define REG_R15 (15 << 8) - -/* - * Exit Qualifications for MOV for Debug Register Access - */ -#define DEBUG_REG_ACCESS_NUM 0x7 /* 2:0, number of debug reg. */ -#define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */ -#define TYPE_MOV_TO_DR (0 << 4) -#define TYPE_MOV_FROM_DR (1 << 4) -#define DEBUG_REG_ACCESS_REG 0xf00 /* 11:8, general purpose reg. 
*/ - - -/* segment AR */ -#define SEGMENT_AR_L_MASK (1 << 13) - -#define AR_TYPE_ACCESSES_MASK 1 -#define AR_TYPE_READABLE_MASK (1 << 1) -#define AR_TYPE_WRITEABLE_MASK (1 << 2) -#define AR_TYPE_CODE_MASK (1 << 3) -#define AR_TYPE_MASK 0x0f -#define AR_TYPE_BUSY_64_TSS 11 -#define AR_TYPE_BUSY_32_TSS 11 -#define AR_TYPE_BUSY_16_TSS 3 -#define AR_TYPE_LDT 2 - -#define AR_UNUSABLE_MASK (1 << 16) -#define AR_S_MASK (1 << 4) -#define AR_P_MASK (1 << 7) -#define AR_L_MASK (1 << 13) -#define AR_DB_MASK (1 << 14) -#define AR_G_MASK (1 << 15) -#define AR_DPL_SHIFT 5 -#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3) - -#define AR_RESERVD_MASK 0xfffe0f00 - -#define TSS_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 0) -#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 1) -#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 2) - -#define VMX_NR_VPIDS (1 << 16) -#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 -#define VMX_VPID_EXTENT_ALL_CONTEXT 2 - -#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0 -#define VMX_EPT_EXTENT_CONTEXT 1 -#define VMX_EPT_EXTENT_GLOBAL 2 -#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) -#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) -#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) -#define VMX_EPT_DEFAULT_GAW 3 -#define VMX_EPT_MAX_GAW 0x4 -#define VMX_EPT_MT_EPTE_SHIFT 3 -#define VMX_EPT_GAW_EPTP_SHIFT 3 -#define VMX_EPT_DEFAULT_MT 0x6ull -#define VMX_EPT_READABLE_MASK 0x1ull -#define VMX_EPT_WRITABLE_MASK 0x2ull -#define VMX_EPT_EXECUTABLE_MASK 0x4ull -#define VMX_EPT_IGMT_BIT (1ull << 6) - -#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul - -#endif -- cgit v1.2.3 From c2cedf7be2017e3264c93a4c0d75b1d96d0d7104 Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:14 -0200 Subject: KVM: SVM: move svm.h to include/asm svm.h will be used by core code that is independent of KVM, so I am moving it outside the arch/x86/kvm directory. 
Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/include/asm/svm.h | 328 +++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/kvm_svm.h | 2 +- arch/x86/kvm/svm.h | 328 --------------------------------------------- 3 files changed, 329 insertions(+), 329 deletions(-) create mode 100644 arch/x86/include/asm/svm.h delete mode 100644 arch/x86/kvm/svm.h (limited to 'arch/x86') diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h new file mode 100644 index 000000000000..1b8afa78e869 --- /dev/null +++ b/arch/x86/include/asm/svm.h @@ -0,0 +1,328 @@ +#ifndef __SVM_H +#define __SVM_H + +enum { + INTERCEPT_INTR, + INTERCEPT_NMI, + INTERCEPT_SMI, + INTERCEPT_INIT, + INTERCEPT_VINTR, + INTERCEPT_SELECTIVE_CR0, + INTERCEPT_STORE_IDTR, + INTERCEPT_STORE_GDTR, + INTERCEPT_STORE_LDTR, + INTERCEPT_STORE_TR, + INTERCEPT_LOAD_IDTR, + INTERCEPT_LOAD_GDTR, + INTERCEPT_LOAD_LDTR, + INTERCEPT_LOAD_TR, + INTERCEPT_RDTSC, + INTERCEPT_RDPMC, + INTERCEPT_PUSHF, + INTERCEPT_POPF, + INTERCEPT_CPUID, + INTERCEPT_RSM, + INTERCEPT_IRET, + INTERCEPT_INTn, + INTERCEPT_INVD, + INTERCEPT_PAUSE, + INTERCEPT_HLT, + INTERCEPT_INVLPG, + INTERCEPT_INVLPGA, + INTERCEPT_IOIO_PROT, + INTERCEPT_MSR_PROT, + INTERCEPT_TASK_SWITCH, + INTERCEPT_FERR_FREEZE, + INTERCEPT_SHUTDOWN, + INTERCEPT_VMRUN, + INTERCEPT_VMMCALL, + INTERCEPT_VMLOAD, + INTERCEPT_VMSAVE, + INTERCEPT_STGI, + INTERCEPT_CLGI, + INTERCEPT_SKINIT, + INTERCEPT_RDTSCP, + INTERCEPT_ICEBP, + INTERCEPT_WBINVD, + INTERCEPT_MONITOR, + INTERCEPT_MWAIT, + INTERCEPT_MWAIT_COND, +}; + + +struct __attribute__ ((__packed__)) vmcb_control_area { + u16 intercept_cr_read; + u16 intercept_cr_write; + u16 intercept_dr_read; + u16 intercept_dr_write; + u32 intercept_exceptions; + u64 intercept; + u8 reserved_1[44]; + u64 iopm_base_pa; + u64 msrpm_base_pa; + u64 tsc_offset; + u32 asid; + u8 tlb_ctl; + u8 reserved_2[3]; + u32 int_ctl; + u32 int_vector; + u32 int_state; + u8 reserved_3[4]; + u32 exit_code; + u32 exit_code_hi; + u64 exit_info_1; + u64 exit_info_2; + u32 exit_int_info; + u32 exit_int_info_err; + u64 nested_ctl; + u8 reserved_4[16]; + u32 event_inj; + u32 event_inj_err; + u64 nested_cr3; + u64 lbr_ctl; + u8 reserved_5[832]; +}; + + +#define TLB_CONTROL_DO_NOTHING 0 +#define TLB_CONTROL_FLUSH_ALL_ASID 1 + +#define V_TPR_MASK 0x0f + +#define V_IRQ_SHIFT 8 +#define V_IRQ_MASK (1 << V_IRQ_SHIFT) + +#define V_INTR_PRIO_SHIFT 16 +#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT) + +#define V_IGN_TPR_SHIFT 20 +#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) + +#define V_INTR_MASKING_SHIFT 24 +#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) + +#define SVM_INTERRUPT_SHADOW_MASK 1 + +#define SVM_IOIO_STR_SHIFT 2 +#define SVM_IOIO_REP_SHIFT 3 +#define SVM_IOIO_SIZE_SHIFT 4 +#define SVM_IOIO_ASIZE_SHIFT 7 + +#define SVM_IOIO_TYPE_MASK 1 +#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT) +#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT) +#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT) +#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT) + +struct __attribute__ ((__packed__)) vmcb_seg { + u16 selector; + u16 attrib; + u32 limit; + u64 base; +}; + +struct __attribute__ ((__packed__)) vmcb_save_area { + struct vmcb_seg es; + struct vmcb_seg cs; + struct vmcb_seg ss; + struct vmcb_seg ds; + struct vmcb_seg fs; + struct vmcb_seg gs; + struct vmcb_seg gdtr; + struct vmcb_seg ldtr; + struct vmcb_seg idtr; + struct vmcb_seg tr; + u8 reserved_1[43]; + u8 cpl; + u8 reserved_2[4]; + u64 efer; + u8 reserved_3[112]; + u64 cr4; + u64 cr3; + 
u64 cr0; + u64 dr7; + u64 dr6; + u64 rflags; + u64 rip; + u8 reserved_4[88]; + u64 rsp; + u8 reserved_5[24]; + u64 rax; + u64 star; + u64 lstar; + u64 cstar; + u64 sfmask; + u64 kernel_gs_base; + u64 sysenter_cs; + u64 sysenter_esp; + u64 sysenter_eip; + u64 cr2; + u8 reserved_6[32]; + u64 g_pat; + u64 dbgctl; + u64 br_from; + u64 br_to; + u64 last_excp_from; + u64 last_excp_to; +}; + +struct __attribute__ ((__packed__)) vmcb { + struct vmcb_control_area control; + struct vmcb_save_area save; +}; + +#define SVM_CPUID_FEATURE_SHIFT 2 +#define SVM_CPUID_FUNC 0x8000000a + +#define MSR_EFER_SVME_MASK (1ULL << 12) +#define MSR_VM_CR 0xc0010114 +#define MSR_VM_HSAVE_PA 0xc0010117ULL + +#define SVM_VM_CR_SVM_DISABLE 4 + +#define SVM_SELECTOR_S_SHIFT 4 +#define SVM_SELECTOR_DPL_SHIFT 5 +#define SVM_SELECTOR_P_SHIFT 7 +#define SVM_SELECTOR_AVL_SHIFT 8 +#define SVM_SELECTOR_L_SHIFT 9 +#define SVM_SELECTOR_DB_SHIFT 10 +#define SVM_SELECTOR_G_SHIFT 11 + +#define SVM_SELECTOR_TYPE_MASK (0xf) +#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT) +#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT) +#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT) +#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT) +#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT) +#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT) +#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT) + +#define SVM_SELECTOR_WRITE_MASK (1 << 1) +#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK +#define SVM_SELECTOR_CODE_MASK (1 << 3) + +#define INTERCEPT_CR0_MASK 1 +#define INTERCEPT_CR3_MASK (1 << 3) +#define INTERCEPT_CR4_MASK (1 << 4) +#define INTERCEPT_CR8_MASK (1 << 8) + +#define INTERCEPT_DR0_MASK 1 +#define INTERCEPT_DR1_MASK (1 << 1) +#define INTERCEPT_DR2_MASK (1 << 2) +#define INTERCEPT_DR3_MASK (1 << 3) +#define INTERCEPT_DR4_MASK (1 << 4) +#define INTERCEPT_DR5_MASK (1 << 5) +#define INTERCEPT_DR6_MASK (1 << 6) +#define INTERCEPT_DR7_MASK (1 << 7) + +#define SVM_EVTINJ_VEC_MASK 0xff + +#define SVM_EVTINJ_TYPE_SHIFT 8 +#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT) + +#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT) +#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT) +#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT) +#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT) + +#define SVM_EVTINJ_VALID (1 << 31) +#define SVM_EVTINJ_VALID_ERR (1 << 11) + +#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK + +#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR +#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI +#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT +#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT + +#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID +#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR + +#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36 +#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38 + +#define SVM_EXIT_READ_CR0 0x000 +#define SVM_EXIT_READ_CR3 0x003 +#define SVM_EXIT_READ_CR4 0x004 +#define SVM_EXIT_READ_CR8 0x008 +#define SVM_EXIT_WRITE_CR0 0x010 +#define SVM_EXIT_WRITE_CR3 0x013 +#define SVM_EXIT_WRITE_CR4 0x014 +#define SVM_EXIT_WRITE_CR8 0x018 +#define SVM_EXIT_READ_DR0 0x020 +#define SVM_EXIT_READ_DR1 0x021 +#define SVM_EXIT_READ_DR2 0x022 +#define SVM_EXIT_READ_DR3 0x023 +#define SVM_EXIT_READ_DR4 0x024 +#define SVM_EXIT_READ_DR5 0x025 +#define SVM_EXIT_READ_DR6 0x026 +#define SVM_EXIT_READ_DR7 0x027 +#define SVM_EXIT_WRITE_DR0 0x030 +#define SVM_EXIT_WRITE_DR1 0x031 +#define SVM_EXIT_WRITE_DR2 0x032 +#define 
SVM_EXIT_WRITE_DR3 0x033 +#define SVM_EXIT_WRITE_DR4 0x034 +#define SVM_EXIT_WRITE_DR5 0x035 +#define SVM_EXIT_WRITE_DR6 0x036 +#define SVM_EXIT_WRITE_DR7 0x037 +#define SVM_EXIT_EXCP_BASE 0x040 +#define SVM_EXIT_INTR 0x060 +#define SVM_EXIT_NMI 0x061 +#define SVM_EXIT_SMI 0x062 +#define SVM_EXIT_INIT 0x063 +#define SVM_EXIT_VINTR 0x064 +#define SVM_EXIT_CR0_SEL_WRITE 0x065 +#define SVM_EXIT_IDTR_READ 0x066 +#define SVM_EXIT_GDTR_READ 0x067 +#define SVM_EXIT_LDTR_READ 0x068 +#define SVM_EXIT_TR_READ 0x069 +#define SVM_EXIT_IDTR_WRITE 0x06a +#define SVM_EXIT_GDTR_WRITE 0x06b +#define SVM_EXIT_LDTR_WRITE 0x06c +#define SVM_EXIT_TR_WRITE 0x06d +#define SVM_EXIT_RDTSC 0x06e +#define SVM_EXIT_RDPMC 0x06f +#define SVM_EXIT_PUSHF 0x070 +#define SVM_EXIT_POPF 0x071 +#define SVM_EXIT_CPUID 0x072 +#define SVM_EXIT_RSM 0x073 +#define SVM_EXIT_IRET 0x074 +#define SVM_EXIT_SWINT 0x075 +#define SVM_EXIT_INVD 0x076 +#define SVM_EXIT_PAUSE 0x077 +#define SVM_EXIT_HLT 0x078 +#define SVM_EXIT_INVLPG 0x079 +#define SVM_EXIT_INVLPGA 0x07a +#define SVM_EXIT_IOIO 0x07b +#define SVM_EXIT_MSR 0x07c +#define SVM_EXIT_TASK_SWITCH 0x07d +#define SVM_EXIT_FERR_FREEZE 0x07e +#define SVM_EXIT_SHUTDOWN 0x07f +#define SVM_EXIT_VMRUN 0x080 +#define SVM_EXIT_VMMCALL 0x081 +#define SVM_EXIT_VMLOAD 0x082 +#define SVM_EXIT_VMSAVE 0x083 +#define SVM_EXIT_STGI 0x084 +#define SVM_EXIT_CLGI 0x085 +#define SVM_EXIT_SKINIT 0x086 +#define SVM_EXIT_RDTSCP 0x087 +#define SVM_EXIT_ICEBP 0x088 +#define SVM_EXIT_WBINVD 0x089 +#define SVM_EXIT_MONITOR 0x08a +#define SVM_EXIT_MWAIT 0x08b +#define SVM_EXIT_MWAIT_COND 0x08c +#define SVM_EXIT_NPF 0x400 + +#define SVM_EXIT_ERR -1 + +#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */ + +#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" +#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" +#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb" +#define SVM_CLGI ".byte 0x0f, 0x01, 0xdd" +#define SVM_STGI ".byte 0x0f, 0x01, 0xdc" +#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf" + +#endif + diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h index 65ef0fc2c036..8e5ee99551f6 100644 --- a/arch/x86/kvm/kvm_svm.h +++ b/arch/x86/kvm/kvm_svm.h @@ -7,7 +7,7 @@ #include #include -#include "svm.h" +#include static const u32 host_save_user_msrs[] = { #ifdef CONFIG_X86_64 diff --git a/arch/x86/kvm/svm.h b/arch/x86/kvm/svm.h deleted file mode 100644 index 1b8afa78e869..000000000000 --- a/arch/x86/kvm/svm.h +++ /dev/null @@ -1,328 +0,0 @@ -#ifndef __SVM_H -#define __SVM_H - -enum { - INTERCEPT_INTR, - INTERCEPT_NMI, - INTERCEPT_SMI, - INTERCEPT_INIT, - INTERCEPT_VINTR, - INTERCEPT_SELECTIVE_CR0, - INTERCEPT_STORE_IDTR, - INTERCEPT_STORE_GDTR, - INTERCEPT_STORE_LDTR, - INTERCEPT_STORE_TR, - INTERCEPT_LOAD_IDTR, - INTERCEPT_LOAD_GDTR, - INTERCEPT_LOAD_LDTR, - INTERCEPT_LOAD_TR, - INTERCEPT_RDTSC, - INTERCEPT_RDPMC, - INTERCEPT_PUSHF, - INTERCEPT_POPF, - INTERCEPT_CPUID, - INTERCEPT_RSM, - INTERCEPT_IRET, - INTERCEPT_INTn, - INTERCEPT_INVD, - INTERCEPT_PAUSE, - INTERCEPT_HLT, - INTERCEPT_INVLPG, - INTERCEPT_INVLPGA, - INTERCEPT_IOIO_PROT, - INTERCEPT_MSR_PROT, - INTERCEPT_TASK_SWITCH, - INTERCEPT_FERR_FREEZE, - INTERCEPT_SHUTDOWN, - INTERCEPT_VMRUN, - INTERCEPT_VMMCALL, - INTERCEPT_VMLOAD, - INTERCEPT_VMSAVE, - INTERCEPT_STGI, - INTERCEPT_CLGI, - INTERCEPT_SKINIT, - INTERCEPT_RDTSCP, - INTERCEPT_ICEBP, - INTERCEPT_WBINVD, - INTERCEPT_MONITOR, - INTERCEPT_MWAIT, - INTERCEPT_MWAIT_COND, -}; - - -struct __attribute__ ((__packed__)) vmcb_control_area { - u16 intercept_cr_read; - u16 intercept_cr_write; - u16 intercept_dr_read; - 
u16 intercept_dr_write; - u32 intercept_exceptions; - u64 intercept; - u8 reserved_1[44]; - u64 iopm_base_pa; - u64 msrpm_base_pa; - u64 tsc_offset; - u32 asid; - u8 tlb_ctl; - u8 reserved_2[3]; - u32 int_ctl; - u32 int_vector; - u32 int_state; - u8 reserved_3[4]; - u32 exit_code; - u32 exit_code_hi; - u64 exit_info_1; - u64 exit_info_2; - u32 exit_int_info; - u32 exit_int_info_err; - u64 nested_ctl; - u8 reserved_4[16]; - u32 event_inj; - u32 event_inj_err; - u64 nested_cr3; - u64 lbr_ctl; - u8 reserved_5[832]; -}; - - -#define TLB_CONTROL_DO_NOTHING 0 -#define TLB_CONTROL_FLUSH_ALL_ASID 1 - -#define V_TPR_MASK 0x0f - -#define V_IRQ_SHIFT 8 -#define V_IRQ_MASK (1 << V_IRQ_SHIFT) - -#define V_INTR_PRIO_SHIFT 16 -#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT) - -#define V_IGN_TPR_SHIFT 20 -#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) - -#define V_INTR_MASKING_SHIFT 24 -#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) - -#define SVM_INTERRUPT_SHADOW_MASK 1 - -#define SVM_IOIO_STR_SHIFT 2 -#define SVM_IOIO_REP_SHIFT 3 -#define SVM_IOIO_SIZE_SHIFT 4 -#define SVM_IOIO_ASIZE_SHIFT 7 - -#define SVM_IOIO_TYPE_MASK 1 -#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT) -#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT) -#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT) -#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT) - -struct __attribute__ ((__packed__)) vmcb_seg { - u16 selector; - u16 attrib; - u32 limit; - u64 base; -}; - -struct __attribute__ ((__packed__)) vmcb_save_area { - struct vmcb_seg es; - struct vmcb_seg cs; - struct vmcb_seg ss; - struct vmcb_seg ds; - struct vmcb_seg fs; - struct vmcb_seg gs; - struct vmcb_seg gdtr; - struct vmcb_seg ldtr; - struct vmcb_seg idtr; - struct vmcb_seg tr; - u8 reserved_1[43]; - u8 cpl; - u8 reserved_2[4]; - u64 efer; - u8 reserved_3[112]; - u64 cr4; - u64 cr3; - u64 cr0; - u64 dr7; - u64 dr6; - u64 rflags; - u64 rip; - u8 reserved_4[88]; - u64 rsp; - u8 reserved_5[24]; - u64 rax; - u64 star; - u64 lstar; - u64 cstar; - u64 sfmask; - u64 kernel_gs_base; - u64 sysenter_cs; - u64 sysenter_esp; - u64 sysenter_eip; - u64 cr2; - u8 reserved_6[32]; - u64 g_pat; - u64 dbgctl; - u64 br_from; - u64 br_to; - u64 last_excp_from; - u64 last_excp_to; -}; - -struct __attribute__ ((__packed__)) vmcb { - struct vmcb_control_area control; - struct vmcb_save_area save; -}; - -#define SVM_CPUID_FEATURE_SHIFT 2 -#define SVM_CPUID_FUNC 0x8000000a - -#define MSR_EFER_SVME_MASK (1ULL << 12) -#define MSR_VM_CR 0xc0010114 -#define MSR_VM_HSAVE_PA 0xc0010117ULL - -#define SVM_VM_CR_SVM_DISABLE 4 - -#define SVM_SELECTOR_S_SHIFT 4 -#define SVM_SELECTOR_DPL_SHIFT 5 -#define SVM_SELECTOR_P_SHIFT 7 -#define SVM_SELECTOR_AVL_SHIFT 8 -#define SVM_SELECTOR_L_SHIFT 9 -#define SVM_SELECTOR_DB_SHIFT 10 -#define SVM_SELECTOR_G_SHIFT 11 - -#define SVM_SELECTOR_TYPE_MASK (0xf) -#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT) -#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT) -#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT) -#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT) -#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT) -#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT) -#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT) - -#define SVM_SELECTOR_WRITE_MASK (1 << 1) -#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK -#define SVM_SELECTOR_CODE_MASK (1 << 3) - -#define INTERCEPT_CR0_MASK 1 -#define INTERCEPT_CR3_MASK (1 << 3) -#define INTERCEPT_CR4_MASK (1 << 4) -#define INTERCEPT_CR8_MASK (1 << 8) - 
-#define INTERCEPT_DR0_MASK 1 -#define INTERCEPT_DR1_MASK (1 << 1) -#define INTERCEPT_DR2_MASK (1 << 2) -#define INTERCEPT_DR3_MASK (1 << 3) -#define INTERCEPT_DR4_MASK (1 << 4) -#define INTERCEPT_DR5_MASK (1 << 5) -#define INTERCEPT_DR6_MASK (1 << 6) -#define INTERCEPT_DR7_MASK (1 << 7) - -#define SVM_EVTINJ_VEC_MASK 0xff - -#define SVM_EVTINJ_TYPE_SHIFT 8 -#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT) - -#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT) -#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT) -#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT) -#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT) - -#define SVM_EVTINJ_VALID (1 << 31) -#define SVM_EVTINJ_VALID_ERR (1 << 11) - -#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK - -#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR -#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI -#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT -#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT - -#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID -#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR - -#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36 -#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38 - -#define SVM_EXIT_READ_CR0 0x000 -#define SVM_EXIT_READ_CR3 0x003 -#define SVM_EXIT_READ_CR4 0x004 -#define SVM_EXIT_READ_CR8 0x008 -#define SVM_EXIT_WRITE_CR0 0x010 -#define SVM_EXIT_WRITE_CR3 0x013 -#define SVM_EXIT_WRITE_CR4 0x014 -#define SVM_EXIT_WRITE_CR8 0x018 -#define SVM_EXIT_READ_DR0 0x020 -#define SVM_EXIT_READ_DR1 0x021 -#define SVM_EXIT_READ_DR2 0x022 -#define SVM_EXIT_READ_DR3 0x023 -#define SVM_EXIT_READ_DR4 0x024 -#define SVM_EXIT_READ_DR5 0x025 -#define SVM_EXIT_READ_DR6 0x026 -#define SVM_EXIT_READ_DR7 0x027 -#define SVM_EXIT_WRITE_DR0 0x030 -#define SVM_EXIT_WRITE_DR1 0x031 -#define SVM_EXIT_WRITE_DR2 0x032 -#define SVM_EXIT_WRITE_DR3 0x033 -#define SVM_EXIT_WRITE_DR4 0x034 -#define SVM_EXIT_WRITE_DR5 0x035 -#define SVM_EXIT_WRITE_DR6 0x036 -#define SVM_EXIT_WRITE_DR7 0x037 -#define SVM_EXIT_EXCP_BASE 0x040 -#define SVM_EXIT_INTR 0x060 -#define SVM_EXIT_NMI 0x061 -#define SVM_EXIT_SMI 0x062 -#define SVM_EXIT_INIT 0x063 -#define SVM_EXIT_VINTR 0x064 -#define SVM_EXIT_CR0_SEL_WRITE 0x065 -#define SVM_EXIT_IDTR_READ 0x066 -#define SVM_EXIT_GDTR_READ 0x067 -#define SVM_EXIT_LDTR_READ 0x068 -#define SVM_EXIT_TR_READ 0x069 -#define SVM_EXIT_IDTR_WRITE 0x06a -#define SVM_EXIT_GDTR_WRITE 0x06b -#define SVM_EXIT_LDTR_WRITE 0x06c -#define SVM_EXIT_TR_WRITE 0x06d -#define SVM_EXIT_RDTSC 0x06e -#define SVM_EXIT_RDPMC 0x06f -#define SVM_EXIT_PUSHF 0x070 -#define SVM_EXIT_POPF 0x071 -#define SVM_EXIT_CPUID 0x072 -#define SVM_EXIT_RSM 0x073 -#define SVM_EXIT_IRET 0x074 -#define SVM_EXIT_SWINT 0x075 -#define SVM_EXIT_INVD 0x076 -#define SVM_EXIT_PAUSE 0x077 -#define SVM_EXIT_HLT 0x078 -#define SVM_EXIT_INVLPG 0x079 -#define SVM_EXIT_INVLPGA 0x07a -#define SVM_EXIT_IOIO 0x07b -#define SVM_EXIT_MSR 0x07c -#define SVM_EXIT_TASK_SWITCH 0x07d -#define SVM_EXIT_FERR_FREEZE 0x07e -#define SVM_EXIT_SHUTDOWN 0x07f -#define SVM_EXIT_VMRUN 0x080 -#define SVM_EXIT_VMMCALL 0x081 -#define SVM_EXIT_VMLOAD 0x082 -#define SVM_EXIT_VMSAVE 0x083 -#define SVM_EXIT_STGI 0x084 -#define SVM_EXIT_CLGI 0x085 -#define SVM_EXIT_SKINIT 0x086 -#define SVM_EXIT_RDTSCP 0x087 -#define SVM_EXIT_ICEBP 0x088 -#define SVM_EXIT_WBINVD 0x089 -#define SVM_EXIT_MONITOR 0x08a -#define SVM_EXIT_MWAIT 0x08b -#define SVM_EXIT_MWAIT_COND 0x08c -#define SVM_EXIT_NPF 0x400 - -#define SVM_EXIT_ERR -1 - -#define SVM_CR0_SELECTIVE_MASK (1 << 3 
| 1) /* TS and MP */ - -#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" -#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8" -#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb" -#define SVM_CLGI ".byte 0x0f, 0x01, 0xdd" -#define SVM_STGI ".byte 0x0f, 0x01, 0xdc" -#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf" - -#endif - -- cgit v1.2.3 From eca70fc5671b226966dfb7ee9953d59199288566 Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:15 -0200 Subject: KVM: VMX: move ASM_VMX_* definitions from asm/kvm_host.h to asm/vmx.h Those definitions will be used by code outside KVM, so move it outside of a KVM-specific source file. Those definitions are used only on kvm/vmx.c, that already includes asm/vmx.h, so they can be moved safely. Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 12 ------------ arch/x86/include/asm/vmx.h | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 99e3cc149d21..f58f7ebdea81 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -714,18 +714,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); } -#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30" -#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2" -#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3" -#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30" -#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0" -#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0" -#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" -#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" -#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" -#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08" -#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" - #define MSR_IA32_TIME_STAMP_COUNTER 0x010 #define TSS_IOPB_BASE_OFFSET 0x66 diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 3db236c26fa4..d0238e6151d8 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -364,4 +364,19 @@ enum vmcs_field { #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul + +#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30" +#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2" +#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3" +#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30" +#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0" +#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0" +#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" +#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" +#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" +#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08" +#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" + + + #endif -- cgit v1.2.3 From 6210e37b122583643da335c0389f74098713e5ca Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:16 -0200 Subject: KVM: VMX: move cpu_has_kvm_support() to an inline on asm/virtext.h It will be used by core code on kdump and reboot, to disable vmx if needed. 
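As a rough illustration of the call pattern this refactoring enables (not part of the patch itself; the wrapper name below is made up), non-KVM core code can now gate its VMX cleanup on the new inline helper:

	#include <asm/virtext.h>

	/* Illustrative only: bail out early on CPUs without VMX support. */
	static void example_virt_cleanup(void)
	{
		if (!cpu_has_vmx())	/* CPUID.1:ECX.VMX[bit 5] */
			return;
		/* VMX teardown added by the follow-up patches goes here */
	}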
Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/include/asm/virtext.h | 31 +++++++++++++++++++++++++++++++ arch/x86/kvm/vmx.c | 4 ++-- 2 files changed, 33 insertions(+), 2 deletions(-) create mode 100644 arch/x86/include/asm/virtext.h (limited to 'arch/x86') diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h new file mode 100644 index 000000000000..298b6a06110d --- /dev/null +++ b/arch/x86/include/asm/virtext.h @@ -0,0 +1,31 @@ +/* CPU virtualization extensions handling + * + * This should carry the code for handling CPU virtualization extensions + * that needs to live in the kernel core. + * + * Author: Eduardo Habkost + * + * Copyright (C) 2008, Red Hat Inc. + * + * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ +#ifndef _ASM_X86_VIRTEX_H +#define _ASM_X86_VIRTEX_H + +#include +#include + +/* + * VMX functions: + */ + +static inline int cpu_has_vmx(void) +{ + unsigned long ecx = cpuid_ecx(1); + return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */ +} + +#endif /* _ASM_X86_VIRTEX_H */ diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ec71f6464cfa..defaeeb3f75c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -31,6 +31,7 @@ #include #include #include +#include #define __ex(x) __kvm_handle_fault_on_reboot(x) @@ -1044,8 +1045,7 @@ static int vmx_get_irq(struct kvm_vcpu *vcpu) static __init int cpu_has_kvm_support(void) { - unsigned long ecx = cpuid_ecx(1); - return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */ + return cpu_has_vmx(); } static __init int vmx_disabled_by_bios(void) -- cgit v1.2.3 From 1e9931146c748420343aeefadb3bb17bd1c14a37 Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:17 -0200 Subject: x86: asm/virtext.h: add cpu_vmxoff() inline function Unfortunately we can't use exactly the same code from vmx hardware_disable(), because the KVM function uses the __kvm_handle_fault_on_reboot() tricks. Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/include/asm/virtext.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 298b6a06110d..7dee5b59930e 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -18,6 +18,8 @@ #include #include +#include + /* * VMX functions: */ @@ -28,4 +30,17 @@ static inline int cpu_has_vmx(void) return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */ } + +/** Disable VMX on the current CPU + * + * vmxoff causes a undefined-opcode exception if vmxon was not run + * on the CPU previously. Only call this function if you know VMX + * is enabled. + */ +static inline void cpu_vmxoff(void) +{ + asm volatile (ASM_VMX_VMXOFF : : : "cc"); + write_cr4(read_cr4() & ~X86_CR4_VMXE); +} + #endif /* _ASM_X86_VIRTEX_H */ -- cgit v1.2.3 From 710ff4a855d0f3bf74b5b4a20328e2858a8a2968 Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:18 -0200 Subject: KVM: VMX: extract kvm_cpu_vmxoff() from hardware_disable() Along with some comments on why it is different from the core cpu_vmxoff() function. 
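For quick reference, the two variants end up looking like this (bodies reproduced from the hunks above and below; only the exception-fixup wrapper around VMXOFF differs):

	/* Core helper in asm/virtext.h: only call if VMX is known to be on. */
	static inline void cpu_vmxoff(void)
	{
		asm volatile (ASM_VMX_VMXOFF : : : "cc");
		write_cr4(read_cr4() & ~X86_CR4_VMXE);
	}

	/* KVM-private copy in vmx.c: wraps VMXOFF in the fault-on-reboot fixup. */
	static void kvm_cpu_vmxoff(void)
	{
		asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
		write_cr4(read_cr4() & ~X86_CR4_VMXE);
	}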
Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index defaeeb3f75c..f5958a7823f4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1091,13 +1091,22 @@ static void vmclear_local_vcpus(void) __vcpu_clear(vmx); } -static void hardware_disable(void *garbage) + +/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() + * tricks. + */ +static void kvm_cpu_vmxoff(void) { - vmclear_local_vcpus(); asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); write_cr4(read_cr4() & ~X86_CR4_VMXE); } +static void hardware_disable(void *garbage) +{ + vmclear_local_vcpus(); + kvm_cpu_vmxoff(); +} + static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result) { -- cgit v1.2.3 From 6aa07a0d77f6aafbe69e4e8609ffaf2b7ee1b591 Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:19 -0200 Subject: x86: cpu_emergency_vmxoff() function Add cpu_emergency_vmxoff() and its friends: cpu_vmx_enabled() and __cpu_emergency_vmxoff(). Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/include/asm/virtext.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 7dee5b59930e..6bcf0acb4ef1 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -43,4 +43,27 @@ static inline void cpu_vmxoff(void) write_cr4(read_cr4() & ~X86_CR4_VMXE); } +static inline int cpu_vmx_enabled(void) +{ + return read_cr4() & X86_CR4_VMXE; +} + +/** Disable VMX if it is enabled on the current CPU + * + * You shouldn't call this if cpu_has_vmx() returns 0. + */ +static inline void __cpu_emergency_vmxoff(void) +{ + if (cpu_vmx_enabled()) + cpu_vmxoff(); +} + +/** Disable VMX if it is supported and enabled on the current CPU + */ +static inline void cpu_emergency_vmxoff(void) +{ + if (cpu_has_vmx()) + __cpu_emergency_vmxoff(); +} + #endif /* _ASM_X86_VIRTEX_H */ -- cgit v1.2.3 From 63d1142f8f69e39468bc6079ab2239e902828134 Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:20 -0200 Subject: KVM: SVM: move has_svm() code to asm/virtext.h Use a trick to keep the printk()s on has_svm() working as before. gcc will take care of not generating code for the 'msg' stuff when the function is called with a NULL msg argument. Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/include/asm/virtext.h | 41 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm.c | 19 +++++-------------- 2 files changed, 46 insertions(+), 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 6bcf0acb4ef1..6f0d409c3682 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -19,6 +19,7 @@ #include #include +#include /* * VMX functions: @@ -66,4 +67,44 @@ static inline void cpu_emergency_vmxoff(void) __cpu_emergency_vmxoff(); } + + + +/* + * SVM functions: + */ + +/** Check if the CPU has SVM support + * + * You can use the 'msg' arg to get a message describing the problem, + * if the function returns zero. Simply pass NULL if you are not interested + * on the messages; gcc should take care of not generating code for + * the messages on this case. 
+ */ +static inline int cpu_has_svm(const char **msg) +{ + uint32_t eax, ebx, ecx, edx; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { + if (msg) + *msg = "not amd"; + return 0; + } + + cpuid(0x80000000, &eax, &ebx, &ecx, &edx); + if (eax < SVM_CPUID_FUNC) { + if (msg) + *msg = "can't execute cpuid_8000000a"; + return 0; + } + + cpuid(0x80000001, &eax, &ebx, &ecx, &edx); + if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) { + if (msg) + *msg = "svm not available"; + return 0; + } + return 1; +} + #endif /* _ASM_X86_VIRTEX_H */ diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index f0ad4d4217e4..0667c6d13d30 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -28,6 +28,8 @@ #include +#include + #define __ex(x) __kvm_handle_fault_on_reboot(x) MODULE_AUTHOR("Qumranet"); @@ -245,24 +247,13 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) static int has_svm(void) { - uint32_t eax, ebx, ecx, edx; - - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { - printk(KERN_INFO "has_svm: not amd\n"); - return 0; - } + const char *msg; - cpuid(0x80000000, &eax, &ebx, &ecx, &edx); - if (eax < SVM_CPUID_FUNC) { - printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n"); + if (!cpu_has_svm(&msg)) { + printk(KERN_INFO "has_svn: %s\n", msg); return 0; } - cpuid(0x80000001, &eax, &ebx, &ecx, &edx); - if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) { - printk(KERN_DEBUG "has_svm: svm not available\n"); - return 0; - } return 1; } -- cgit v1.2.3 From 2c8dceebb238680d5577500f8283397d41ca5590 Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:21 -0200 Subject: KVM: SVM: move svm_hardware_disable() code to asm/virtext.h Create cpu_svm_disable() function. Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/include/asm/virtext.h | 14 ++++++++++++++ arch/x86/kvm/svm.c | 6 +----- 2 files changed, 15 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 6f0d409c3682..2cfe363729c3 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -107,4 +107,18 @@ static inline int cpu_has_svm(const char **msg) return 1; } + +/** Disable SVM on the current CPU + * + * You should call this only if cpu_has_svm() returned true. + */ +static inline void cpu_svm_disable(void) +{ + uint64_t efer; + + wrmsrl(MSR_VM_HSAVE_PA, 0); + rdmsrl(MSR_EFER, efer); + wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); +} + #endif /* _ASM_X86_VIRTEX_H */ diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 0667c6d13d30..1452851ae258 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -259,11 +259,7 @@ static int has_svm(void) static void svm_hardware_disable(void *garbage) { - uint64_t efer; - - wrmsrl(MSR_VM_HSAVE_PA, 0); - rdmsrl(MSR_EFER, efer); - wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); + cpu_svm_disable(); } static void svm_hardware_enable(void *garbage) -- cgit v1.2.3 From 0f3e9eeba0ea212bbea88790729d054b700ab91e Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:22 -0200 Subject: x86: cpu_emergency_svm_disable() function This function can be used by the reboot or kdump code to forcibly disable SVM on the CPU. 
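Combined with the VMX helpers earlier in the series, the emergency call pattern that the kdump and reboot patches below rely on reduces to the following sketch (the wrapper name is illustrative only):

	#include <asm/virtext.h>

	/* Illustrative: disable whichever virtualization extension is in use. */
	static void example_emergency_disable_virt(void)
	{
		cpu_emergency_vmxoff();		/* no-op unless VMX is supported and enabled */
		cpu_emergency_svm_disable();	/* no-op unless SVM is supported */
	}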
Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/include/asm/virtext.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 2cfe363729c3..593636275238 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -121,4 +121,12 @@ static inline void cpu_svm_disable(void) wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); } +/** Makes sure SVM is disabled, if it is supported on the CPU + */ +static inline void cpu_emergency_svm_disable(void) +{ + if (cpu_has_svm(NULL)) + cpu_svm_disable(); +} + #endif /* _ASM_X86_VIRTEX_H */ -- cgit v1.2.3 From 2340b62f77c782c305e6ae7748675a638436d1ef Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:23 -0200 Subject: kdump: forcibly disable VMX and SVM on machine_crash_shutdown() We need to disable virtualization extensions on all CPUs before booting the kdump kernel, otherwise the kdump kernel booting will fail, and rebooting after the kdump kernel did its task may also fail. We do it using cpu_emergency_vmxoff() and cpu_emergency_svm_disable(), that should always work, because those functions check if the CPUs support SVM or VMX before doing their tasks. Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/kernel/crash.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index d84a852e4cd7..c689d19e35ab 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -49,6 +50,15 @@ static void kdump_nmi_callback(int cpu, struct die_args *args) #endif crash_save_cpu(regs, cpu); + /* Disable VMX or SVM if needed. + * + * We need to disable virtualization on all CPUs. + * Having VMX or SVM enabled on any CPU may break rebooting + * after the kdump kernel has finished its task. + */ + cpu_emergency_vmxoff(); + cpu_emergency_svm_disable(); + disable_local_APIC(); } @@ -80,6 +90,14 @@ void native_machine_crash_shutdown(struct pt_regs *regs) local_irq_disable(); kdump_nmi_shootdown_cpus(); + + /* Booting kdump kernel with VMX or SVM enabled won't work, + * because (among other limitations) we can't disable paging + * with the virt flags. + */ + cpu_emergency_vmxoff(); + cpu_emergency_svm_disable(); + lapic_shutdown(); #if defined(CONFIG_X86_IO_APIC) disable_IO_APIC(); -- cgit v1.2.3 From d176720d34c72f7a8474a12204add93e54fe3ef1 Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Mon, 17 Nov 2008 19:03:24 -0200 Subject: x86: disable VMX on all CPUs on reboot On emergency_restart, we may need to use an NMI to disable virtualization on all CPUs. We do that using nmi_shootdown_cpus() if VMX is enabled. Note: With this patch, we will run the NMI stuff only when the CPU where emergency_restart() was called has VMX enabled. This should work on most cases because KVM enables VMX on all CPUs, but we may miss the small window where KVM is doing that. Also, I don't know if all code using VMX out there always enable VMX on all CPUs like KVM does. We have two other alternatives for that: a) Have an API that all code that enables VMX on any CPU should use to tell the kernel core that it is going to enable VMX on the CPUs. b) Always call nmi_shootdown_cpus() if the CPU supports VMX. This is a bit intrusive and more risky, as it would run nmi_shootdown_cpus() on emergency_reboot() even on systems where virtualization is never enabled. 
Finding a proper point to hook the nmi_shootdown_cpus() call isn't trivial, as the non-emergency machine_restart() (that doesn't need the NMI tricks) uses machine_emergency_restart() directly. The solution to make this work without adding a new function or argument to machine_ops was setting a 'reboot_emergency' flag that tells if native_machine_emergency_restart() needs to do the virt cleanup or not. Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/kernel/reboot.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 60 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 61f718df6eec..72e0e4e712d6 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -12,6 +12,7 @@ #include #include #include +#include #ifdef CONFIG_X86_32 # include @@ -39,6 +40,12 @@ int reboot_force; static int reboot_cpu = -1; #endif +/* This is set if we need to go through the 'emergency' path. + * When machine_emergency_restart() is called, we may be on + * an inconsistent state and won't be able to do a clean cleanup + */ +static int reboot_emergency; + /* This is set by the PCI code if either type 1 or type 2 PCI is detected */ bool port_cf9_safe = false; @@ -368,6 +375,48 @@ static inline void kb_wait(void) } } +static void vmxoff_nmi(int cpu, struct die_args *args) +{ + cpu_emergency_vmxoff(); +} + +/* Use NMIs as IPIs to tell all CPUs to disable virtualization + */ +static void emergency_vmx_disable_all(void) +{ + /* Just make sure we won't change CPUs while doing this */ + local_irq_disable(); + + /* We need to disable VMX on all CPUs before rebooting, otherwise + * we risk hanging up the machine, because the CPU ignore INIT + * signals when VMX is enabled. + * + * We can't take any locks and we may be on an inconsistent + * state, so we use NMIs as IPIs to tell the other CPUs to disable + * VMX and halt. + * + * For safety, we will avoid running the nmi_shootdown_cpus() + * stuff unnecessarily, but we don't have a way to check + * if other CPUs have VMX enabled. So we will call it only if the + * CPU we are running on has VMX enabled. + * + * We will miss cases where VMX is not enabled on all CPUs. This + * shouldn't do much harm because KVM always enable VMX on all + * CPUs anyway. But we can miss it on the small window where KVM + * is still enabling VMX. + */ + if (cpu_has_vmx() && cpu_vmx_enabled()) { + /* Disable VMX on this CPU. 
+ */ + cpu_vmxoff(); + + /* Halt and disable VMX on the other CPUs */ + nmi_shootdown_cpus(vmxoff_nmi); + + } +} + + void __attribute__((weak)) mach_reboot_fixups(void) { } @@ -376,6 +425,9 @@ static void native_machine_emergency_restart(void) { int i; + if (reboot_emergency) + emergency_vmx_disable_all(); + /* Tell the BIOS if we want cold or warm reboot */ *((unsigned short *)__va(0x472)) = reboot_mode; @@ -482,13 +534,19 @@ void native_machine_shutdown(void) #endif } +static void __machine_emergency_restart(int emergency) +{ + reboot_emergency = emergency; + machine_ops.emergency_restart(); +} + static void native_machine_restart(char *__unused) { printk("machine restart\n"); if (!reboot_force) machine_shutdown(); - machine_emergency_restart(); + __machine_emergency_restart(0); } static void native_machine_halt(void) @@ -532,7 +590,7 @@ void machine_shutdown(void) void machine_emergency_restart(void) { - machine_ops.emergency_restart(); + __machine_emergency_restart(1); } void machine_restart(char *cmd) -- cgit v1.2.3 From df203ec9a77a7236cb90456664d714423b98a977 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 23 Nov 2008 18:08:57 +0200 Subject: KVM: VMX: Conditionally request interrupt window after injecting irq If we're injecting an interrupt, and another one is pending, request an interrupt window notification so we don't have excess latency on the second interrupt. This shouldn't happen in practice since an EOI will be issued, giving a second chance to request an interrupt window, but... Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f5958a7823f4..7ea485543cf8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3304,6 +3304,8 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) if (vcpu->arch.interrupt.pending) { vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr); + if (kvm_cpu_has_interrupt(vcpu)) + enable_irq_window(vcpu); } } -- cgit v1.2.3 From 423cd25a5ade17b8a5cc85e6f0a0f37028d2c4a2 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 24 Nov 2008 15:45:23 -0200 Subject: x86: KVM guest: sign kvmclock as paravirt Currently, we only set the KVM paravirt signature in case of CONFIG_KVM_GUEST. However, it is possible to have it turned off, while CONFIG_KVM_CLOCK is turned on. This is also a paravirt case, and should be shown accordingly. Signed-off-by: Glauber Costa Signed-off-by: Avi Kivity --- arch/x86/kernel/kvmclock.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index e169ae9b6a62..b38e801014e3 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -194,5 +194,7 @@ void __init kvmclock_init(void) #endif kvm_get_preset_lpj(); clocksource_register(&kvm_clock); + pv_info.paravirt_enabled = 1; + pv_info.name = "KVM"; } } -- cgit v1.2.3 From ecc5589f19a52e7e6501fe449047b19087ae11bb Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 25 Nov 2008 15:58:07 +0100 Subject: KVM: MMU: optimize set_spte for page sync The write protect verification in set_spte is unnecessary for page sync. Its guaranteed that, if the unsync spte was writable, the target page does not have a write protected shadow (if it had, the spte would have been write protected under mmu_lock by rmap_write_protect before). Same reasoning applies to mark_page_dirty: the gfn has been marked as dirty via the pagefault path. 
The cost of hash table and memslot lookups are quite significant if the workload is pagetable write intensive resulting in increased mmu_lock contention. Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index fa3486d64078..dd20b199a7c0 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1593,6 +1593,15 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, spte |= PT_WRITABLE_MASK; + /* + * Optimization: for pte sync, if spte was writable the hash + * lookup is unnecessary (and expensive). Write protection + * is responsibility of mmu_get_page / kvm_sync_page. + * Same reasoning can be applied to dirty page accounting. + */ + if (!can_unsync && is_writeble_pte(*shadow_pte)) + goto set_pte; + if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { pgprintk("%s: found shadow page for %lx, marking ro\n", __func__, gfn); -- cgit v1.2.3 From dda96d8f1b3de692cce09969ce28fe22e58e5acf Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 26 Nov 2008 15:14:10 +0200 Subject: KVM: x86 emulator: reduce duplication in one operand emulation thunks Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 66 ++++++++++++++++------------------------------ 1 file changed, 23 insertions(+), 43 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 8f60ace13874..5f87d3e59195 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -359,6 +359,12 @@ static u16 group2_table[] = { "andl %"_msk",%"_LO32 _tmp"; " \ "orl %"_LO32 _tmp",%"_sav"; " +#ifdef CONFIG_X86_64 +#define ON64(x) x +#else +#define ON64(x) +#endif + /* Raw emulation: instruction has two explicit operands. */ #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \ do { \ @@ -425,42 +431,27 @@ static u16 group2_table[] = { __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ "w", "r", _LO32, "r", "", "r") -/* Instruction has only one explicit operand (no source operand). */ -#define emulate_1op(_op, _dst, _eflags) \ +#define __emulate_1op(_op, _dst, _eflags, _suffix) \ do { \ unsigned long _tmp; \ \ + __asm__ __volatile__ ( \ + _PRE_EFLAGS("0", "3", "2") \ + _op _suffix " %1; " \ + _POST_EFLAGS("0", "3", "2") \ + : "=m" (_eflags), "+m" ((_dst).val), \ + "=&r" (_tmp) \ + : "i" (EFLAGS_MASK)); \ + } while (0) + +/* Instruction has only one explicit operand (no source operand). 
*/ +#define emulate_1op(_op, _dst, _eflags) \ + do { \ switch ((_dst).bytes) { \ - case 1: \ - __asm__ __volatile__ ( \ - _PRE_EFLAGS("0", "3", "2") \ - _op"b %1; " \ - _POST_EFLAGS("0", "3", "2") \ - : "=m" (_eflags), "=m" ((_dst).val), \ - "=&r" (_tmp) \ - : "i" (EFLAGS_MASK)); \ - break; \ - case 2: \ - __asm__ __volatile__ ( \ - _PRE_EFLAGS("0", "3", "2") \ - _op"w %1; " \ - _POST_EFLAGS("0", "3", "2") \ - : "=m" (_eflags), "=m" ((_dst).val), \ - "=&r" (_tmp) \ - : "i" (EFLAGS_MASK)); \ - break; \ - case 4: \ - __asm__ __volatile__ ( \ - _PRE_EFLAGS("0", "3", "2") \ - _op"l %1; " \ - _POST_EFLAGS("0", "3", "2") \ - : "=m" (_eflags), "=m" ((_dst).val), \ - "=&r" (_tmp) \ - : "i" (EFLAGS_MASK)); \ - break; \ - case 8: \ - __emulate_1op_8byte(_op, _dst, _eflags); \ - break; \ + case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \ + case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \ + case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \ + case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \ } \ } while (0) @@ -476,19 +467,8 @@ static u16 group2_table[] = { : _qy ((_src).val), "i" (EFLAGS_MASK)); \ } while (0) -#define __emulate_1op_8byte(_op, _dst, _eflags) \ - do { \ - __asm__ __volatile__ ( \ - _PRE_EFLAGS("0", "3", "2") \ - _op"q %1; " \ - _POST_EFLAGS("0", "3", "2") \ - : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ - : "i" (EFLAGS_MASK)); \ - } while (0) - #elif defined(__i386__) #define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) -#define __emulate_1op_8byte(_op, _dst, _eflags) #endif /* __i386__ */ /* Fetch next part of the instruction being emulated. */ -- cgit v1.2.3 From 6b7ad61ffb9ca110add6f7fb36cc8a4dd89696a4 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 26 Nov 2008 15:30:45 +0200 Subject: KVM: x86 emulator: consolidate emulation of two operand instructions No need to repeat the same assembly block over and over. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 79 ++++++++++++++++------------------------------ 1 file changed, 28 insertions(+), 51 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 5f87d3e59195..a11af6f74d68 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -365,49 +365,42 @@ static u16 group2_table[] = { #define ON64(x) #endif +#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \ + do { \ + __asm__ __volatile__ ( \ + _PRE_EFLAGS("0", "4", "2") \ + _op _suffix " %"_x"3,%1; " \ + _POST_EFLAGS("0", "4", "2") \ + : "=m" (_eflags), "=m" ((_dst).val), \ + "=&r" (_tmp) \ + : _y ((_src).val), "i" (EFLAGS_MASK)); \ + } while (0); + + /* Raw emulation: instruction has two explicit operands. 
*/ #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \ - do { \ - unsigned long _tmp; \ - \ - switch ((_dst).bytes) { \ - case 2: \ - __asm__ __volatile__ ( \ - _PRE_EFLAGS("0", "4", "2") \ - _op"w %"_wx"3,%1; " \ - _POST_EFLAGS("0", "4", "2") \ - : "=m" (_eflags), "=m" ((_dst).val), \ - "=&r" (_tmp) \ - : _wy ((_src).val), "i" (EFLAGS_MASK)); \ - break; \ - case 4: \ - __asm__ __volatile__ ( \ - _PRE_EFLAGS("0", "4", "2") \ - _op"l %"_lx"3,%1; " \ - _POST_EFLAGS("0", "4", "2") \ - : "=m" (_eflags), "=m" ((_dst).val), \ - "=&r" (_tmp) \ - : _ly ((_src).val), "i" (EFLAGS_MASK)); \ - break; \ - case 8: \ - __emulate_2op_8byte(_op, _src, _dst, \ - _eflags, _qx, _qy); \ - break; \ - } \ + do { \ + unsigned long _tmp; \ + \ + switch ((_dst).bytes) { \ + case 2: \ + ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \ + break; \ + case 4: \ + ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \ + break; \ + case 8: \ + ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \ + break; \ + } \ } while (0) #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \ do { \ - unsigned long __tmp; \ + unsigned long _tmp; \ switch ((_dst).bytes) { \ case 1: \ - __asm__ __volatile__ ( \ - _PRE_EFLAGS("0", "4", "2") \ - _op"b %"_bx"3,%1; " \ - _POST_EFLAGS("0", "4", "2") \ - : "=m" (_eflags), "=m" ((_dst).val), \ - "=&r" (__tmp) \ - : _by ((_src).val), "i" (EFLAGS_MASK)); \ + ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \ break; \ default: \ __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ @@ -455,22 +448,6 @@ static u16 group2_table[] = { } \ } while (0) -/* Emulate an instruction with quadword operands (x86/64 only). */ -#if defined(CONFIG_X86_64) -#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) \ - do { \ - __asm__ __volatile__ ( \ - _PRE_EFLAGS("0", "4", "2") \ - _op"q %"_qx"3,%1; " \ - _POST_EFLAGS("0", "4", "2") \ - : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ - : _qy ((_src).val), "i" (EFLAGS_MASK)); \ - } while (0) - -#elif defined(__i386__) -#define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) -#endif /* __i386__ */ - /* Fetch next part of the instruction being emulated. */ #define insn_fetch(_type, _size, _eip) \ ({ unsigned long _x; \ -- cgit v1.2.3 From faa5a3ae39483aefc46a78299c811194f953af27 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 27 Nov 2008 17:36:41 +0200 Subject: KVM: x86 emulator: Extract 'pop' sequence into a function Switch 'pop r/m' instruction to use the new function. 
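The operation being factored out here is just the architectural pop: read the value at SS:RSP first, then advance RSP by the operand size. A minimal, self-contained model of a 32-bit pop, assuming a flat stack buffer indexed upward and host byte order (none of these names come from the kernel source):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tiny_vcpu {
	uint8_t stack[64];	/* stand-in for guest stack memory */
	uint32_t rsp;		/* byte offset of the current top of stack */
};

static uint32_t pop32(struct tiny_vcpu *v)
{
	uint32_t val;

	memcpy(&val, &v->stack[v->rsp], sizeof(val));	/* read at RSP first */
	v->rsp += sizeof(val);				/* then advance RSP */
	return val;
}

int main(void)
{
	struct tiny_vcpu v = { .rsp = 0 };
	uint32_t pushed = 0x12345678;
	uint32_t popped;

	memcpy(&v.stack[v.rsp], &pushed, sizeof(pushed));	/* pretend it was pushed */
	popped = pop32(&v);
	printf("popped %#x, rsp is now %u\n", popped, v.rsp);
	return 0;
}

Keeping that read-then-increment sequence in one helper is what lets the later patches in this series reuse it for 'pop reg', popf and ret.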
Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index a11af6f74d68..2555762f4b42 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1057,20 +1057,33 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt) c->regs[VCPU_REGS_RSP]); } -static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, - struct x86_emulate_ops *ops) +static int emulate_pop(struct x86_emulate_ctxt *ctxt, + struct x86_emulate_ops *ops) { struct decode_cache *c = &ctxt->decode; int rc; rc = ops->read_std(register_address(c, ss_base(ctxt), c->regs[VCPU_REGS_RSP]), - &c->dst.val, c->dst.bytes, ctxt->vcpu); + &c->src.val, c->src.bytes, ctxt->vcpu); if (rc != 0) return rc; - register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->dst.bytes); + register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.bytes); + return rc; +} +static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, + struct x86_emulate_ops *ops) +{ + struct decode_cache *c = &ctxt->decode; + int rc; + + c->src.bytes = c->dst.bytes; + rc = emulate_pop(ctxt, ops); + if (rc != 0) + return rc; + c->dst.val = c->src.val; return 0; } -- cgit v1.2.3 From 781d0edc5fc5cfe7491a0c5081734e62f6dc66ee Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 27 Nov 2008 18:00:28 +0200 Subject: KVM: x86 emulator: allow pop from mmio Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 2555762f4b42..70242f5f0964 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1063,9 +1063,9 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt, struct decode_cache *c = &ctxt->decode; int rc; - rc = ops->read_std(register_address(c, ss_base(ctxt), - c->regs[VCPU_REGS_RSP]), - &c->src.val, c->src.bytes, ctxt->vcpu); + rc = ops->read_emulated(register_address(c, ss_base(ctxt), + c->regs[VCPU_REGS_RSP]), + &c->src.val, c->src.bytes, ctxt->vcpu); if (rc != 0) return rc; -- cgit v1.2.3 From 8a09b6877f3100207b3572e7e12ea796493fe914 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Thu, 27 Nov 2008 18:06:33 +0200 Subject: KVM: x86 emulator: switch 'pop reg' instruction to emulate_pop() Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 70242f5f0964..702de9869c19 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1389,14 +1389,11 @@ special_insn: break; case 0x58 ... 0x5f: /* pop reg */ pop_instruction: - if ((rc = ops->read_std(register_address(c, ss_base(ctxt), - c->regs[VCPU_REGS_RSP]), c->dst.ptr, - c->op_bytes, ctxt->vcpu)) != 0) + c->src.bytes = c->op_bytes; + rc = emulate_pop(ctxt, ops); + if (rc != 0) goto done; - - register_address_increment(c, &c->regs[VCPU_REGS_RSP], - c->op_bytes); - c->dst.type = OP_NONE; /* Disable writeback. 
*/ + c->dst.val = c->src.val; break; case 0x63: /* movsxd */ if (ctxt->mode != X86EMUL_MODE_PROT64) -- cgit v1.2.3 From cf5de4f886116871c2ae2eee53524edd741a68ae Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Fri, 28 Nov 2008 00:14:07 +0200 Subject: KVM: x86 emulator: fix ret emulation 'ret' did not set the operand type or size for the destination, so writeback ignored it. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 702de9869c19..72ae86b1b131 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1650,7 +1650,9 @@ special_insn: emulate_grp2(ctxt); break; case 0xc3: /* ret */ + c->dst.type = OP_REG; c->dst.ptr = &c->eip; + c->dst.bytes = c->op_bytes; goto pop_instruction; case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */ mov: -- cgit v1.2.3 From 2b48cc75b21431037d6f902b9d583b1aff198490 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 29 Nov 2008 20:36:13 +0200 Subject: KVM: x86 emulator: fix popf emulation Set operand type and size to get correct writeback behavior. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 72ae86b1b131..e8c87ccfe310 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1552,7 +1552,9 @@ special_insn: emulate_push(ctxt); break; case 0x9d: /* popf */ + c->dst.type = OP_REG; c->dst.ptr = (unsigned long *) &ctxt->eflags; + c->dst.bytes = c->op_bytes; goto pop_instruction; case 0xa0 ... 0xa1: /* mov */ c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; -- cgit v1.2.3 From f3fd92fbdb7663bd889c136842afc3851351ea8f Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sat, 29 Nov 2008 20:38:12 +0200 Subject: KVM: Remove extraneous semicolon after do/while Notices by Guillaume Thouvenin. Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index e8c87ccfe310..69b330ba0ad0 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -374,7 +374,7 @@ static u16 group2_table[] = { : "=m" (_eflags), "=m" ((_dst).val), \ "=&r" (_tmp) \ : _y ((_src).val), "i" (EFLAGS_MASK)); \ - } while (0); + } while (0) /* Raw emulation: instruction has two explicit operands. */ -- cgit v1.2.3 From efff9e538f6bfa8ee2ca03f7e9a55d98df115186 Mon Sep 17 00:00:00 2001 From: Hannes Eder Date: Fri, 28 Nov 2008 17:02:06 +0100 Subject: KVM: VMX: fix sparse warning Impact: make global function static arch/x86/kvm/vmx.c:134:3: warning: symbol 'vmx_capability' was not declared. Should it be static? Signed-off-by: Hannes Eder Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7ea485543cf8..e446f232588e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -128,7 +128,7 @@ static struct vmcs_config { u32 vmentry_ctrl; } vmcs_config; -struct vmx_capability { +static struct vmx_capability { u32 ept; u32 vpid; } vmx_capability; -- cgit v1.2.3 From 45ed60b371aeae6ed80f7e9d594a5e6412edc176 Mon Sep 17 00:00:00 2001 From: Guillaume Thouvenin Date: Thu, 4 Dec 2008 14:25:38 +0100 Subject: KVM: x86 emulator: Extend the opcode descriptor Extend the opcode descriptor to 32 bits. 
This is needed by the introduction of a new Src2 operand type. Signed-off-by: Guillaume Thouvenin Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 69b330ba0ad0..7a07ca46c8ae 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -76,7 +76,7 @@ enum { Group1A, Group3_Byte, Group3, Group4, Group5, Group7, }; -static u16 opcode_table[256] = { +static u32 opcode_table[256] = { /* 0x00 - 0x07 */ ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, @@ -195,7 +195,7 @@ static u16 opcode_table[256] = { ImplicitOps, ImplicitOps, Group | Group4, Group | Group5, }; -static u16 twobyte_table[256] = { +static u32 twobyte_table[256] = { /* 0x00 - 0x0F */ 0, Group | GroupDual | Group7, 0, 0, 0, 0, ImplicitOps, 0, ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0, @@ -253,7 +253,7 @@ static u16 twobyte_table[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; -static u16 group_table[] = { +static u32 group_table[] = { [Group1_80*8] = ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, @@ -297,7 +297,7 @@ static u16 group_table[] = { SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp, }; -static u16 group2_table[] = { +static u32 group2_table[] = { [Group7*8] = SrcNone | ModRM, 0, 0, 0, SrcNone | ModRM | DstMem | Mov, 0, -- cgit v1.2.3 From 0dc8d10f7d848b63c8d32cf6fd31ba7def792ac9 Mon Sep 17 00:00:00 2001 From: Guillaume Thouvenin Date: Thu, 4 Dec 2008 14:26:42 +0100 Subject: KVM: x86 emulator: add Src2 decode set Instruction like shld has three operands, so we need to add a Src2 decode set. We start with Src2None, Src2CL, and Src2ImmByte, Src2One to support shld/shrd and we will expand it later. Signed-off-by: Guillaume Thouvenin Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_x86_emulate.h | 1 + arch/x86/kvm/x86_emulate.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_x86_emulate.h index 16a002655f31..6a159732881a 100644 --- a/arch/x86/include/asm/kvm_x86_emulate.h +++ b/arch/x86/include/asm/kvm_x86_emulate.h @@ -123,6 +123,7 @@ struct decode_cache { u8 ad_bytes; u8 rex_prefix; struct operand src; + struct operand src2; struct operand dst; bool has_seg_override; u8 seg_override; diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 7a07ca46c8ae..7f5cd62362c5 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -70,6 +70,12 @@ #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ #define GroupMask 0xff /* Group number stored in bits 0:7 */ +/* Source 2 operand type */ +#define Src2None (0<<29) +#define Src2CL (1<<29) +#define Src2ImmByte (2<<29) +#define Src2One (3<<29) +#define Src2Mask (7<<29) enum { Group1_80, Group1_81, Group1_82, Group1_83, @@ -1000,6 +1006,29 @@ done_prefixes: break; } + /* + * Decode and fetch the second source operand: register, memory + * or immediate. 
+ */ + switch (c->d & Src2Mask) { + case Src2None: + break; + case Src2CL: + c->src2.bytes = 1; + c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8; + break; + case Src2ImmByte: + c->src2.type = OP_IMM; + c->src2.ptr = (unsigned long *)c->eip; + c->src2.bytes = 1; + c->src2.val = insn_fetch(u8, 1, c->eip); + break; + case Src2One: + c->src2.bytes = 1; + c->src2.val = 1; + break; + } + /* Decode and fetch the destination operand: register or memory. */ switch (c->d & DstMask) { case ImplicitOps: -- cgit v1.2.3 From bfcadf83ec5aafe600e73dd427d997db7bcc1d12 Mon Sep 17 00:00:00 2001 From: Guillaume Thouvenin Date: Thu, 4 Dec 2008 14:27:38 +0100 Subject: KVM: x86 emulator: add a new "implied 1" Src decode type Add SrcOne operand type when we need to decode an implied '1' like with regular shift instruction Signed-off-by: Guillaume Thouvenin Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 7f5cd62362c5..0c75306e7a07 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -58,6 +58,7 @@ #define SrcMem32 (4<<4) /* Memory operand (32-bit). */ #define SrcImm (5<<4) /* Immediate operand. */ #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */ +#define SrcOne (7<<4) /* Implied '1' */ #define SrcMask (7<<4) /* Generic ModRM decode. */ #define ModRM (1<<7) @@ -1004,6 +1005,10 @@ done_prefixes: c->src.bytes = 1; c->src.val = insn_fetch(s8, 1, c->eip); break; + case SrcOne: + c->src.bytes = 1; + c->src.val = 1; + break; } /* -- cgit v1.2.3 From d175226a5f54817ba427368c6b739aefa7780fb2 Mon Sep 17 00:00:00 2001 From: Guillaume Thouvenin Date: Thu, 4 Dec 2008 14:29:00 +0100 Subject: KVM: x86 emulator: add the assembler code for three operands Add the assembler code for instruction with three operands and one operand is stored in ECX register Signed-off-by: Guillaume Thouvenin Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 0c75306e7a07..9ae6d5b3e962 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -431,6 +431,45 @@ static u32 group2_table[] = { __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ "w", "r", _LO32, "r", "", "r") +/* Instruction has three operands and one operand is stored in ECX register */ +#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \ + do { \ + unsigned long _tmp; \ + _type _clv = (_cl).val; \ + _type _srcv = (_src).val; \ + _type _dstv = (_dst).val; \ + \ + __asm__ __volatile__ ( \ + _PRE_EFLAGS("0", "5", "2") \ + _op _suffix " %4,%1 \n" \ + _POST_EFLAGS("0", "5", "2") \ + : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \ + : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \ + ); \ + \ + (_cl).val = (unsigned long) _clv; \ + (_src).val = (unsigned long) _srcv; \ + (_dst).val = (unsigned long) _dstv; \ + } while (0) + +#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \ + do { \ + switch ((_dst).bytes) { \ + case 2: \ + __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ + "w", unsigned short); \ + break; \ + case 4: \ + __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ + "l", unsigned int); \ + break; \ + case 8: \ + ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ + "q", unsigned long)); \ + break; \ + } \ + } while (0) + #define __emulate_1op(_op, _dst, _eflags, _suffix) \ do { 
\ unsigned long _tmp; \ -- cgit v1.2.3 From 9bf8ea42fe22d7d1c48044148fa658cb9083d49c Mon Sep 17 00:00:00 2001 From: Guillaume Thouvenin Date: Thu, 4 Dec 2008 14:30:13 +0100 Subject: KVM: x86 emulator: add the emulation of shld and shrd instructions Add emulation of shld and shrd instructions Signed-off-by: Guillaume Thouvenin Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 9ae6d5b3e962..219dc3110bf1 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -237,9 +237,14 @@ static u32 twobyte_table[256] = { /* 0x90 - 0x9F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xA0 - 0xA7 */ - 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0, + 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, + DstMem | SrcReg | Src2ImmByte | ModRM, + DstMem | SrcReg | Src2CL | ModRM, 0, 0, /* 0xA8 - 0xAF */ - 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, ModRM, 0, + 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, + DstMem | SrcReg | Src2ImmByte | ModRM, + DstMem | SrcReg | Src2CL | ModRM, + ModRM, 0, /* 0xB0 - 0xB7 */ ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0, DstMem | SrcReg | ModRM | BitOp, @@ -2037,12 +2042,20 @@ twobyte_insn: c->src.val &= (c->dst.bytes << 3) - 1; emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags); break; + case 0xa4: /* shld imm8, r, r/m */ + case 0xa5: /* shld cl, r, r/m */ + emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); + break; case 0xab: bts: /* bts */ /* only subword offset */ c->src.val &= (c->dst.bytes << 3) - 1; emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags); break; + case 0xac: /* shrd imm8, r, r/m */ + case 0xad: /* shrd cl, r, r/m */ + emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags); + break; case 0xae: /* clflush */ break; case 0xb0 ... 0xb1: /* cmpxchg */ -- cgit v1.2.3 From fbce554e940a983d005e29849636d0ef54b3eb18 Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Thu, 4 Dec 2008 11:11:40 +0000 Subject: KVM: x86 emulator: Fix handling of VMMCALL instruction The VMMCALL instruction doesn't get recognised and isn't processed by the emulator. This is seen on an Intel host that tries to execute the VMMCALL instruction after a guest live migrates from an AMD host. Signed-off-by: Amit Shah Signed-off-by: Avi Kivity --- arch/x86/kvm/x86_emulate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 219dc3110bf1..d174db7a3370 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -311,7 +311,7 @@ static u32 group_table[] = { static u32 group2_table[] = { [Group7*8] = - SrcNone | ModRM, 0, 0, 0, + SrcNone | ModRM, 0, 0, SrcNone | ModRM, SrcNone | ModRM | DstMem | Mov, 0, SrcMem16 | ModRM | Mov, 0, }; -- cgit v1.2.3 From 60c8aec6e2c9923492dabbd6b67e34692bd26c20 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Mon, 1 Dec 2008 22:32:02 -0200 Subject: KVM: MMU: use page array in unsync walk Instead of invoking the handler directly collect pages into an array so the caller can work with it. Simplifies TLB flush collapsing. 
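The shape of the change is a gather-then-process pattern: the walker fills a small fixed-size array, the caller drains it, and a costly per-item operation (here a remote TLB flush) becomes a per-batch one. A stripped-down illustration with invented names and plain integers standing in for shadow pages:

#include <stdio.h>

#define BATCH_NR 4

struct batch {
	int item[BATCH_NR];
	unsigned int nr;
};

/* Returns nonzero once the batch is full and should be drained. */
static int batch_add(struct batch *b, int item)
{
	b->item[b->nr++] = item;
	return b->nr == BATCH_NR;
}

int main(void)
{
	int work[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	unsigned int i, n = sizeof(work) / sizeof(work[0]);
	struct batch b = { .nr = 0 };
	int flushes = 0;

	for (i = 0; i < n; i++) {
		int full = batch_add(&b, work[i]);

		if (full || i == n - 1) {
			/* one expensive operation per drained batch, not per item */
			printf("flush %d covers %u item(s)\n", ++flushes, b.nr);
			b.nr = 0;
		}
	}
	return 0;
}

The follow-up patch below relies on exactly this: with a whole batch visible at once, write-protecting every page in it can be followed by a single kvm_flush_remote_tlbs() call.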
Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/mmu.c | 195 ++++++++++++++++++++++++++++------------ 2 files changed, 141 insertions(+), 56 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f58f7ebdea81..93d0aed35880 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -200,7 +200,7 @@ struct kvm_mmu_page { int multimapped; /* More than one parent_pte? */ int root_count; /* Currently serving as active root */ bool unsync; - bool unsync_children; + unsigned int unsync_children; union { u64 *parent_pte; /* !multimapped */ struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */ diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index dd20b199a7c0..7ce92f78f337 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -908,8 +908,9 @@ static void kvm_mmu_update_unsync_bitmap(u64 *spte) struct kvm_mmu_page *sp = page_header(__pa(spte)); index = spte - sp->spt; - __set_bit(index, sp->unsync_child_bitmap); - sp->unsync_children = 1; + if (!__test_and_set_bit(index, sp->unsync_child_bitmap)) + sp->unsync_children++; + WARN_ON(!sp->unsync_children); } static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp) @@ -936,7 +937,6 @@ static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp) static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { - sp->unsync_children = 1; kvm_mmu_update_parents_unsync(sp); return 1; } @@ -967,18 +967,41 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { } +#define KVM_PAGE_ARRAY_NR 16 + +struct kvm_mmu_pages { + struct mmu_page_and_offset { + struct kvm_mmu_page *sp; + unsigned int idx; + } page[KVM_PAGE_ARRAY_NR]; + unsigned int nr; +}; + #define for_each_unsync_children(bitmap, idx) \ for (idx = find_first_bit(bitmap, 512); \ idx < 512; \ idx = find_next_bit(bitmap, 512, idx+1)) -static int mmu_unsync_walk(struct kvm_mmu_page *sp, - struct kvm_unsync_walk *walker) +int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, + int idx) { - int i, ret; + int i; - if (!sp->unsync_children) - return 0; + if (sp->unsync) + for (i=0; i < pvec->nr; i++) + if (pvec->page[i].sp == sp) + return 0; + + pvec->page[pvec->nr].sp = sp; + pvec->page[pvec->nr].idx = idx; + pvec->nr++; + return (pvec->nr == KVM_PAGE_ARRAY_NR); +} + +static int __mmu_unsync_walk(struct kvm_mmu_page *sp, + struct kvm_mmu_pages *pvec) +{ + int i, ret, nr_unsync_leaf = 0; for_each_unsync_children(sp->unsync_child_bitmap, i) { u64 ent = sp->spt[i]; @@ -988,17 +1011,22 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp, child = page_header(ent & PT64_BASE_ADDR_MASK); if (child->unsync_children) { - ret = mmu_unsync_walk(child, walker); - if (ret) + if (mmu_pages_add(pvec, child, i)) + return -ENOSPC; + + ret = __mmu_unsync_walk(child, pvec); + if (!ret) + __clear_bit(i, sp->unsync_child_bitmap); + else if (ret > 0) + nr_unsync_leaf += ret; + else return ret; - __clear_bit(i, sp->unsync_child_bitmap); } if (child->unsync) { - ret = walker->entry(child, walker); - __clear_bit(i, sp->unsync_child_bitmap); - if (ret) - return ret; + nr_unsync_leaf++; + if (mmu_pages_add(pvec, child, i)) + return -ENOSPC; } } } @@ -1006,7 +1034,17 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp, if (find_first_bit(sp->unsync_child_bitmap, 512) == 512) sp->unsync_children = 0; - return 0; + return nr_unsync_leaf; +} + +static int mmu_unsync_walk(struct kvm_mmu_page *sp, + struct 
kvm_mmu_pages *pvec) +{ + if (!sp->unsync_children) + return 0; + + mmu_pages_add(pvec, sp, 0); + return __mmu_unsync_walk(sp, pvec); } static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn) @@ -1056,30 +1094,81 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) return 0; } -struct sync_walker { - struct kvm_vcpu *vcpu; - struct kvm_unsync_walk walker; +struct mmu_page_path { + struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1]; + unsigned int idx[PT64_ROOT_LEVEL-1]; }; -static int mmu_sync_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk) +#define for_each_sp(pvec, sp, parents, i) \ + for (i = mmu_pages_next(&pvec, &parents, -1), \ + sp = pvec.page[i].sp; \ + i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ + i = mmu_pages_next(&pvec, &parents, i)) + +int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents, + int i) { - struct sync_walker *sync_walk = container_of(walk, struct sync_walker, - walker); - struct kvm_vcpu *vcpu = sync_walk->vcpu; + int n; + + for (n = i+1; n < pvec->nr; n++) { + struct kvm_mmu_page *sp = pvec->page[n].sp; + + if (sp->role.level == PT_PAGE_TABLE_LEVEL) { + parents->idx[0] = pvec->page[n].idx; + return n; + } - kvm_sync_page(vcpu, sp); - return (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)); + parents->parent[sp->role.level-2] = sp; + parents->idx[sp->role.level-1] = pvec->page[n].idx; + } + + return n; } -static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +void mmu_pages_clear_parents(struct mmu_page_path *parents) { - struct sync_walker walker = { - .walker = { .entry = mmu_sync_fn, }, - .vcpu = vcpu, - }; + struct kvm_mmu_page *sp; + unsigned int level = 0; + + do { + unsigned int idx = parents->idx[level]; + + sp = parents->parent[level]; + if (!sp) + return; + + --sp->unsync_children; + WARN_ON((int)sp->unsync_children < 0); + __clear_bit(idx, sp->unsync_child_bitmap); + level++; + } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children); +} + +static void kvm_mmu_pages_init(struct kvm_mmu_page *parent, + struct mmu_page_path *parents, + struct kvm_mmu_pages *pvec) +{ + parents->parent[parent->role.level-1] = NULL; + pvec->nr = 0; +} - while (mmu_unsync_walk(sp, &walker.walker)) +static void mmu_sync_children(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *parent) +{ + int i; + struct kvm_mmu_page *sp; + struct mmu_page_path parents; + struct kvm_mmu_pages pages; + + kvm_mmu_pages_init(parent, &parents, &pages); + while (mmu_unsync_walk(parent, &pages)) { + for_each_sp(pages, sp, parents, i) { + kvm_sync_page(vcpu, sp); + mmu_pages_clear_parents(&parents); + } cond_resched_lock(&vcpu->kvm->mmu_lock); + kvm_mmu_pages_init(parent, &parents, &pages); + } } static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, @@ -1245,33 +1334,29 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) } } -struct zap_walker { - struct kvm_unsync_walk walker; - struct kvm *kvm; - int zapped; -}; - -static int mmu_zap_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk) -{ - struct zap_walker *zap_walk = container_of(walk, struct zap_walker, - walker); - kvm_mmu_zap_page(zap_walk->kvm, sp); - zap_walk->zapped = 1; - return 0; -} - -static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *sp) +static int mmu_zap_unsync_children(struct kvm *kvm, + struct kvm_mmu_page *parent) { - struct zap_walker walker = { - .walker = { .entry = mmu_zap_fn, }, - .kvm = kvm, - .zapped = 0, - }; + int i, zapped = 0; + struct 
mmu_page_path parents; + struct kvm_mmu_pages pages; - if (sp->role.level == PT_PAGE_TABLE_LEVEL) + if (parent->role.level == PT_PAGE_TABLE_LEVEL) return 0; - mmu_unsync_walk(sp, &walker.walker); - return walker.zapped; + + kvm_mmu_pages_init(parent, &parents, &pages); + while (mmu_unsync_walk(parent, &pages)) { + struct kvm_mmu_page *sp; + + for_each_sp(pages, sp, parents, i) { + kvm_mmu_zap_page(kvm, sp); + mmu_pages_clear_parents(&parents); + } + zapped += pages.nr; + kvm_mmu_pages_init(parent, &parents, &pages); + } + + return zapped; } static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) -- cgit v1.2.3 From b1a368218ad5b6e62380c8f206f16e6f18bf154c Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Mon, 1 Dec 2008 22:32:03 -0200 Subject: KVM: MMU: collapse remote TLB flushes on root sync Collapse remote TLB flushes on root sync. kernbench is 2.7% faster on 4-way guest. Improvements have been seen with other loads such as AIM7. Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 7ce92f78f337..58c35dead321 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -621,7 +621,7 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte) return NULL; } -static void rmap_write_protect(struct kvm *kvm, u64 gfn) +static int rmap_write_protect(struct kvm *kvm, u64 gfn) { unsigned long *rmapp; u64 *spte; @@ -667,8 +667,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn) spte = rmap_next(kvm, rmapp, spte); } - if (write_protected) - kvm_flush_remote_tlbs(kvm); + return write_protected; } static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) @@ -1083,7 +1082,8 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) return 1; } - rmap_write_protect(vcpu->kvm, sp->gfn); + if (rmap_write_protect(vcpu->kvm, sp->gfn)) + kvm_flush_remote_tlbs(vcpu->kvm); kvm_unlink_unsync_page(vcpu->kvm, sp); if (vcpu->arch.mmu.sync_page(vcpu, sp)) { kvm_mmu_zap_page(vcpu->kvm, sp); @@ -1162,6 +1162,14 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, kvm_mmu_pages_init(parent, &parents, &pages); while (mmu_unsync_walk(parent, &pages)) { + int protected = 0; + + for_each_sp(pages, sp, parents, i) + protected |= rmap_write_protect(vcpu->kvm, sp->gfn); + + if (protected) + kvm_flush_remote_tlbs(vcpu->kvm); + for_each_sp(pages, sp, parents, i) { kvm_sync_page(vcpu, sp); mmu_pages_clear_parents(&parents); @@ -1226,7 +1234,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, sp->role = role; hlist_add_head(&sp->hash_link, bucket); if (!metaphysical) { - rmap_write_protect(vcpu->kvm, gfn); + if (rmap_write_protect(vcpu->kvm, gfn)) + kvm_flush_remote_tlbs(vcpu->kvm); account_shadowed(vcpu->kvm, gfn); } if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte) -- cgit v1.2.3 From 6cffe8ca4a2adf1ac5003d9cad08fe4434d6eee0 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Mon, 1 Dec 2008 22:32:04 -0200 Subject: KVM: MMU: skip global pgtables on sync due to cr3 switch Skip syncing global pages on cr3 switch (but not on cr4/cr0). This is important for Linux 32-bit guests with PAE, where the kmap page is marked as global. 
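The x86 rule this optimization leans on can be written down as a small predicate; the sketch below is illustration only, with the relevant bit positions spelled out locally rather than taken from kernel headers:

#include <stdbool.h>
#include <stdio.h>

#define PTE_GLOBAL	(1ULL << 8)	/* G bit of a PAE/long-mode pte */
#define CR4_PGE		(1UL << 7)	/* global-page enable */

/*
 * A TLB entry created from a pte with the G bit set (while CR4.PGE is
 * enabled) survives a MOV to CR3; it is only dropped by invlpg or by
 * writes that toggle the CR0/CR4 paging controls.
 */
static bool cr3_write_flushes(unsigned long long pte, unsigned long cr4)
{
	bool global = (pte & PTE_GLOBAL) && (cr4 & CR4_PGE);

	return !global;
}

int main(void)
{
	printf("global pte, PGE set    : flushed by cr3 write? %d\n",
	       cr3_write_flushes(PTE_GLOBAL, CR4_PGE));
	printf("global pte, PGE cleared: flushed by cr3 write? %d\n",
	       cr3_write_flushes(PTE_GLOBAL, 0));
	return 0;
}

Since the guest cannot expect a cr3 load to invalidate its global mappings, their shadow pages only need to be re-synced from the cr0/cr4 paths, which is what kvm_mmu_sync_global() below provides.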
Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 6 +++++ arch/x86/kvm/mmu.c | 53 ++++++++++++++++++++++++++++++++++++----- arch/x86/kvm/paging_tmpl.h | 10 ++++---- arch/x86/kvm/x86.c | 4 ++++ 4 files changed, 63 insertions(+), 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 93d0aed35880..65b1ed295698 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -182,6 +182,8 @@ struct kvm_mmu_page { struct list_head link; struct hlist_node hash_link; + struct list_head oos_link; + /* * The following two entries are used to key the shadow page in the * hash table. @@ -200,6 +202,7 @@ struct kvm_mmu_page { int multimapped; /* More than one parent_pte? */ int root_count; /* Currently serving as active root */ bool unsync; + bool global; unsigned int unsync_children; union { u64 *parent_pte; /* !multimapped */ @@ -356,6 +359,7 @@ struct kvm_arch{ */ struct list_head active_mmu_pages; struct list_head assigned_dev_head; + struct list_head oos_global_pages; struct dmar_domain *intel_iommu_domain; struct kvm_pic *vpic; struct kvm_ioapic *vioapic; @@ -385,6 +389,7 @@ struct kvm_vm_stat { u32 mmu_recycled; u32 mmu_cache_miss; u32 mmu_unsync; + u32 mmu_unsync_global; u32 remote_tlb_flush; u32 lpages; }; @@ -603,6 +608,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); int kvm_mmu_load(struct kvm_vcpu *vcpu); void kvm_mmu_unload(struct kvm_vcpu *vcpu); void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); +void kvm_mmu_sync_global(struct kvm_vcpu *vcpu); int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 58c35dead321..cbac9e4b156f 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -793,9 +793,11 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE); set_page_private(virt_to_page(sp->spt), (unsigned long)sp); list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); + INIT_LIST_HEAD(&sp->oos_link); ASSERT(is_empty_shadow_page(sp->spt)); bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); sp->multimapped = 0; + sp->global = 1; sp->parent_pte = parent_pte; --vcpu->kvm->arch.n_free_mmu_pages; return sp; @@ -1066,10 +1068,18 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn) return NULL; } +static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp) +{ + list_del(&sp->oos_link); + --kvm->stat.mmu_unsync_global; +} + static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) { WARN_ON(!sp->unsync); sp->unsync = 0; + if (sp->global) + kvm_unlink_unsync_global(kvm, sp); --kvm->stat.mmu_unsync; } @@ -1615,9 +1625,15 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) if (s->role.word != sp->role.word) return 1; } - kvm_mmu_mark_parents_unsync(vcpu, sp); ++vcpu->kvm->stat.mmu_unsync; sp->unsync = 1; + + if (sp->global) { + list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages); + ++vcpu->kvm->stat.mmu_unsync_global; + } else + kvm_mmu_mark_parents_unsync(vcpu, sp); + mmu_convert_notrap(sp); return 0; } @@ -1643,12 +1659,21 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, unsigned pte_access, int user_fault, int write_fault, int dirty, int largepage, - gfn_t gfn, pfn_t pfn, bool speculative, + int global, gfn_t gfn, 
pfn_t pfn, bool speculative, bool can_unsync) { u64 spte; int ret = 0; u64 mt_mask = shadow_mt_mask; + struct kvm_mmu_page *sp = page_header(__pa(shadow_pte)); + + if (!global && sp->global) { + sp->global = 0; + if (sp->unsync) { + kvm_unlink_unsync_global(vcpu->kvm, sp); + kvm_mmu_mark_parents_unsync(vcpu, sp); + } + } /* * We don't set the accessed bit, since we sometimes want to see @@ -1717,8 +1742,8 @@ set_pte: static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, unsigned pt_access, unsigned pte_access, int user_fault, int write_fault, int dirty, - int *ptwrite, int largepage, gfn_t gfn, - pfn_t pfn, bool speculative) + int *ptwrite, int largepage, int global, + gfn_t gfn, pfn_t pfn, bool speculative) { int was_rmapped = 0; int was_writeble = is_writeble_pte(*shadow_pte); @@ -1751,7 +1776,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, } } if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault, - dirty, largepage, gfn, pfn, speculative, true)) { + dirty, largepage, global, gfn, pfn, speculative, true)) { if (write_fault) *ptwrite = 1; kvm_x86_ops->tlb_flush(vcpu); @@ -1808,7 +1833,7 @@ static int direct_map_entry(struct kvm_shadow_walk *_walk, || (walk->largepage && level == PT_DIRECTORY_LEVEL)) { mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL, 0, walk->write, 1, &walk->pt_write, - walk->largepage, gfn, walk->pfn, false); + walk->largepage, 0, gfn, walk->pfn, false); ++vcpu->stat.pf_fixed; return 1; } @@ -1995,6 +2020,15 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu) } } +static void mmu_sync_global(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_mmu_page *sp, *n; + + list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link) + kvm_sync_page(vcpu, sp); +} + void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) { spin_lock(&vcpu->kvm->mmu_lock); @@ -2002,6 +2036,13 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) spin_unlock(&vcpu->kvm->mmu_lock); } +void kvm_mmu_sync_global(struct kvm_vcpu *vcpu) +{ + spin_lock(&vcpu->kvm->mmu_lock); + mmu_sync_global(vcpu); + spin_unlock(&vcpu->kvm->mmu_lock); +} + static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr) { return vaddr; diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 84eee43bbe74..e644d81979b6 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -274,7 +274,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page, return; kvm_get_pfn(pfn); mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, - gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte), + gpte & PT_DIRTY_MASK, NULL, largepage, + gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte), pfn, true); } @@ -301,8 +302,9 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw, mmu_set_spte(vcpu, sptep, access, gw->pte_access & access, sw->user_fault, sw->write_fault, gw->ptes[gw->level-1] & PT_DIRTY_MASK, - sw->ptwrite, sw->largepage, gw->gfn, sw->pfn, - false); + sw->ptwrite, sw->largepage, + gw->ptes[gw->level-1] & PT_GLOBAL_MASK, + gw->gfn, sw->pfn, false); sw->sptep = sptep; return 1; } @@ -580,7 +582,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) nr_present++; pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, - is_dirty_pte(gpte), 0, gfn, + is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn, spte_to_pfn(sp->spt[i]), true, false); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 
7a2aeba0bfbd..774db00d2db6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -104,6 +104,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "mmu_recycled", VM_STAT(mmu_recycled) }, { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, + { "mmu_unsync_global", VM_STAT(mmu_unsync_global) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { "largepages", VM_STAT(lpages) }, { NULL } @@ -315,6 +316,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) kvm_x86_ops->set_cr0(vcpu, cr0); vcpu->arch.cr0 = cr0; + kvm_mmu_sync_global(vcpu); kvm_mmu_reset_context(vcpu); return; } @@ -358,6 +360,7 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) } kvm_x86_ops->set_cr4(vcpu, cr4); vcpu->arch.cr4 = cr4; + kvm_mmu_sync_global(vcpu); kvm_mmu_reset_context(vcpu); } EXPORT_SYMBOL_GPL(kvm_set_cr4); @@ -4113,6 +4116,7 @@ struct kvm *kvm_arch_create_vm(void) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); + INIT_LIST_HEAD(&kvm->arch.oos_global_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ -- cgit v1.2.3 From ad218f85e388e8ca816ff09d91c246cd014c53a8 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Mon, 1 Dec 2008 22:32:05 -0200 Subject: KVM: MMU: prepopulate the shadow on invlpg If the guest executes invlpg, peek into the pagetable and attempt to prepopulate the shadow entry. Also stop dirty fault updates from interfering with the fork detector. 2% improvement on RHEL3/AIM7. Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/mmu.c | 25 +++++++++++++------------ arch/x86/kvm/paging_tmpl.h | 25 ++++++++++++++++++++++++- arch/x86/kvm/x86.c | 2 +- 4 files changed, 40 insertions(+), 15 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 65b1ed295698..97215a458e5f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -602,7 +602,8 @@ unsigned long segment_base(u16 selector); void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu); void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, - const u8 *new, int bytes); + const u8 *new, int bytes, + bool guest_initiated); int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); int kvm_mmu_load(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index cbac9e4b156f..863baf70506e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2441,7 +2441,8 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn) } void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, - const u8 *new, int bytes) + const u8 *new, int bytes, + bool guest_initiated) { gfn_t gfn = gpa >> PAGE_SHIFT; struct kvm_mmu_page *sp; @@ -2467,15 +2468,17 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, kvm_mmu_free_some_pages(vcpu); ++vcpu->kvm->stat.mmu_pte_write; kvm_mmu_audit(vcpu, "pre pte write"); - if (gfn == vcpu->arch.last_pt_write_gfn - && !last_updated_pte_accessed(vcpu)) { - ++vcpu->arch.last_pt_write_count; - if (vcpu->arch.last_pt_write_count >= 3) - flooded = 1; - } else { - vcpu->arch.last_pt_write_gfn = gfn; - vcpu->arch.last_pt_write_count = 1; - vcpu->arch.last_pte_updated = NULL; + if (guest_initiated) { + if (gfn == vcpu->arch.last_pt_write_gfn + && !last_updated_pte_accessed(vcpu)) { + ++vcpu->arch.last_pt_write_count; + if 
(vcpu->arch.last_pt_write_count >= 3) + flooded = 1; + } else { + vcpu->arch.last_pt_write_gfn = gfn; + vcpu->arch.last_pt_write_count = 1; + vcpu->arch.last_pte_updated = NULL; + } } index = kvm_page_table_hashfn(gfn); bucket = &vcpu->kvm->arch.mmu_page_hash[index]; @@ -2615,9 +2618,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { - spin_lock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu.invlpg(vcpu, gva); - spin_unlock(&vcpu->kvm->mmu_lock); kvm_mmu_flush_tlb(vcpu); ++vcpu->stat.invlpg; } diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index e644d81979b6..d20640154216 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -82,6 +82,7 @@ struct shadow_walker { int *ptwrite; pfn_t pfn; u64 *sptep; + gpa_t pte_gpa; }; static gfn_t gpte_to_gfn(pt_element_t gpte) @@ -222,7 +223,7 @@ walk: if (ret) goto walk; pte |= PT_DIRTY_MASK; - kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte)); + kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte), 0); walker->ptes[walker->level - 1] = pte; } @@ -468,8 +469,15 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw, struct kvm_vcpu *vcpu, u64 addr, u64 *sptep, int level) { + struct shadow_walker *sw = + container_of(_sw, struct shadow_walker, walker); if (level == PT_PAGE_TABLE_LEVEL) { + struct kvm_mmu_page *sp = page_header(__pa(sptep)); + + sw->pte_gpa = (sp->gfn << PAGE_SHIFT); + sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); + if (is_shadow_present_pte(*sptep)) rmap_remove(vcpu->kvm, sptep); set_shadow_pte(sptep, shadow_trap_nonpresent_pte); @@ -482,11 +490,26 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw, static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) { + pt_element_t gpte; struct shadow_walker walker = { .walker = { .entry = FNAME(shadow_invlpg_entry), }, + .pte_gpa = -1, }; + spin_lock(&vcpu->kvm->mmu_lock); walk_shadow(&walker.walker, vcpu, gva); + spin_unlock(&vcpu->kvm->mmu_lock); + if (walker.pte_gpa == -1) + return; + if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte, + sizeof(pt_element_t))) + return; + if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) { + if (mmu_topup_memory_caches(vcpu)) + return; + kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte, + sizeof(pt_element_t), 0); + } } static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 774db00d2db6..ba102879de33 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2046,7 +2046,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); if (ret < 0) return 0; - kvm_mmu_pte_write(vcpu, gpa, val, bytes); + kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1); return 1; } -- cgit v1.2.3 From e93353c93a3ba4215633ce930784f40a4e94e3f9 Mon Sep 17 00:00:00 2001 From: Eduardo Habkost Date: Fri, 5 Dec 2008 18:36:45 -0200 Subject: x86: KVM guest: kvm_get_tsc_khz: return khz, not lpj kvm_get_tsc_khz() currently returns the previously-calculated preset_lpj value, but it is in loops-per-jiffy, not kHz. The current code works correctly only when HZ=1000. 
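The mismatch is plain arithmetic: preset_lpj is derived from the TSC frequency as khz * 1000 / HZ (see kvm_get_preset_lpj() in the diff below), so it only coincides with the kHz value when HZ is 1000. A throwaway illustration, using a made-up 2.4 GHz TSC:

#include <stdio.h>

int main(void)
{
	const unsigned long khz = 2400000;	/* invented example frequency */
	const unsigned long hz_values[] = { 100, 250, 300, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		unsigned long hz = hz_values[i];
		unsigned long lpj = khz * 1000UL / hz;	/* mirrors kvm_get_preset_lpj() */

		printf("HZ=%-4lu  lpj=%-10lu  lpj == khz? %s\n",
		       hz, lpj, lpj == khz ? "yes" : "no");
	}
	return 0;
}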
Signed-off-by: Eduardo Habkost Signed-off-by: Avi Kivity --- arch/x86/kernel/kvmclock.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index b38e801014e3..652fce6d2cce 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -89,17 +89,17 @@ static cycle_t kvm_clock_read(void) */ static unsigned long kvm_get_tsc_khz(void) { - return preset_lpj; + struct pvclock_vcpu_time_info *src; + src = &per_cpu(hv_clock, 0); + return pvclock_tsc_khz(src); } static void kvm_get_preset_lpj(void) { - struct pvclock_vcpu_time_info *src; unsigned long khz; u64 lpj; - src = &per_cpu(hv_clock, 0); - khz = pvclock_tsc_khz(src); + khz = kvm_get_tsc_khz(); lpj = ((u64)khz * 1000); do_div(lpj, HZ); -- cgit v1.2.3 From ca9edaee1aea34ebd9adb48910aba0b3d64b1b22 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Mon, 8 Dec 2008 18:29:29 +0200 Subject: KVM: Consolidate userspace memory capability reporting into common code Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 1 - arch/powerpc/kvm/powerpc.c | 3 --- arch/s390/kvm/kvm-s390.c | 2 -- arch/x86/kvm/x86.c | 1 - virt/kvm/kvm_main.c | 1 + 5 files changed, 1 insertion(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index b4d24e2cce40..d2eb9691d61d 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -180,7 +180,6 @@ int kvm_dev_ioctl_check_extension(long ext) switch (ext) { case KVM_CAP_IRQCHIP: - case KVM_CAP_USER_MEMORY: case KVM_CAP_MP_STATE: r = 1; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 1deda37cb771..2822c8ccfaaf 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -137,9 +137,6 @@ int kvm_dev_ioctl_check_extension(long ext) int r; switch (ext) { - case KVM_CAP_USER_MEMORY: - r = 1; - break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 76f05ddaef10..be8497186b96 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -113,8 +113,6 @@ long kvm_arch_dev_ioctl(struct file *filp, int kvm_dev_ioctl_check_extension(long ext) { switch (ext) { - case KVM_CAP_USER_MEMORY: - return 1; default: return 0; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ba102879de33..10302d3bd415 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -964,7 +964,6 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_IRQCHIP: case KVM_CAP_HLT: case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: - case KVM_CAP_USER_MEMORY: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: case KVM_CAP_CLOCKSOURCE: diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e066eb125e55..eb70ca6c7145 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1908,6 +1908,7 @@ static int kvm_dev_ioctl_create_vm(void) static long kvm_dev_ioctl_check_extension_generic(long arg) { switch (arg) { + case KVM_CAP_USER_MEMORY: case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: return 1; default: -- cgit v1.2.3 From eb64f1e8cd5c3cae912db30a77d062367f7a11a6 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 9 Dec 2008 16:07:22 +0100 Subject: KVM: MMU: check for present pdptr shadow page in walk_shadow walk_shadow assumes the caller verified validity of the pdptr pointer in question, which is not the case for the invlpg handler. Fixes oops during Solaris 10 install. 
Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 863baf70506e..641c07844e6e 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1269,6 +1269,8 @@ static int walk_shadow(struct kvm_shadow_walk *walker, if (level == PT32E_ROOT_LEVEL) { shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; shadow_addr &= PT64_BASE_ADDR_MASK; + if (!shadow_addr) + return 1; --level; } -- cgit v1.2.3 From 264ff01d55b456932cef03082448b41d2edeb6a1 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Mon, 24 Nov 2008 12:26:19 +0100 Subject: KVM: VMX: Fix pending NMI-vs.-IRQ race for user space irqchip As with the kernel irqchip, don't allow an NMI to stomp over an already injected IRQ; instead wait for the IRQ injection to be completed. Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e446f232588e..487e1dcdce33 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2486,7 +2486,9 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, vmx_update_window_states(vcpu); if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { - if (vcpu->arch.nmi_window_open) { + if (vcpu->arch.interrupt.pending) { + enable_nmi_window(vcpu); + } else if (vcpu->arch.nmi_window_open) { vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = true; } else { -- cgit v1.2.3 From 4531220b71f0399e71cda0c4cf749e7281a7416a Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Thu, 11 Dec 2008 16:54:54 +0100 Subject: KVM: x86: Rework user space NMI injection as KVM_CAP_USER_NMI There is no point in doing the ready_for_nmi_injection/ request_nmi_window dance with user space. First, we don't do this for in-kernel irqchip anyway, while the code path is the same as for user space irqchip mode. And second, there is nothing to loose if a pending NMI is overwritten by another one (in contrast to IRQs where we have to save the number). Actually, there is even the risk of raising spurious NMIs this way because the reason for the held-back NMI might already be handled while processing the first one. Therefore this patch creates a simplified user space NMI injection interface, exporting it under KVM_CAP_USER_NMI and dropping the old KVM_CAP_NMI capability. And this time we also take care to provide the interface only on archs supporting NMIs via KVM (right now only x86). 
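As a usage sketch of the resulting interface: the hypothetical caller below is not part of this patch and assumes the KVM_NMI vcpu ioctl that KVM_CAP_USER_NMI advertises; real VMM code would already hold the system and vcpu file descriptors.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Queue one NMI for a vcpu.  KVM_NMI carries no payload, and an NMI queued
 * before an earlier one is delivered simply replaces it, which matches the
 * behaviour argued for above.
 */
static int inject_guest_nmi(int sys_fd, int vcpu_fd)
{
	if (ioctl(sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_NMI) <= 0) {
		fprintf(stderr, "KVM_CAP_USER_NMI not available\n");
		return -1;
	}
	return ioctl(vcpu_fd, KVM_NMI);
}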
Signed-off-by: Jan Kiszka Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 24 ++---------------------- arch/x86/kvm/x86.c | 28 ++-------------------------- include/linux/kvm.h | 11 +++++------ 3 files changed, 9 insertions(+), 54 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 487e1dcdce33..6259d7467648 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2498,15 +2498,13 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu, } if (vcpu->arch.nmi_injected) { vmx_inject_nmi(vcpu); - if (vcpu->arch.nmi_pending || kvm_run->request_nmi_window) + if (vcpu->arch.nmi_pending) enable_nmi_window(vcpu); else if (vcpu->arch.irq_summary || kvm_run->request_interrupt_window) enable_irq_window(vcpu); return; } - if (!vcpu->arch.nmi_window_open || kvm_run->request_nmi_window) - enable_nmi_window(vcpu); if (vcpu->arch.interrupt_window_open) { if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) @@ -3040,14 +3038,6 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); ++vcpu->stat.nmi_window_exits; - /* - * If the user space waits to inject a NMI, exit as soon as possible - */ - if (kvm_run->request_nmi_window && !vcpu->arch.nmi_pending) { - kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN; - return 0; - } - return 1; } @@ -3162,7 +3152,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) vmx->soft_vnmi_blocked = 0; vcpu->arch.nmi_window_open = 1; } else if (vmx->vnmi_blocked_time > 1000000000LL && - (kvm_run->request_nmi_window || vcpu->arch.nmi_pending)) { + vcpu->arch.nmi_pending) { /* * This CPU don't support us in finding the end of an * NMI-blocked window if the guest runs with IRQs @@ -3175,16 +3165,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) vmx->soft_vnmi_blocked = 0; vmx->vcpu.arch.nmi_window_open = 1; } - - /* - * If the user space waits to inject an NNI, exit ASAP - */ - if (vcpu->arch.nmi_window_open && kvm_run->request_nmi_window - && !vcpu->arch.nmi_pending) { - kvm_run->exit_reason = KVM_EXIT_NMI_WINDOW_OPEN; - ++vcpu->stat.nmi_window_exits; - return 0; - } } if (exit_reason < kvm_vmx_max_exit_handlers diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 10302d3bd415..0e6aa8141dcd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2887,37 +2887,18 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu, (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF)); } -/* - * Check if userspace requested a NMI window, and that the NMI window - * is open. - * - * No need to exit to userspace if we already have a NMI queued. 
- */ -static int dm_request_for_nmi_injection(struct kvm_vcpu *vcpu, - struct kvm_run *kvm_run) -{ - return (!vcpu->arch.nmi_pending && - kvm_run->request_nmi_window && - vcpu->arch.nmi_window_open); -} - static void post_kvm_run_save(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); - if (irqchip_in_kernel(vcpu->kvm)) { + if (irqchip_in_kernel(vcpu->kvm)) kvm_run->ready_for_interrupt_injection = 1; - kvm_run->ready_for_nmi_injection = 1; - } else { + else kvm_run->ready_for_interrupt_injection = (vcpu->arch.interrupt_window_open && vcpu->arch.irq_summary == 0); - kvm_run->ready_for_nmi_injection = - (vcpu->arch.nmi_window_open && - vcpu->arch.nmi_pending == 0); - } } static void vapic_enter(struct kvm_vcpu *vcpu) @@ -3093,11 +3074,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } if (r > 0) { - if (dm_request_for_nmi_injection(vcpu, kvm_run)) { - r = -EINTR; - kvm_run->exit_reason = KVM_EXIT_NMI; - ++vcpu->stat.request_nmi_exits; - } if (dm_request_for_irq_injection(vcpu, kvm_run)) { r = -EINTR; kvm_run->exit_reason = KVM_EXIT_INTR; diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 48807767e726..35525ac63337 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -84,21 +84,18 @@ struct kvm_irqchip { #define KVM_EXIT_S390_RESET 14 #define KVM_EXIT_DCR 15 #define KVM_EXIT_NMI 16 -#define KVM_EXIT_NMI_WINDOW_OPEN 17 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { /* in */ __u8 request_interrupt_window; - __u8 request_nmi_window; - __u8 padding1[6]; + __u8 padding1[7]; /* out */ __u32 exit_reason; __u8 ready_for_interrupt_injection; __u8 if_flag; - __u8 ready_for_nmi_injection; - __u8 padding2; + __u8 padding2[2]; /* in (pre_kvm_run), out (post_kvm_run) */ __u64 cr8; @@ -391,12 +388,14 @@ struct kvm_trace_rec { #define KVM_CAP_DEVICE_ASSIGNMENT 17 #endif #define KVM_CAP_IOMMU 18 -#define KVM_CAP_NMI 19 #if defined(CONFIG_X86) #define KVM_CAP_DEVICE_MSI 20 #endif /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 +#if defined(CONFIG_X86) +#define KVM_CAP_USER_NMI 22 +#endif /* * ioctls for VM fds -- cgit v1.2.3 From 25e2343246fe135fce672f41abe61e9d2c38caac Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 21 Dec 2008 18:31:10 +0200 Subject: KVM: MMU: Don't treat a global pte as such if cr4.pge is cleared The pte.g bit is meaningless if global pages are disabled; deferring mmu page synchronization on these ptes will lead to the guest using stale shadow ptes. Fixes Vista x86 smp bootloader failure. Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 641c07844e6e..d50ebac6a07f 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1669,6 +1669,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, u64 mt_mask = shadow_mt_mask; struct kvm_mmu_page *sp = page_header(__pa(shadow_pte)); + if (!(vcpu->arch.cr4 & X86_CR4_PGE)) + global = 0; if (!global && sp->global) { sp->global = 0; if (sp->unsync) { -- cgit v1.2.3 From 3f353858c98dbe0240dac558a89870f4600f81bb Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Sun, 21 Dec 2008 22:48:32 +0200 Subject: KVM: Add locking to virtual i8259 interrupt controller While most accesses to the i8259 are with the kvm mutex taken, the call to kvm_pic_read_irq() is not. 
We can't easily take the kvm mutex there since the function is called with interrupts disabled. Fix by adding a spinlock to the virtual interrupt controller. Since we can't send an IPI under the spinlock (we also take the same spinlock in an irq disabled context), we defer the IPI until the spinlock is released. Similarly, we defer irq ack notifications until after spinlock release to avoid lock recursion. Signed-off-by: Avi Kivity --- arch/x86/kvm/i8259.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++---- arch/x86/kvm/irq.h | 5 +++++ 2 files changed, 53 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 17e41e165f1a..179dcb0103fd 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -26,10 +26,40 @@ * Port from Qemu. */ #include +#include #include "irq.h" #include +static void pic_lock(struct kvm_pic *s) +{ + spin_lock(&s->lock); +} + +static void pic_unlock(struct kvm_pic *s) +{ + struct kvm *kvm = s->kvm; + unsigned acks = s->pending_acks; + bool wakeup = s->wakeup_needed; + struct kvm_vcpu *vcpu; + + s->pending_acks = 0; + s->wakeup_needed = false; + + spin_unlock(&s->lock); + + while (acks) { + kvm_notify_acked_irq(kvm, __ffs(acks)); + acks &= acks - 1; + } + + if (wakeup) { + vcpu = s->kvm->vcpus[0]; + if (vcpu) + kvm_vcpu_kick(vcpu); + } +} + static void pic_clear_isr(struct kvm_kpic_state *s, int irq) { s->isr &= ~(1 << irq); @@ -136,17 +166,21 @@ static void pic_update_irq(struct kvm_pic *s) void kvm_pic_update_irq(struct kvm_pic *s) { + pic_lock(s); pic_update_irq(s); + pic_unlock(s); } void kvm_pic_set_irq(void *opaque, int irq, int level) { struct kvm_pic *s = opaque; + pic_lock(s); if (irq >= 0 && irq < PIC_NUM_PINS) { pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); pic_update_irq(s); } + pic_unlock(s); } /* @@ -172,6 +206,7 @@ int kvm_pic_read_irq(struct kvm *kvm) int irq, irq2, intno; struct kvm_pic *s = pic_irqchip(kvm); + pic_lock(s); irq = pic_get_irq(&s->pics[0]); if (irq >= 0) { pic_intack(&s->pics[0], irq); @@ -196,6 +231,7 @@ int kvm_pic_read_irq(struct kvm *kvm) intno = s->pics[0].irq_base + irq; } pic_update_irq(s); + pic_unlock(s); kvm_notify_acked_irq(kvm, irq); return intno; @@ -203,7 +239,7 @@ int kvm_pic_read_irq(struct kvm *kvm) void kvm_pic_reset(struct kvm_kpic_state *s) { - int irq, irqbase; + int irq, irqbase, n; struct kvm *kvm = s->pics_state->irq_request_opaque; struct kvm_vcpu *vcpu0 = kvm->vcpus[0]; @@ -214,8 +250,10 @@ void kvm_pic_reset(struct kvm_kpic_state *s) for (irq = 0; irq < PIC_NUM_PINS/2; irq++) { if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0)) - if (s->irr & (1 << irq) || s->isr & (1 << irq)) - kvm_notify_acked_irq(kvm, irq+irqbase); + if (s->irr & (1 << irq) || s->isr & (1 << irq)) { + n = irq + irqbase; + s->pics_state->pending_acks |= 1 << n; + } } s->last_irr = 0; s->irr = 0; @@ -406,6 +444,7 @@ static void picdev_write(struct kvm_io_device *this, printk(KERN_ERR "PIC: non byte write\n"); return; } + pic_lock(s); switch (addr) { case 0x20: case 0x21: @@ -418,6 +457,7 @@ static void picdev_write(struct kvm_io_device *this, elcr_ioport_write(&s->pics[addr & 1], addr, data); break; } + pic_unlock(s); } static void picdev_read(struct kvm_io_device *this, @@ -431,6 +471,7 @@ static void picdev_read(struct kvm_io_device *this, printk(KERN_ERR "PIC: non byte read\n"); return; } + pic_lock(s); switch (addr) { case 0x20: case 0x21: @@ -444,6 +485,7 @@ static void picdev_read(struct kvm_io_device *this, break; } *(unsigned char *)val = data; + pic_unlock(s); } /* 
@@ -459,7 +501,7 @@ static void pic_irq_request(void *opaque, int level) s->output = level; if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) { s->pics[0].isr_ack &= ~(1 << irq); - kvm_vcpu_kick(vcpu); + s->wakeup_needed = true; } } @@ -469,6 +511,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm) s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL); if (!s) return NULL; + spin_lock_init(&s->lock); + s->kvm = kvm; s->pics[0].elcr_mask = 0xf8; s->pics[1].elcr_mask = 0xde; s->irq_request = pic_irq_request; diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index b9e9051650ea..2bf32a03ceec 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -25,6 +25,7 @@ #include #include #include +#include #include "iodev.h" #include "ioapic.h" @@ -59,6 +60,10 @@ struct kvm_kpic_state { }; struct kvm_pic { + spinlock_t lock; + bool wakeup_needed; + unsigned pending_acks; + struct kvm *kvm; struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */ irq_request_func *irq_request; void *irq_request_opaque; -- cgit v1.2.3 From 87917239204d67a316cb89751750f86c9ed3640b Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Mon, 22 Dec 2008 18:49:30 -0200 Subject: KVM: MMU: handle large host sptes on invlpg/resync The invlpg and sync walkers lack knowledge of large host sptes, descending to a non-existent pagetable level. Stop at the directory level in such a case. Fixes SMP Windows XP with hugepages. Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 2 +- arch/x86/kvm/paging_tmpl.h | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index d50ebac6a07f..83f11c7474a1 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1007,7 +1007,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp, for_each_unsync_children(sp->unsync_child_bitmap, i) { u64 ent = sp->spt[i]; - if (is_shadow_present_pte(ent)) { + if (is_shadow_present_pte(ent) && !is_large_pte(ent)) { struct kvm_mmu_page *child; child = page_header(ent & PT64_BASE_ADDR_MASK); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index d20640154216..9fd78b6e17ad 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -472,14 +472,19 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw, struct shadow_walker *sw = container_of(_sw, struct shadow_walker, walker); - if (level == PT_PAGE_TABLE_LEVEL) { + /* FIXME: properly handle invlpg on large guest pages */ + if (level == PT_PAGE_TABLE_LEVEL || + ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) { struct kvm_mmu_page *sp = page_header(__pa(sptep)); sw->pte_gpa = (sp->gfn << PAGE_SHIFT); sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); - if (is_shadow_present_pte(*sptep)) + if (is_shadow_present_pte(*sptep)) { rmap_remove(vcpu->kvm, sptep); + if (is_large_pte(*sptep)) + --vcpu->kvm->stat.lpages; + } set_shadow_pte(sptep, shadow_trap_nonpresent_pte); return 1; } -- cgit v1.2.3 From 18d8fda7c3c9439be04d7ea2e82da2513b121acb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 26 Dec 2008 00:35:37 -0500 Subject: take init_fs to saner place Signed-off-by: Al Viro --- arch/alpha/kernel/init_task.c | 1 - arch/arm/kernel/init_task.c | 1 - arch/avr32/kernel/init_task.c | 1 - arch/blackfin/kernel/init_task.c | 1 - arch/cris/kernel/process.c | 1 - arch/frv/kernel/init_task.c | 1 - arch/h8300/kernel/init_task.c | 1 - arch/ia64/kernel/init_task.c | 1 - arch/m32r/kernel/init_task.c | 1 - arch/m68k/kernel/process.c | 1 -
arch/m68knommu/kernel/init_task.c | 1 - arch/mips/kernel/init_task.c | 1 - arch/mn10300/kernel/init_task.c | 1 - arch/parisc/kernel/init_task.c | 1 - arch/powerpc/kernel/init_task.c | 1 - arch/s390/kernel/init_task.c | 1 - arch/sh/kernel/init_task.c | 1 - arch/sparc/kernel/init_task.c | 1 - arch/um/kernel/init_task.c | 1 - arch/x86/kernel/init_task.c | 1 - arch/xtensa/kernel/init_task.c | 1 - fs/namei.c | 7 +++++++ include/linux/fs_struct.h | 6 ------ include/linux/init_task.h | 1 + 24 files changed, 8 insertions(+), 27 deletions(-) (limited to 'arch/x86') diff --git a/arch/alpha/kernel/init_task.c b/arch/alpha/kernel/init_task.c index 1f762189fa64..c2938e574a40 100644 --- a/arch/alpha/kernel/init_task.c +++ b/arch/alpha/kernel/init_task.c @@ -8,7 +8,6 @@ #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c index 0bbf80625395..e859af349467 100644 --- a/arch/arm/kernel/init_task.c +++ b/arch/arm/kernel/init_task.c @@ -12,7 +12,6 @@ #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/avr32/kernel/init_task.c b/arch/avr32/kernel/init_task.c index 44058469c6ec..993d56ee3cf3 100644 --- a/arch/avr32/kernel/init_task.c +++ b/arch/avr32/kernel/init_task.c @@ -13,7 +13,6 @@ #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c index 6bdba7b21109..2c228c020978 100644 --- a/arch/blackfin/kernel/init_task.c +++ b/arch/blackfin/kernel/init_task.c @@ -33,7 +33,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index 5933656db5a2..60816e876455 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c @@ -37,7 +37,6 @@ * setup. 
*/ -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/frv/kernel/init_task.c b/arch/frv/kernel/init_task.c index e2198815b630..29429a8b7f6a 100644 --- a/arch/frv/kernel/init_task.c +++ b/arch/frv/kernel/init_task.c @@ -10,7 +10,6 @@ #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/h8300/kernel/init_task.c b/arch/h8300/kernel/init_task.c index 93a4899e46c2..cb5dc552da97 100644 --- a/arch/h8300/kernel/init_task.c +++ b/arch/h8300/kernel/init_task.c @@ -12,7 +12,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c index 9d7e1c66faf4..5b0e830c6f33 100644 --- a/arch/ia64/kernel/init_task.c +++ b/arch/ia64/kernel/init_task.c @@ -17,7 +17,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c index 0d658dbb6766..016885c6f260 100644 --- a/arch/m32r/kernel/init_task.c +++ b/arch/m32r/kernel/init_task.c @@ -11,7 +11,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 3042c2bc8c58..632ce016014d 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -40,7 +40,6 @@ * alignment requirements and potentially different initial * setup. 
*/ -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/m68knommu/kernel/init_task.c b/arch/m68knommu/kernel/init_task.c index 344c01aede08..fe282de1d596 100644 --- a/arch/m68knommu/kernel/init_task.c +++ b/arch/m68knommu/kernel/init_task.c @@ -12,7 +12,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c index d72487ad7c15..149cd914526e 100644 --- a/arch/mips/kernel/init_task.c +++ b/arch/mips/kernel/init_task.c @@ -9,7 +9,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/mn10300/kernel/init_task.c b/arch/mn10300/kernel/init_task.c index af16f6e5c918..5ac3566f8c98 100644 --- a/arch/mn10300/kernel/init_task.c +++ b/arch/mn10300/kernel/init_task.c @@ -18,7 +18,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c index f5941c086551..1e25a45d64c1 100644 --- a/arch/parisc/kernel/init_task.c +++ b/arch/parisc/kernel/init_task.c @@ -34,7 +34,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c index 4c85b8d56478..688b329800bd 100644 --- a/arch/powerpc/kernel/init_task.c +++ b/arch/powerpc/kernel/init_task.c @@ -7,7 +7,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c index e80716843619..7db95c0b8693 100644 --- a/arch/s390/kernel/init_task.c +++ b/arch/s390/kernel/init_task.c @@ -16,7 +16,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c index b151a25cb14d..80c35ff71d56 100644 --- a/arch/sh/kernel/init_task.c +++ b/arch/sh/kernel/init_task.c @@ -7,7 +7,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct pt_regs fake_swapper_regs; diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c index 62126e4cec54..f28cb8278e98 100644 --- 
a/arch/sparc/kernel/init_task.c +++ b/arch/sparc/kernel/init_task.c @@ -8,7 +8,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c index 910eda8fca18..806d381947bf 100644 --- a/arch/um/kernel/init_task.c +++ b/arch/um/kernel/init_task.c @@ -10,7 +10,6 @@ #include "linux/mqueue.h" #include "asm/uaccess.h" -static struct fs_struct init_fs = INIT_FS; struct mm_struct init_mm = INIT_MM(init_mm); static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c index d39918076bb4..df3bf269beab 100644 --- a/arch/x86/kernel/init_task.c +++ b/arch/x86/kernel/init_task.c @@ -10,7 +10,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/xtensa/kernel/init_task.c b/arch/xtensa/kernel/init_task.c index 3df469dbe814..e07f5c9fcd35 100644 --- a/arch/xtensa/kernel/init_task.c +++ b/arch/xtensa/kernel/init_task.c @@ -21,7 +21,6 @@ #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/fs/namei.c b/fs/namei.c index 3f88e043d459..e203691b9d12 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2893,3 +2893,10 @@ EXPORT_SYMBOL(vfs_symlink); EXPORT_SYMBOL(vfs_unlink); EXPORT_SYMBOL(dentry_unhash); EXPORT_SYMBOL(generic_readlink); + +/* to be mentioned only in INIT_TASK */ +struct fs_struct init_fs = { + .count = ATOMIC_INIT(1), + .lock = RW_LOCK_UNLOCKED, + .umask = 0022, +}; diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 9e5a06e78d02..a97c053d3a9a 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -10,12 +10,6 @@ struct fs_struct { struct path root, pwd; }; -#define INIT_FS { \ - .count = ATOMIC_INIT(1), \ - .lock = RW_LOCK_UNLOCKED, \ - .umask = 0022, \ -} - extern struct kmem_cache *fs_cachep; extern void exit_fs(struct task_struct *); diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 959f5522d10a..2f3c2d4ef73b 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -12,6 +12,7 @@ #include extern struct files_struct init_files; +extern struct fs_struct init_fs; #define INIT_KIOCTX(name, which_mm) \ { \ -- cgit v1.2.3 From c64d8996bd758cedc2ddc04b86ca66fa1d8599cf Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Fri, 2 Jan 2009 11:27:18 +0300 Subject: x86: early_printk - use sizeof instead of hardcoded number Impact: cleanup Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/kernel/early_printk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 23b138e31e9c..504ad198e4ad 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -886,7 +886,7 @@ asmlinkage void early_printk(const char *fmt, ...) 
va_list ap; va_start(ap, fmt); - n = vscnprintf(buf, 512, fmt, ap); + n = vscnprintf(buf, sizeof(buf), fmt, ap); early_console->write(early_console, buf, n); va_end(ap); } -- cgit v1.2.3 From a9067d537615d534dcef06c0d819472e43a0d152 Mon Sep 17 00:00:00 2001 From: Ingo Brueckl Date: Fri, 2 Jan 2009 14:42:00 +0100 Subject: x86: convert permanent_kmaps_init() from macro to inline Impact: cleanup This compiler warning: arch/x86/mm/init_32.c:515: warning: unused variable 'pgd_base' triggers because permanent_kmaps_init() is a CPP macro in the !CONFIG_HIGHMEM case, that does not tell the compiler that the 'pgd_base' parameter is used. Convert permanent_kmaps_init() (and set_highmem_pages_init()) to C inline functions - which gives the parameter a proper type and which gets rid of the compiler warning as well. Signed-off-by: Ingo Brueckl Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 800e1d94c1b5..ad98b18f2b48 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -434,8 +434,12 @@ static void __init set_highmem_pages_init(void) #endif /* !CONFIG_NUMA */ #else -# define permanent_kmaps_init(pgd_base) do { } while (0) -# define set_highmem_pages_init() do { } while (0) +static inline void permanent_kmaps_init(pgd_t *pgd_base) +{ +} +static inline void set_highmem_pages_init(void) +{ +} #endif /* CONFIG_HIGHMEM */ void __init native_pagetable_setup_start(pgd_t *base) -- cgit v1.2.3 From 26799a63110dcbe81291ea53178f6b4810d07424 Mon Sep 17 00:00:00 2001 From: Ravikiran G Thirumalai Date: Wed, 31 Dec 2008 13:44:46 -0800 Subject: x86: fix incorrect __read_mostly on _boot_cpu_pda The pda rework (commit 3461b0af025251bbc6b3d56c821c6ac2de6f7209) to remove static boot cpu pdas introduced a performance bug. _boot_cpu_pda is the actual pda used by the boot cpu and is definitely not "__read_mostly" and ended up polluting the read mostly section with writes. This bug caused regression of about 8-10% on certain syscall intensive workloads. Signed-off-by: Ravikiran Thirumalai Acked-by: Mike Travis Cc: Signed-off-by: Ingo Molnar --- arch/x86/kernel/head64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 388e05a5fc17..b9a4d8c4b935 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -27,7 +27,7 @@ #include /* boot cpu pda */ -static struct x8664_pda _boot_cpu_pda __read_mostly; +static struct x8664_pda _boot_cpu_pda; #ifdef CONFIG_SMP /* -- cgit v1.2.3 From 46814dded1b972a07b1609d81632eef3009fbb10 Mon Sep 17 00:00:00 2001 From: Cliff Wickman Date: Wed, 31 Dec 2008 13:20:50 -0600 Subject: x86, UV: remove erroneous BAU initialization Impact: fix crash on x86/UV UV is the SGI "UltraViolet" machine, which is x86_64 based. BAU is the "Broadcast Assist Unit", used for TLB shootdown in UV. This patch removes the allocation and initialization of an unused table. This table is left over from a development test mode. It is unused in the present code. And it was incorrectly initialized: 8 entries allocated but 17 initialized, causing slab corruption. This patch should go into 2.6.27 and 2.6.28 as well as the current tree. 
Diffed against 2.6.28 (linux-next, 12/30/08) Signed-off-by: Cliff Wickman Cc: Signed-off-by: Ingo Molnar --- arch/x86/kernel/tlb_uv.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 6a00e5faaa74..f885023167e0 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -582,7 +582,6 @@ static int __init uv_ptc_init(void) static struct bau_control * __init uv_table_bases_init(int blade, int node) { int i; - int *ip; struct bau_msg_status *msp; struct bau_control *bau_tabp; @@ -599,13 +598,6 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node) bau_cpubits_clear(&msp->seen_by, (int) uv_blade_nr_possible_cpus(blade)); - bau_tabp->watching = - kmalloc_node(sizeof(int) * DEST_NUM_RESOURCES, GFP_KERNEL, node); - BUG_ON(!bau_tabp->watching); - - for (i = 0, ip = bau_tabp->watching; i < DEST_Q_SIZE; i++, ip++) - *ip = 0; - uv_bau_table_bases[blade] = bau_tabp; return bau_tabp; @@ -628,7 +620,6 @@ uv_table_bases_finish(int blade, int node, int cur_cpu, bcp->bau_msg_head = bau_tablesp->va_queue_first; bcp->va_queue_first = bau_tablesp->va_queue_first; bcp->va_queue_last = bau_tablesp->va_queue_last; - bcp->watching = bau_tablesp->watching; bcp->msg_statuses = bau_tablesp->msg_statuses; bcp->descriptor_base = adp; } -- cgit v1.2.3 From f634fa941188a91dbf1dab961fe7a4509852fd6e Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 31 Dec 2008 16:29:48 +0530 Subject: x86: cpuid.c fix style problems Impact: cleanup Fixes style problems: WARNING: Use #include instead of ERROR: "foo * bar" should be "foo *bar" ERROR: trailing whitespace WARNING: usage of NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc total: 2 errors, 2 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpuid.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 72cefd1e649b..85d28d53f5d3 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -39,10 +39,10 @@ #include #include #include +#include #include #include -#include #include static struct class *cpuid_class; @@ -82,7 +82,7 @@ static loff_t cpuid_seek(struct file *file, loff_t offset, int orig) } static ssize_t cpuid_read(struct file *file, char __user *buf, - size_t count, loff_t * ppos) + size_t count, loff_t *ppos) { char __user *tmp = buf; struct cpuid_regs cmd; @@ -117,7 +117,7 @@ static int cpuid_open(struct inode *inode, struct file *file) unsigned int cpu; struct cpuinfo_x86 *c; int ret = 0; - + lock_kernel(); cpu = iminor(file->f_path.dentry->d_inode); -- cgit v1.2.3 From 423a54058f746579aff1430877dbc82f17442b34 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 31 Dec 2008 16:42:20 +0530 Subject: x86: ldt.c fix style problems Impact: cleanup Fixes style problems: WARNING: Use #include instead of ERROR: space required before the open parenthesis '(' total: 1 errors, 1 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/ldt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index eee32b43fee3..71f1d99a635d 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -12,8 +12,8 @@ #include #include #include +#include -#include #include #include #include @@ -93,7 +93,7 @@ static 
inline int copy_ldt(mm_context_t *new, mm_context_t *old) if (err < 0) return err; - for(i = 0; i < old->size; i++) + for (i = 0; i < old->size; i++) write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); return 0; } -- cgit v1.2.3 From dceb4521c8ed24b9fe4230e0d385cf4770260383 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 31 Dec 2008 17:35:02 +0530 Subject: x86: nmi.c fix style problems Impact: cleanup, fix style problems Fixes style problems: WARNING: Use #include instead of WARNING: Use #include instead of total: 0 errors, 2 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/nmi.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 8bd1bf9622a7..45a09ccdc214 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -26,11 +26,10 @@ #include #include #include +#include #include #include -#include -#include #include #include -- cgit v1.2.3 From 103ceffb9501531f6931df6aebc11a05189201f0 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 2 Jan 2009 23:43:25 +0530 Subject: x86: mpparse.c fix style problems Impact: cleanup, fix style problems, more readable Fixes style problems: WARNING: Use #include instead of WARNING: Use #include instead of WARNING: suspect code indent for conditional statements (8, 17) WARNING: space prohibited between function name and open parenthesis '(' total: 0 errors, 5 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 45e3b69808ba..c5c5b8df1dbc 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -16,14 +16,14 @@ #include #include #include +#include +#include -#include #include #include #include #include #include -#include #include #include #include @@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_config_bus *m) #endif if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { - set_bit(m->mpc_busid, mp_bus_not_pci); -#if defined(CONFIG_EISA) || defined (CONFIG_MCA) + set_bit(m->mpc_busid, mp_bus_not_pci); +#if defined(CONFIG_EISA) || defined(CONFIG_MCA) mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; #endif } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { @@ -104,7 +104,7 @@ static void __init MP_bus_info(struct mpc_config_bus *m) x86_quirks->mpc_oem_pci_bus(m); clear_bit(m->mpc_busid, mp_bus_not_pci); -#if defined(CONFIG_EISA) || defined (CONFIG_MCA) +#if defined(CONFIG_EISA) || defined(CONFIG_MCA) mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; -- cgit v1.2.3 From e8e32326279cba3d049b4325111f76618953195c Mon Sep 17 00:00:00 2001 From: Ingo Brueckl Date: Fri, 2 Jan 2009 14:42:00 +0100 Subject: Fix compiler warning in arch/x86/mm/init_32.c Signed-off-by: Ingo Brueckl Signed-off-by: Linus Torvalds --- arch/x86/mm/init_32.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8655b5bb0963..f99a6c6c432e 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -435,8 +435,12 @@ static void __init set_highmem_pages_init(void) #endif /* !CONFIG_NUMA */ #else -# define permanent_kmaps_init(pgd_base) do { } 
while (0) -# define set_highmem_pages_init() do { } while (0) +static inline void permanent_kmaps_init(pgd_t *pgd_base) +{ +} +static inline void set_highmem_pages_init(void) +{ +} #endif /* CONFIG_HIGHMEM */ void __init native_pagetable_setup_start(pgd_t *base) -- cgit v1.2.3 From 79ff56ebd3edfb16f8badc558cb439b203a3298f Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Tue, 30 Dec 2008 20:18:00 -0800 Subject: swiotlb: add missing __init annotations Impact: cleanup, reduce kernel size a bit The current kernel build warns: WARNING: vmlinux.o(.text+0x11458): Section mismatch in reference from the function swiotlb_alloc_boot() to the function .init.text:__alloc_bootmem_low() The function swiotlb_alloc_boot() references the function __init __alloc_bootmem_low(). This is often because swiotlb_alloc_boot lacks a __init annotation or the annotation of __alloc_bootmem_low is wrong. WARNING: vmlinux.o(.text+0x1011f2): Section mismatch in reference from the function swiotlb_late_init_with_default_size() to the function .init.text:__alloc_bootmem_low() The function swiotlb_late_init_with_default_size() references the function __init __alloc_bootmem_low(). This is often because swiotlb_late_init_with_default_size lacks a __init annotation or the annotation of __alloc_bootmem_low is wrong. and indeed the functions calling __alloc_bootmem_low() can be marked __init as well. Signed-off-by: Roland Dreier Signed-off-by: Ingo Molnar --- arch/x86/kernel/pci-swiotlb_64.c | 2 +- lib/swiotlb.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c index 242c3440687f..8cba3749a511 100644 --- a/arch/x86/kernel/pci-swiotlb_64.c +++ b/arch/x86/kernel/pci-swiotlb_64.c @@ -13,7 +13,7 @@ int swiotlb __read_mostly; -void *swiotlb_alloc_boot(size_t size, unsigned long nslabs) +void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) { return alloc_bootmem_low_pages(size); } diff --git a/lib/swiotlb.c b/lib/swiotlb.c index fa2dc4e5f9ba..b6d0aae4fd31 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -116,7 +116,7 @@ setup_io_tlb_npages(char *str) __setup("swiotlb=", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */ -void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs) +void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) { return alloc_bootmem_low_pages(size); } -- cgit v1.2.3 From 37dd3cb4153084ff693e738b7ee087990d351e5e Mon Sep 17 00:00:00 2001 From: Markus Trippelsdorf Date: Wed, 31 Dec 2008 13:00:43 +0100 Subject: x86: remove debug printks (io_apic.c) Impact: reduce printk noise The message "alloc irq_2_pin on cpu 0 node 0" is printed way too often. % dmesg|grep irq_2_pin|wc -l 20 Get rid of the debug printks. 
Signed-off-by: Markus Trippelsdorf Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 69911722b9d3..45073520407f 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -129,7 +129,6 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu) node = cpu_to_node(cpu); pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); - printk(KERN_DEBUG " alloc irq_2_pin on cpu %d node %d\n", cpu, node); return pin; } @@ -227,7 +226,6 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu) cpumask_clear(cfg->old_domain); } } - printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); return cfg; } -- cgit v1.2.3 From c4fa3864281c7d88b7262cbc6cbd5c90bb59860e Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 26 Nov 2008 15:51:19 +0100 Subject: KVM: rename vtd.c to iommu.c Impact: file renamed The code in the vtd.c file can be reused for other IOMMUs as well. So rename it to make it clear that it handles more than VT-d. Signed-off-by: Joerg Roedel --- arch/ia64/kvm/Makefile | 2 +- arch/x86/kvm/Makefile | 2 +- virt/kvm/iommu.c | 217 +++++++++++++++++++++++++++++++++++++++++++++++++ virt/kvm/vtd.c | 217 ------------------------------------------------- 4 files changed, 219 insertions(+), 219 deletions(-) create mode 100644 virt/kvm/iommu.c delete mode 100644 virt/kvm/vtd.c (limited to 'arch/x86') diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile index 76464dc312e6..cb69dfcfb1a6 100644 --- a/arch/ia64/kvm/Makefile +++ b/arch/ia64/kvm/Makefile @@ -52,7 +52,7 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ coalesced_mmio.o irq_comm.o) ifeq ($(CONFIG_DMAR),y) -common-objs += $(addprefix ../../../virt/kvm/, vtd.o) +common-objs += $(addprefix ../../../virt/kvm/, iommu.o) endif kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index c02343594b4d..00f46c2d14bd 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -8,7 +8,7 @@ ifeq ($(CONFIG_KVM_TRACE),y) common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o) endif ifeq ($(CONFIG_DMAR),y) -common-objs += $(addprefix ../../../virt/kvm/, vtd.o) +common-objs += $(addprefix ../../../virt/kvm/, iommu.o) endif EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c new file mode 100644 index 000000000000..d46de9af838b --- /dev/null +++ b/virt/kvm/iommu.c @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2006, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + * Copyright (C) 2006-2008 Intel Corporation + * Copyright IBM Corporation, 2008 + * Author: Allen M.
Kay + * Author: Weidong Han + * Author: Ben-Ami Yassour + */ + +#include +#include +#include +#include +#include + +static int kvm_iommu_unmap_memslots(struct kvm *kvm); +static void kvm_iommu_put_pages(struct kvm *kvm, + gfn_t base_gfn, unsigned long npages); + +int kvm_iommu_map_pages(struct kvm *kvm, + gfn_t base_gfn, unsigned long npages) +{ + gfn_t gfn = base_gfn; + pfn_t pfn; + int i, r = 0; + struct dmar_domain *domain = kvm->arch.intel_iommu_domain; + + /* check if iommu exists and in use */ + if (!domain) + return 0; + + for (i = 0; i < npages; i++) { + /* check if already mapped */ + if (intel_iommu_iova_to_phys(domain, + gfn_to_gpa(gfn))) + continue; + + pfn = gfn_to_pfn(kvm, gfn); + r = intel_iommu_map_address(domain, + gfn_to_gpa(gfn), + pfn_to_hpa(pfn), + PAGE_SIZE, + DMA_PTE_READ | DMA_PTE_WRITE); + if (r) { + printk(KERN_ERR "kvm_iommu_map_address:" + "iommu failed to map pfn=%lx\n", pfn); + goto unmap_pages; + } + gfn++; + } + return 0; + +unmap_pages: + kvm_iommu_put_pages(kvm, base_gfn, i); + return r; +} + +static int kvm_iommu_map_memslots(struct kvm *kvm) +{ + int i, r; + + down_read(&kvm->slots_lock); + for (i = 0; i < kvm->nmemslots; i++) { + r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn, + kvm->memslots[i].npages); + if (r) + break; + } + up_read(&kvm->slots_lock); + return r; +} + +int kvm_assign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev) +{ + struct pci_dev *pdev = NULL; + struct dmar_domain *domain = kvm->arch.intel_iommu_domain; + int r; + + /* check if iommu exists and in use */ + if (!domain) + return 0; + + pdev = assigned_dev->dev; + if (pdev == NULL) + return -ENODEV; + + r = intel_iommu_attach_device(domain, pdev); + if (r) { + printk(KERN_ERR "assign device %x:%x.%x failed", + pdev->bus->number, + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + return r; + } + + printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n", + assigned_dev->host_busnr, + PCI_SLOT(assigned_dev->host_devfn), + PCI_FUNC(assigned_dev->host_devfn)); + + return 0; +} + +int kvm_deassign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev) +{ + struct dmar_domain *domain = kvm->arch.intel_iommu_domain; + struct pci_dev *pdev = NULL; + + /* check if iommu exists and in use */ + if (!domain) + return 0; + + pdev = assigned_dev->dev; + if (pdev == NULL) + return -ENODEV; + + intel_iommu_detach_device(domain, pdev); + + printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n", + assigned_dev->host_busnr, + PCI_SLOT(assigned_dev->host_devfn), + PCI_FUNC(assigned_dev->host_devfn)); + + return 0; +} + +int kvm_iommu_map_guest(struct kvm *kvm) +{ + int r; + + if (!intel_iommu_found()) { + printk(KERN_ERR "%s: intel iommu not found\n", __func__); + return -ENODEV; + } + + kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain(); + if (!kvm->arch.intel_iommu_domain) + return -ENOMEM; + + r = kvm_iommu_map_memslots(kvm); + if (r) + goto out_unmap; + + return 0; + +out_unmap: + kvm_iommu_unmap_memslots(kvm); + return r; +} + +static void kvm_iommu_put_pages(struct kvm *kvm, + gfn_t base_gfn, unsigned long npages) +{ + gfn_t gfn = base_gfn; + pfn_t pfn; + struct dmar_domain *domain = kvm->arch.intel_iommu_domain; + unsigned long i; + u64 phys; + + /* check if iommu exists and in use */ + if (!domain) + return; + + for (i = 0; i < npages; i++) { + phys = intel_iommu_iova_to_phys(domain, + gfn_to_gpa(gfn)); + pfn = phys >> PAGE_SHIFT; + kvm_release_pfn_clean(pfn); + gfn++; + } + + intel_iommu_unmap_address(domain, + 
gfn_to_gpa(base_gfn), + PAGE_SIZE * npages); +} + +static int kvm_iommu_unmap_memslots(struct kvm *kvm) +{ + int i; + down_read(&kvm->slots_lock); + for (i = 0; i < kvm->nmemslots; i++) { + kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn, + kvm->memslots[i].npages); + } + up_read(&kvm->slots_lock); + + return 0; +} + +int kvm_iommu_unmap_guest(struct kvm *kvm) +{ + struct dmar_domain *domain = kvm->arch.intel_iommu_domain; + + /* check if iommu exists and in use */ + if (!domain) + return 0; + + kvm_iommu_unmap_memslots(kvm); + intel_iommu_free_domain(domain); + return 0; +} diff --git a/virt/kvm/vtd.c b/virt/kvm/vtd.c deleted file mode 100644 index d46de9af838b..000000000000 --- a/virt/kvm/vtd.c +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Copyright (c) 2006, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - * Copyright (C) 2006-2008 Intel Corporation - * Copyright IBM Corporation, 2008 - * Author: Allen M. Kay - * Author: Weidong Han - * Author: Ben-Ami Yassour - */ - -#include -#include -#include -#include -#include - -static int kvm_iommu_unmap_memslots(struct kvm *kvm); -static void kvm_iommu_put_pages(struct kvm *kvm, - gfn_t base_gfn, unsigned long npages); - -int kvm_iommu_map_pages(struct kvm *kvm, - gfn_t base_gfn, unsigned long npages) -{ - gfn_t gfn = base_gfn; - pfn_t pfn; - int i, r = 0; - struct dmar_domain *domain = kvm->arch.intel_iommu_domain; - - /* check if iommu exists and in use */ - if (!domain) - return 0; - - for (i = 0; i < npages; i++) { - /* check if already mapped */ - if (intel_iommu_iova_to_phys(domain, - gfn_to_gpa(gfn))) - continue; - - pfn = gfn_to_pfn(kvm, gfn); - r = intel_iommu_map_address(domain, - gfn_to_gpa(gfn), - pfn_to_hpa(pfn), - PAGE_SIZE, - DMA_PTE_READ | DMA_PTE_WRITE); - if (r) { - printk(KERN_ERR "kvm_iommu_map_address:" - "iommu failed to map pfn=%lx\n", pfn); - goto unmap_pages; - } - gfn++; - } - return 0; - -unmap_pages: - kvm_iommu_put_pages(kvm, base_gfn, i); - return r; -} - -static int kvm_iommu_map_memslots(struct kvm *kvm) -{ - int i, r; - - down_read(&kvm->slots_lock); - for (i = 0; i < kvm->nmemslots; i++) { - r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn, - kvm->memslots[i].npages); - if (r) - break; - } - up_read(&kvm->slots_lock); - return r; -} - -int kvm_assign_device(struct kvm *kvm, - struct kvm_assigned_dev_kernel *assigned_dev) -{ - struct pci_dev *pdev = NULL; - struct dmar_domain *domain = kvm->arch.intel_iommu_domain; - int r; - - /* check if iommu exists and in use */ - if (!domain) - return 0; - - pdev = assigned_dev->dev; - if (pdev == NULL) - return -ENODEV; - - r = intel_iommu_attach_device(domain, pdev); - if (r) { - printk(KERN_ERR "assign device %x:%x.%x failed", - pdev->bus->number, - PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn)); - return r; - } - - printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n", - assigned_dev->host_busnr, - 
PCI_SLOT(assigned_dev->host_devfn), - PCI_FUNC(assigned_dev->host_devfn)); - - return 0; -} - -int kvm_deassign_device(struct kvm *kvm, - struct kvm_assigned_dev_kernel *assigned_dev) -{ - struct dmar_domain *domain = kvm->arch.intel_iommu_domain; - struct pci_dev *pdev = NULL; - - /* check if iommu exists and in use */ - if (!domain) - return 0; - - pdev = assigned_dev->dev; - if (pdev == NULL) - return -ENODEV; - - intel_iommu_detach_device(domain, pdev); - - printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n", - assigned_dev->host_busnr, - PCI_SLOT(assigned_dev->host_devfn), - PCI_FUNC(assigned_dev->host_devfn)); - - return 0; -} - -int kvm_iommu_map_guest(struct kvm *kvm) -{ - int r; - - if (!intel_iommu_found()) { - printk(KERN_ERR "%s: intel iommu not found\n", __func__); - return -ENODEV; - } - - kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain(); - if (!kvm->arch.intel_iommu_domain) - return -ENOMEM; - - r = kvm_iommu_map_memslots(kvm); - if (r) - goto out_unmap; - - return 0; - -out_unmap: - kvm_iommu_unmap_memslots(kvm); - return r; -} - -static void kvm_iommu_put_pages(struct kvm *kvm, - gfn_t base_gfn, unsigned long npages) -{ - gfn_t gfn = base_gfn; - pfn_t pfn; - struct dmar_domain *domain = kvm->arch.intel_iommu_domain; - unsigned long i; - u64 phys; - - /* check if iommu exists and in use */ - if (!domain) - return; - - for (i = 0; i < npages; i++) { - phys = intel_iommu_iova_to_phys(domain, - gfn_to_gpa(gfn)); - pfn = phys >> PAGE_SHIFT; - kvm_release_pfn_clean(pfn); - gfn++; - } - - intel_iommu_unmap_address(domain, - gfn_to_gpa(base_gfn), - PAGE_SIZE * npages); -} - -static int kvm_iommu_unmap_memslots(struct kvm *kvm) -{ - int i; - down_read(&kvm->slots_lock); - for (i = 0; i < kvm->nmemslots; i++) { - kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn, - kvm->memslots[i].npages); - } - up_read(&kvm->slots_lock); - - return 0; -} - -int kvm_iommu_unmap_guest(struct kvm *kvm) -{ - struct dmar_domain *domain = kvm->arch.intel_iommu_domain; - - /* check if iommu exists and in use */ - if (!domain) - return 0; - - kvm_iommu_unmap_memslots(kvm); - intel_iommu_free_domain(domain); - return 0; -} -- cgit v1.2.3 From 1aaf118352b85bb359ce28070bcc478f659a7031 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 26 Nov 2008 17:25:13 +0100 Subject: select IOMMU_API when DMAR and/or AMD_IOMMU is selected These two IOMMUs can implement the current version of this API. So select the API if one or both of these IOMMU drivers is selected. 
Signed-off-by: Joerg Roedel --- arch/ia64/Kconfig | 3 +++ arch/x86/Kconfig | 3 +++ drivers/base/Makefile | 1 + 3 files changed, 7 insertions(+) (limited to 'arch/x86') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 7fa8f615ba6e..3d31636cbafb 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -687,3 +687,6 @@ config IRQ_PER_CPU config IOMMU_HELPER def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB) + +config IOMMU_API + def_bool (DMAR) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 249d1e0824b5..4737435b00d4 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -599,6 +599,9 @@ config SWIOTLB config IOMMU_HELPER def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU) +config IOMMU_API + def_bool (AMD_IOMMU || DMAR) + config MAXSMP bool "Configure Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL diff --git a/drivers/base/Makefile b/drivers/base/Makefile index c66637392bbc..b5b8ba512b28 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o obj-$(CONFIG_NUMA) += node.o obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o obj-$(CONFIG_SMP) += topology.o +obj-$(CONFIG_IOMMU_API) += iommu.o ifeq ($(CONFIG_SYSFS),y) obj-$(CONFIG_MODULES) += module.o endif -- cgit v1.2.3 From 19de40a8472fa64693eab844911eec277d489f6c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 3 Dec 2008 14:43:34 +0100 Subject: KVM: change KVM to use IOMMU API Signed-off-by: Joerg Roedel --- arch/ia64/include/asm/kvm_host.h | 2 +- arch/ia64/kvm/Makefile | 2 +- arch/ia64/kvm/kvm-ia64.c | 3 ++- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/Makefile | 2 +- arch/x86/kvm/x86.c | 3 ++- include/linux/kvm_host.h | 6 +++--- virt/kvm/iommu.c | 45 +++++++++++++++++++--------------------- virt/kvm/kvm_main.c | 2 +- 9 files changed, 33 insertions(+), 34 deletions(-) (limited to 'arch/x86') diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index 0560f3fae538..348663661659 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -467,7 +467,7 @@ struct kvm_arch { struct kvm_sal_data rdv_sal_data; struct list_head assigned_dev_head; - struct dmar_domain *intel_iommu_domain; + struct iommu_domain *iommu_domain; struct hlist_head irq_ack_notifier_list; unsigned long irq_sources_bitmap; diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile index cb69dfcfb1a6..0bb99b732908 100644 --- a/arch/ia64/kvm/Makefile +++ b/arch/ia64/kvm/Makefile @@ -51,7 +51,7 @@ EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ coalesced_mmio.o irq_comm.o) -ifeq ($(CONFIG_DMAR),y) +ifeq ($(CONFIG_IOMMU_API),y) common-objs += $(addprefix ../../../virt/kvm/, iommu.o) endif diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 0f5ebd948437..4e586f6110aa 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -188,7 +189,7 @@ int kvm_dev_ioctl_check_extension(long ext) r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_IOMMU: - r = intel_iommu_found(); + r = iommu_found(); break; default: r = 0; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 97215a458e5f..730843d1d2fb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -360,7 +360,7 @@ struct kvm_arch{ struct list_head 
active_mmu_pages; struct list_head assigned_dev_head; struct list_head oos_global_pages; - struct dmar_domain *intel_iommu_domain; + struct iommu_domain *iommu_domain; struct kvm_pic *vpic; struct kvm_ioapic *vioapic; struct kvm_pit *vpit; diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 00f46c2d14bd..d3ec292f00f2 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -7,7 +7,7 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ ifeq ($(CONFIG_KVM_TRACE),y) common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o) endif -ifeq ($(CONFIG_DMAR),y) +ifeq ($(CONFIG_IOMMU_API),y) common-objs += $(addprefix ../../../virt/kvm/, iommu.o) endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0e6aa8141dcd..cc17546a2406 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -989,7 +990,7 @@ int kvm_dev_ioctl_check_extension(long ext) r = !tdp_enabled; break; case KVM_CAP_IOMMU: - r = intel_iommu_found(); + r = iommu_found(); break; default: r = 0; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index e62a4629e51c..ec49d0be7f52 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -328,7 +328,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian); int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); -#ifdef CONFIG_DMAR +#ifdef CONFIG_IOMMU_API int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); int kvm_iommu_map_guest(struct kvm *kvm); @@ -337,7 +337,7 @@ int kvm_assign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev); int kvm_deassign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev); -#else /* CONFIG_DMAR */ +#else /* CONFIG_IOMMU_API */ static inline int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) @@ -366,7 +366,7 @@ static inline int kvm_deassign_device(struct kvm *kvm, { return 0; } -#endif /* CONFIG_DMAR */ +#endif /* CONFIG_IOMMU_API */ static inline void kvm_guest_enter(void) { diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index d46de9af838b..d0bebaa5bf0a 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c @@ -25,6 +25,7 @@ #include #include #include +#include #include static int kvm_iommu_unmap_memslots(struct kvm *kvm); @@ -37,7 +38,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, gfn_t gfn = base_gfn; pfn_t pfn; int i, r = 0; - struct dmar_domain *domain = kvm->arch.intel_iommu_domain; + struct iommu_domain *domain = kvm->arch.iommu_domain; /* check if iommu exists and in use */ if (!domain) @@ -45,16 +46,15 @@ int kvm_iommu_map_pages(struct kvm *kvm, for (i = 0; i < npages; i++) { /* check if already mapped */ - if (intel_iommu_iova_to_phys(domain, - gfn_to_gpa(gfn))) + if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) continue; pfn = gfn_to_pfn(kvm, gfn); - r = intel_iommu_map_address(domain, - gfn_to_gpa(gfn), - pfn_to_hpa(pfn), - PAGE_SIZE, - DMA_PTE_READ | DMA_PTE_WRITE); + r = iommu_map_range(domain, + gfn_to_gpa(gfn), + pfn_to_hpa(pfn), + PAGE_SIZE, + IOMMU_READ | IOMMU_WRITE); if (r) { printk(KERN_ERR "kvm_iommu_map_address:" "iommu failed to map pfn=%lx\n", pfn); @@ -88,7 +88,7 @@ int kvm_assign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct pci_dev *pdev = NULL; - struct dmar_domain *domain = kvm->arch.intel_iommu_domain; + struct iommu_domain *domain = kvm->arch.iommu_domain; int r; /* check if iommu 
exists and in use */ @@ -99,7 +99,7 @@ int kvm_assign_device(struct kvm *kvm, if (pdev == NULL) return -ENODEV; - r = intel_iommu_attach_device(domain, pdev); + r = iommu_attach_device(domain, &pdev->dev); if (r) { printk(KERN_ERR "assign device %x:%x.%x failed", pdev->bus->number, @@ -119,7 +119,7 @@ int kvm_assign_device(struct kvm *kvm, int kvm_deassign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { - struct dmar_domain *domain = kvm->arch.intel_iommu_domain; + struct iommu_domain *domain = kvm->arch.iommu_domain; struct pci_dev *pdev = NULL; /* check if iommu exists and in use */ @@ -130,7 +130,7 @@ int kvm_deassign_device(struct kvm *kvm, if (pdev == NULL) return -ENODEV; - intel_iommu_detach_device(domain, pdev); + iommu_detach_device(domain, &pdev->dev); printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n", assigned_dev->host_busnr, @@ -144,13 +144,13 @@ int kvm_iommu_map_guest(struct kvm *kvm) { int r; - if (!intel_iommu_found()) { - printk(KERN_ERR "%s: intel iommu not found\n", __func__); + if (!iommu_found()) { + printk(KERN_ERR "%s: iommu not found\n", __func__); return -ENODEV; } - kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain(); - if (!kvm->arch.intel_iommu_domain) + kvm->arch.iommu_domain = iommu_domain_alloc(); + if (!kvm->arch.iommu_domain) return -ENOMEM; r = kvm_iommu_map_memslots(kvm); @@ -169,7 +169,7 @@ static void kvm_iommu_put_pages(struct kvm *kvm, { gfn_t gfn = base_gfn; pfn_t pfn; - struct dmar_domain *domain = kvm->arch.intel_iommu_domain; + struct iommu_domain *domain = kvm->arch.iommu_domain; unsigned long i; u64 phys; @@ -178,16 +178,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm, return; for (i = 0; i < npages; i++) { - phys = intel_iommu_iova_to_phys(domain, - gfn_to_gpa(gfn)); + phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); pfn = phys >> PAGE_SHIFT; kvm_release_pfn_clean(pfn); gfn++; } - intel_iommu_unmap_address(domain, - gfn_to_gpa(base_gfn), - PAGE_SIZE * npages); + iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages); } static int kvm_iommu_unmap_memslots(struct kvm *kvm) @@ -205,13 +202,13 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm) int kvm_iommu_unmap_guest(struct kvm *kvm) { - struct dmar_domain *domain = kvm->arch.intel_iommu_domain; + struct iommu_domain *domain = kvm->arch.iommu_domain; /* check if iommu exists and in use */ if (!domain) return 0; kvm_iommu_unmap_memslots(kvm); - intel_iommu_free_domain(domain); + iommu_domain_free(domain); return 0; } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4ef0fb43d1f9..3a5a08298aab 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -504,7 +504,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, list_add(&match->list, &kvm->arch.assigned_dev_head); if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { - if (!kvm->arch.intel_iommu_domain) { + if (!kvm->arch.iommu_domain) { r = kvm_iommu_map_guest(kvm); if (r) goto out_list_del; -- cgit v1.2.3 From 38e817febe2f12bd2fbf92a1df36f41946d0c223 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 17:27:52 +0100 Subject: AMD IOMMU: rename iommu_map to iommu_map_page Impact: function rename The iommu_map function maps only one page. Make this clear in the function name. 
Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 2e2da717b350..b11c855af7b4 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -338,10 +338,10 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid) * supporting all features of AMD IOMMU page tables like level skipping * and full 64 bit address spaces. */ -static int iommu_map(struct protection_domain *dom, - unsigned long bus_addr, - unsigned long phys_addr, - int prot) +static int iommu_map_page(struct protection_domain *dom, + unsigned long bus_addr, + unsigned long phys_addr, + int prot) { u64 __pte, *pte, *page; @@ -440,7 +440,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, for (addr = e->address_start; addr < e->address_end; addr += PAGE_SIZE) { - ret = iommu_map(&dma_dom->domain, addr, addr, e->prot); + ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot); if (ret) return ret; /* -- cgit v1.2.3 From 86db2e5d47bfa61a151d6ac83263f4bde4d52290 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 18:20:21 +0100 Subject: AMD IOMMU: make dma_ops_free_pagetable generic Impact: change code to free pagetables from protection domains The dma_ops_free_pagetable function can only free pagetables from dma_ops domains. Change that to free pagetables of pure protection domains. Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index b11c855af7b4..8a0fd3d09973 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -587,12 +587,12 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, iommu_area_reserve(dom->bitmap, start_page, pages); } -static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom) +static void free_pagetable(struct protection_domain *domain) { int i, j; u64 *p1, *p2, *p3; - p1 = dma_dom->domain.pt_root; + p1 = domain->pt_root; if (!p1) return; @@ -613,6 +613,8 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom) } free_page((unsigned long)p1); + + domain->pt_root = NULL; } /* @@ -624,7 +626,7 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) if (!dom) return; - dma_ops_free_pagetable(dom); + free_pagetable(&dom->domain); kfree(dom->pte_pages); -- cgit v1.2.3 From a2acfb75792511a35586db80a38b8e4701a97730 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 18:28:53 +0100 Subject: AMD IOMMU: add domain id free function Impact: add code to release a domain id Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 8a0fd3d09973..0922d5fe633c 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -571,6 +571,18 @@ static u16 domain_id_alloc(void) return id; } +#ifdef CONFIG_IOMMU_API +static void domain_id_free(int id) +{ + unsigned long flags; + + write_lock_irqsave(&amd_iommu_devtable_lock, flags); + if (id > 0 && id < MAX_DOMAIN_ID) + __clear_bit(id, amd_iommu_pd_alloc_bitmap); + write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); +} +#endif + /* * Used to reserve address ranges in the aperture (e.g. for exclusion * ranges. 
-- cgit v1.2.3 From 8d201968e15f56ae2837b0d0b64d3fff098857b0 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 20:34:41 +0100 Subject: AMD IOMMU: refactor completion wait handling into separate functions Impact: split one function into three The separate functions are required to synchronize commands across all hardware IOMMUs in the system. Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 68 ++++++++++++++++++++++++++++++--------------- 1 file changed, 45 insertions(+), 23 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 0922d5fe633c..2280ef86651f 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -195,6 +195,46 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) return ret; } +/* + * This function waits until an IOMMU has completed a completion + * wait command + */ +static void __iommu_wait_for_completion(struct amd_iommu *iommu) +{ + int ready = 0; + unsigned status = 0; + unsigned long i = 0; + + while (!ready && (i < EXIT_LOOP_COUNT)) { + ++i; + /* wait for the bit to become one */ + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; + } + + /* set bit back to zero */ + status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; + writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); + + if (unlikely(i == EXIT_LOOP_COUNT)) + panic("AMD IOMMU: Completion wait loop failed\n"); +} + +/* + * This function queues a completion wait command into the command + * buffer of an IOMMU + */ +static int __iommu_completion_wait(struct amd_iommu *iommu) +{ + struct iommu_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; + CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); + + return __iommu_queue_command(iommu, &cmd); +} + /* + * This function is called whenever we need to ensure that the IOMMU has + * completed execution of all commands we sent. 
It sends a @@ -204,40 +244,22 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) */ static int iommu_completion_wait(struct amd_iommu *iommu) { - int ret = 0, ready = 0; - unsigned status = 0; - struct iommu_cmd cmd; - unsigned long flags, i = 0; - - memset(&cmd, 0, sizeof(cmd)); - cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; - CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); + int ret = 0; + unsigned long flags; spin_lock_irqsave(&iommu->lock, flags); if (!iommu->need_sync) goto out; - iommu->need_sync = 0; + ret = __iommu_completion_wait(iommu); - ret = __iommu_queue_command(iommu, &cmd); + iommu->need_sync = 0; if (ret) goto out; - while (!ready && (i < EXIT_LOOP_COUNT)) { - ++i; - /* wait for the bit to become one */ - status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); - ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; - } - - /* set bit back to zero */ - status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; - writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); - - if (unlikely(i == EXIT_LOOP_COUNT)) - panic("AMD IOMMU: Completion wait loop failed\n"); + __iommu_wait_for_completion(iommu); out: spin_unlock_irqrestore(&iommu->lock, flags); -- cgit v1.2.3 From 237b6f33291394c337ae84e2d3782d5605803af2 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 20:54:37 +0100 Subject: AMD IOMMU: move invalidation command building to a separate function Impact: refactoring of iommu_queue_inv_iommu_pages Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 2280ef86651f..fee16fbf2f33 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -286,6 +286,21 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) return ret; } +static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, + u16 domid, int pde, int s) +{ + memset(cmd, 0, sizeof(*cmd)); + address &= PAGE_MASK; + CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); + cmd->data[1] |= domid; + cmd->data[2] = lower_32_bits(address); + cmd->data[3] = upper_32_bits(address); + if (s) /* size bit - we flush more than one 4kb page */ + cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; + if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ + cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; +} + /* * Generic command send function for invalidaing TLB entries */ @@ -295,16 +310,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, struct iommu_cmd cmd; int ret; - memset(&cmd, 0, sizeof(cmd)); - address &= PAGE_MASK; - CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES); - cmd.data[1] |= domid; - cmd.data[2] = lower_32_bits(address); - cmd.data[3] = upper_32_bits(address); - if (s) /* size bit - we flush more than one 4kb page */ - cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; - if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ - cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; + __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s); ret = iommu_queue_command(iommu, &cmd); -- cgit v1.2.3 From 9e919012e33c481991e46aa4cb13d807cd47b798 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 10 Dec 2008 20:05:52 +0100 Subject: AMD IOMMU: don't remove protection domain from iommu_pd_list Impact: save unneeded logic to add and remove domains to the list The removal of a protection domain from the iommu_pd_list is not necessary. 
Another benefit is that we save complexity because we don't have to re-add it later when the device no longer uses the domain. Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index fee16fbf2f33..b7b3067630cf 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -844,7 +844,6 @@ static struct dma_ops_domain *find_protection_domain(u16 devid) list_for_each_entry(entry, &iommu_pd_list, list) { if (entry->target_dev == devid) { ret = entry; - list_del(&ret->list); break; } } -- cgit v1.2.3 From 43f4960983a309568a6c4375f081e63fb2ff24a3 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 21:01:12 +0100 Subject: AMD IOMMU: add iommu_flush_domain function Impact: add a function to flush a domain id on every IOMMU Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index b7b3067630cf..2b6b8e050bd8 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -352,6 +352,30 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid) iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1); } +#ifdef CONFIG_IOMMU_API +/* + * This function is used to flush the IO/TLB for a given protection domain + * on every IOMMU in the system + */ +static void iommu_flush_domain(u16 domid) +{ + unsigned long flags; + struct amd_iommu *iommu; + struct iommu_cmd cmd; + + __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, + domid, 1, 1); + + list_for_each_entry(iommu, &amd_iommu_list, list) { + spin_lock_irqsave(&iommu->lock, flags); + __iommu_queue_command(iommu, &cmd); + __iommu_completion_wait(iommu); + __iommu_wait_for_completion(iommu); + spin_unlock_irqrestore(&iommu->lock, flags); + } +} +#endif + /**************************************************************************** * * The functions below are used the create the page table mappings for -- cgit v1.2.3 From 9fdb19d64c0247f23343b51fc85f438f8e7a2f3c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 17:46:25 +0100 Subject: AMD IOMMU: add protection domain flags Impact: add a new struct member to 'struct protection_domain' When using protection domains for dma_ops and KVM it's better to know for which subsystem it was allocated. Add a flags member to struct protection domain for that purpose. Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 14 +++++++++----- arch/x86/kernel/amd_iommu.c | 1 + 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index ac302a2fa339..4862a5be899c 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -190,16 +190,20 @@ /* FIXME: move this macro to */ #define PCI_BUS(x) (((x) >> 8) & 0xff) +/* Protection domain flags */ +#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ + /* * This structure contains generic data for IOMMU protection domains * independent of their use. 
*/ struct protection_domain { - spinlock_t lock; /* mostly used to lock the page table*/ - u16 id; /* the domain id written to the device table */ - int mode; /* paging mode (0-6 levels) */ - u64 *pt_root; /* page table root pointer */ - void *priv; /* private data */ + spinlock_t lock; /* mostly used to lock the page table*/ + u16 id; /* the domain id written to the device table */ + int mode; /* paging mode (0-6 levels) */ + u64 *pt_root; /* page table root pointer */ + unsigned long flags; /* flags to find out type of domain */ + void *priv; /* private data */ }; /* diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 2b6b8e050bd8..bb28e2cda711 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -729,6 +729,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu, goto free_dma_dom; dma_dom->domain.mode = PAGE_MODE_3_LEVEL; dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); + dma_dom->domain.flags = PD_DMA_OPS_MASK; dma_dom->domain.priv = dma_dom; if (!dma_dom->domain.pt_root) goto free_dma_dom; -- cgit v1.2.3 From 5b28df6f43ac9878f310ad0cb7f11ddb262a7ac6 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 17:49:42 +0100 Subject: AMD IOMMU: add checks for dma_ops domain to dma_ops functions Impact: detect when a driver uses a device assigned otherwise Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 41 +++++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index bb28e2cda711..5c465c91150e 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -791,6 +791,15 @@ free_dma_dom: return NULL; } +/* + * little helper function to check whether a given protection domain is a + * dma_ops domain + */ +static bool dma_ops_domain(struct protection_domain *domain) +{ + return domain->flags & PD_DMA_OPS_MASK; +} + /* * Find out the protection domain structure for a given PCI device. This * will give us the pointer to the page table root for example. 
@@ -1096,6 +1105,9 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, /* device not handled by any AMD IOMMU */ return (dma_addr_t)paddr; + if (!dma_ops_domain(domain)) + return bad_dma_address; + spin_lock_irqsave(&domain->lock, flags); addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false, dma_mask); @@ -1126,6 +1138,9 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr, /* device not handled by any AMD IOMMU */ return; + if (!dma_ops_domain(domain)) + return; + spin_lock_irqsave(&domain->lock, flags); __unmap_single(iommu, domain->priv, dma_addr, size, dir); @@ -1180,6 +1195,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, if (!iommu || !domain) return map_sg_no_iommu(dev, sglist, nelems, dir); + if (!dma_ops_domain(domain)) + return 0; + spin_lock_irqsave(&domain->lock, flags); for_each_sg(sglist, s, nelems, i) { @@ -1233,6 +1251,9 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, !get_device_resources(dev, &iommu, &domain, &devid)) return; + if (!dma_ops_domain(domain)) + return; + spin_lock_irqsave(&domain->lock, flags); for_each_sg(sglist, s, nelems, i) { @@ -1278,6 +1299,9 @@ static void *alloc_coherent(struct device *dev, size_t size, return virt_addr; } + if (!dma_ops_domain(domain)) + goto out_free; + if (!dma_mask) dma_mask = *dev->dma_mask; @@ -1286,18 +1310,20 @@ static void *alloc_coherent(struct device *dev, size_t size, *dma_addr = __map_single(dev, iommu, domain->priv, paddr, size, DMA_BIDIRECTIONAL, true, dma_mask); - if (*dma_addr == bad_dma_address) { - free_pages((unsigned long)virt_addr, get_order(size)); - virt_addr = NULL; - goto out; - } + if (*dma_addr == bad_dma_address) + goto out_free; iommu_completion_wait(iommu); -out: spin_unlock_irqrestore(&domain->lock, flags); return virt_addr; + +out_free: + + free_pages((unsigned long)virt_addr, get_order(size)); + + return NULL; } /* @@ -1319,6 +1345,9 @@ static void free_coherent(struct device *dev, size_t size, if (!iommu || !domain) goto free_mem; + if (!dma_ops_domain(domain)) + goto free_mem; + spin_lock_irqsave(&domain->lock, flags); __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); -- cgit v1.2.3 From 863c74ebd0152b21bc4b11c1447b5d1429287d37 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 17:56:36 +0100 Subject: AMD IOMMU: add device reference counting for protection domains Impact: know how many devices are assigned to a domain Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 1 + arch/x86/kernel/amd_iommu.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 4862a5be899c..1c769f4e6cdf 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -203,6 +203,7 @@ struct protection_domain { int mode; /* paging mode (0-6 levels) */ u64 *pt_root; /* page table root pointer */ unsigned long flags; /* flags to find out type of domain */ + unsigned dev_cnt; /* devices assigned to this domain */ void *priv; /* private data */ }; diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 5c465c91150e..8b45bc49d1c5 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -825,9 +825,10 @@ static void set_device_domain(struct amd_iommu *iommu, u16 devid) { unsigned long flags; - u64 pte_root = virt_to_phys(domain->pt_root); + domain->dev_cnt += 1; + pte_root |= 
(domain->mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT; pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; -- cgit v1.2.3 From f1179dc005ee2b0e55c3f74f3552c3e9ef852265 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 10 Dec 2008 14:39:51 +0100 Subject: AMD IOMMU: rename set_device_domain function Impact: rename set_device_domain() to attach_device() Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 8b45bc49d1c5..12e8b67e5881 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -820,9 +820,9 @@ static struct protection_domain *domain_for_device(u16 devid) * If a device is not yet associated with a domain, this function does * assigns it visible for the hardware */ -static void set_device_domain(struct amd_iommu *iommu, - struct protection_domain *domain, - u16 devid) +static void attach_device(struct amd_iommu *iommu, + struct protection_domain *domain, + u16 devid) { unsigned long flags; u64 pte_root = virt_to_phys(domain->pt_root); @@ -929,14 +929,14 @@ static int get_device_resources(struct device *dev, if (!dma_dom) dma_dom = (*iommu)->default_dom; *domain = &dma_dom->domain; - set_device_domain(*iommu, *domain, *bdf); + attach_device(*iommu, *domain, *bdf); printk(KERN_INFO "AMD IOMMU: Using protection domain %d for " "device ", (*domain)->id); print_devid(_bdf, 1); } if (domain_for_device(_bdf) == NULL) - set_device_domain(*iommu, *domain, _bdf); + attach_device(*iommu, *domain, _bdf); return 1; } -- cgit v1.2.3 From 355bf553edb7fe21ada51f62c849180bec6da877 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 8 Dec 2008 12:02:41 +0100 Subject: AMD IOMMU: add device detach helper functions Impact: add helper functions to detach a device from a domain Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 12e8b67e5881..15456a3a18c8 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -844,6 +844,45 @@ static void attach_device(struct amd_iommu *iommu, iommu_queue_inv_dev_entry(iommu, devid); } +#ifdef CONFIG_IOMMU_API +/* + * Removes a device from a protection domain (unlocked) + */ +static void __detach_device(struct protection_domain *domain, u16 devid) +{ + + /* lock domain */ + spin_lock(&domain->lock); + + /* remove domain from the lookup table */ + amd_iommu_pd_table[devid] = NULL; + + /* remove entry from the device table seen by the hardware */ + amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; + amd_iommu_dev_table[devid].data[1] = 0; + amd_iommu_dev_table[devid].data[2] = 0; + + /* decrease reference counter */ + domain->dev_cnt -= 1; + + /* ready */ + spin_unlock(&domain->lock); +} + +/* + * Removes a device from a protection domain (with devtable_lock held) + */ +static void detach_device(struct protection_domain *domain, u16 devid) +{ + unsigned long flags; + + /* lock device table */ + write_lock_irqsave(&amd_iommu_devtable_lock, flags); + __detach_device(domain, devid); + write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); +} +#endif + /***************************************************************************** * * The next functions belong to the dma_ops mapping/unmapping code. 
-- cgit v1.2.3 From e275a2a0fc9e2168b15f6c7814e30b7ad58b1c7c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 10 Dec 2008 18:27:25 +0100 Subject: AMD IOMMU: add device notifier callback Impact: inform IOMMU about state change of a device in the driver core Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 62 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 60 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 15456a3a18c8..140875b3f6ef 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -47,6 +47,8 @@ struct iommu_cmd { static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, struct unity_map_entry *e); +static struct dma_ops_domain *find_protection_domain(u16 devid); + /* returns !0 if the IOMMU is caching non-present entries in its TLB */ static int iommu_has_npcache(struct amd_iommu *iommu) @@ -844,7 +846,6 @@ static void attach_device(struct amd_iommu *iommu, iommu_queue_inv_dev_entry(iommu, devid); } -#ifdef CONFIG_IOMMU_API /* * Removes a device from a protection domain (unlocked) */ @@ -881,7 +882,62 @@ static void detach_device(struct protection_domain *domain, u16 devid) __detach_device(domain, devid); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); } -#endif + +static int device_change_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pci_dev *pdev = to_pci_dev(dev); + u16 devid = calc_devid(pdev->bus->number, pdev->devfn); + struct protection_domain *domain; + struct dma_ops_domain *dma_domain; + struct amd_iommu *iommu; + + if (devid > amd_iommu_last_bdf) + goto out; + + devid = amd_iommu_alias_table[devid]; + + iommu = amd_iommu_rlookup_table[devid]; + if (iommu == NULL) + goto out; + + domain = domain_for_device(devid); + + if (domain && !dma_ops_domain(domain)) + WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound " + "to a non-dma-ops domain\n", dev_name(dev)); + + switch (action) { + case BUS_NOTIFY_BOUND_DRIVER: + if (domain) + goto out; + dma_domain = find_protection_domain(devid); + if (!dma_domain) + dma_domain = iommu->default_dom; + attach_device(iommu, &dma_domain->domain, devid); + printk(KERN_INFO "AMD IOMMU: Using protection domain %d for " + "device %s\n", dma_domain->domain.id, dev_name(dev)); + break; + case BUS_NOTIFY_UNBIND_DRIVER: + if (!domain) + goto out; + detach_device(domain, devid); + break; + default: + goto out; + } + + iommu_queue_inv_dev_entry(iommu, devid); + iommu_completion_wait(iommu); + +out: + return 0; +} + +struct notifier_block device_nb = { + .notifier_call = device_change_notifier, +}; /***************************************************************************** * @@ -1510,6 +1566,8 @@ int __init amd_iommu_init_dma_ops(void) /* Make the driver finally visible to the drivers */ dma_ops = &amd_iommu_dma_ops; + bus_register_notifier(&pci_bus_type, &device_nb); + return 0; free_domains: -- cgit v1.2.3 From 6d98cd8043c13438e4ca8a9464893f0224198a30 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 8 Dec 2008 12:05:55 +0100 Subject: AMD IOMMU: add domain cleanup helper function Impact: add a function to remove all devices from a domain Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 140875b3f6ef..5d3f085289eb 100644 --- 
a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1579,3 +1579,31 @@ free_domains: return ret; } + +/***************************************************************************** + * + * The following functions belong to the exported interface of AMD IOMMU + * + * This interface allows access to lower level functions of the IOMMU + * like protection domain handling and assignement of devices to domains + * which is not possible with the dma_ops interface. + * + *****************************************************************************/ + +#ifdef CONFIG_IOMMU_API + +static void cleanup_domain(struct protection_domain *domain) +{ + unsigned long flags; + u16 devid; + + write_lock_irqsave(&amd_iommu_devtable_lock, flags); + + for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) + if (amd_iommu_pd_table[devid] == domain) + __detach_device(domain, devid); + + write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); +} + +#endif -- cgit v1.2.3 From c156e347d6d3c36b6843c3b168eda61b9a02c827 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 18:13:27 +0100 Subject: AMD IOMMU: add domain init function for IOMMU API Impact: add a generic function for allocating protection domains Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 5d3f085289eb..6c0bd49cee5f 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -22,6 +22,9 @@ #include #include #include +#ifdef CONFIG_IOMMU_API +#include +#endif #include #include #include @@ -1606,4 +1609,31 @@ static void cleanup_domain(struct protection_domain *domain) write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); } +static int amd_iommu_domain_init(struct iommu_domain *dom) +{ + struct protection_domain *domain; + + domain = kzalloc(sizeof(*domain), GFP_KERNEL); + if (!domain) + return -ENOMEM; + + spin_lock_init(&domain->lock); + domain->mode = PAGE_MODE_3_LEVEL; + domain->id = domain_id_alloc(); + if (!domain->id) + goto out_free; + domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!domain->pt_root) + goto out_free; + + dom->priv = domain; + + return 0; + +out_free: + kfree(domain); + + return -ENOMEM; +} + #endif -- cgit v1.2.3 From 98383fc301c6546af0f3a8a1d3cb8bf218f7e940 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 18:34:12 +0100 Subject: AMD IOMMU: add domain destroy function for IOMMU API Impact: add a generic function for releasing protection domains Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 6c0bd49cee5f..891d713d9c96 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1636,4 +1636,25 @@ out_free: return -ENOMEM; } +static void amd_iommu_domain_destroy(struct iommu_domain *dom) +{ + struct protection_domain *domain = dom->priv; + + if (!domain) + return; + + if (domain->dev_cnt > 0) + cleanup_domain(domain); + + BUG_ON(domain->dev_cnt != 0); + + free_pagetable(domain); + + domain_id_free(domain->id); + + kfree(domain); + + dom->priv = NULL; +} + #endif -- cgit v1.2.3 From 684f2888847b896faafed87ce4733501d2cc283c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 8 Dec 2008 12:07:44 +0100 Subject: AMD IOMMU: add device detach function for IOMMU API Impact: add a generic 
function to detach devices from protection domains Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 891d713d9c96..ef9b309e8e09 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1657,4 +1657,30 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom) dom->priv = NULL; } +static void amd_iommu_detach_device(struct iommu_domain *dom, + struct device *dev) +{ + struct protection_domain *domain = dom->priv; + struct amd_iommu *iommu; + struct pci_dev *pdev; + u16 devid; + + if (dev->bus != &pci_bus_type) + return; + + pdev = to_pci_dev(dev); + + devid = calc_devid(pdev->bus->number, pdev->devfn); + + if (devid > 0) + detach_device(domain, devid); + + iommu = amd_iommu_rlookup_table[devid]; + if (!iommu) + return; + + iommu_queue_inv_dev_entry(iommu, devid); + iommu_completion_wait(iommu); +} + #endif -- cgit v1.2.3 From 01106066a6900b518debe990ddaadf376d433bd6 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 19:34:11 +0100 Subject: AMD IOMMU: add device attach function for IOMMU API Impact: add a generic function to attach devices to protection domains Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index ef9b309e8e09..2f7c0b3a448b 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1683,4 +1683,39 @@ static void amd_iommu_detach_device(struct iommu_domain *dom, iommu_completion_wait(iommu); } +static int amd_iommu_attach_device(struct iommu_domain *dom, + struct device *dev) +{ + struct protection_domain *domain = dom->priv; + struct protection_domain *old_domain; + struct amd_iommu *iommu; + struct pci_dev *pdev; + u16 devid; + + if (dev->bus != &pci_bus_type) + return -EINVAL; + + pdev = to_pci_dev(dev); + + devid = calc_devid(pdev->bus->number, pdev->devfn); + + if (devid >= amd_iommu_last_bdf || + devid != amd_iommu_alias_table[devid]) + return -EINVAL; + + iommu = amd_iommu_rlookup_table[devid]; + if (!iommu) + return -EINVAL; + + old_domain = domain_for_device(devid); + if (old_domain) + return -EBUSY; + + attach_device(iommu, domain, devid); + + iommu_completion_wait(iommu); + + return 0; +} + #endif -- cgit v1.2.3 From c6229ca649aa9b312d1f1de20af8d2603b14eead Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 19:48:43 +0100 Subject: AMD IOMMU: add domain map function for IOMMU API Impact: add a generic function to map pages into protection domains Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 2f7c0b3a448b..1fcedbe39a99 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1718,4 +1718,33 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, return 0; } +static int amd_iommu_map_range(struct iommu_domain *dom, + unsigned long iova, phys_addr_t paddr, + size_t size, int iommu_prot) +{ + struct protection_domain *domain = dom->priv; + unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE); + int prot = 0; + int ret; + + if (iommu_prot & IOMMU_READ) + prot |= IOMMU_PROT_IR; + if (iommu_prot & IOMMU_WRITE) + prot |= 
IOMMU_PROT_IW; + + iova &= PAGE_MASK; + paddr &= PAGE_MASK; + + for (i = 0; i < npages; ++i) { + ret = iommu_map_page(domain, iova, paddr, prot); + if (ret) + return ret; + + iova += PAGE_SIZE; + paddr += PAGE_SIZE; + } + + return 0; +} + #endif -- cgit v1.2.3 From eb74ff6cc0080c7f6270fdfffba65c4eff23d3ad Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 19:59:10 +0100 Subject: AMD IOMMU: add domain unmap function for IOMMU API Impact: add a generic function to unmap pages from protection domains Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 1fcedbe39a99..d8a0abf423ba 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -445,6 +445,30 @@ static int iommu_map_page(struct protection_domain *dom, return 0; } +#ifdef CONFIG_IOMMU_API +static void iommu_unmap_page(struct protection_domain *dom, + unsigned long bus_addr) +{ + u64 *pte; + + pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)]; + + if (!IOMMU_PTE_PRESENT(*pte)) + return; + + pte = IOMMU_PTE_PAGE(*pte); + pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; + + if (!IOMMU_PTE_PRESENT(*pte)) + return; + + pte = IOMMU_PTE_PAGE(*pte); + pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; + + *pte = 0; +} +#endif + /* * This function checks if a specific unity mapping entry is needed for * this specific IOMMU. @@ -1747,4 +1771,21 @@ static int amd_iommu_map_range(struct iommu_domain *dom, return 0; } +static void amd_iommu_unmap_range(struct iommu_domain *dom, + unsigned long iova, size_t size) +{ + + struct protection_domain *domain = dom->priv; + unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE); + + iova &= PAGE_MASK; + + for (i = 0; i < npages; ++i) { + iommu_unmap_page(domain, iova); + iova += PAGE_SIZE; + } + + iommu_flush_domain(domain->id); +} + #endif -- cgit v1.2.3 From 645c4c8d7289a718c9c828ec217f2b94e3b3e6ff Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 2 Dec 2008 20:05:50 +0100 Subject: AMD IOMMU: add domain address lookup function for IOMMU API Impact: add a generic function to look up addresses in protection domains Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index d8a0abf423ba..b599e80051fc 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1788,4 +1788,35 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom, iommu_flush_domain(domain->id); } +static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, + unsigned long iova) +{ + struct protection_domain *domain = dom->priv; + unsigned long offset = iova & ~PAGE_MASK; + phys_addr_t paddr; + u64 *pte; + + pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)]; + + if (!IOMMU_PTE_PRESENT(*pte)) + return 0; + + pte = IOMMU_PTE_PAGE(*pte); + pte = &pte[IOMMU_PTE_L1_INDEX(iova)]; + + if (!IOMMU_PTE_PRESENT(*pte)) + return 0; + + pte = IOMMU_PTE_PAGE(*pte); + pte = &pte[IOMMU_PTE_L0_INDEX(iova)]; + + if (!IOMMU_PTE_PRESENT(*pte)) + return 0; + + paddr = *pte & IOMMU_PAGE_MASK; + paddr |= offset; + + return paddr; +} + #endif -- cgit v1.2.3 From 26961efe0dab9ca73f8fc3b6137b814252e04972 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 3 Dec 2008 17:00:17 +0100 Subject: AMD IOMMU: register functions for the IOMMU API 
Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index b599e80051fc..d9b651c01186 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -41,6 +41,10 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock); static LIST_HEAD(iommu_pd_list); static DEFINE_SPINLOCK(iommu_pd_list_lock); +#ifdef CONFIG_IOMMU_API +static struct iommu_ops amd_iommu_ops; +#endif + /* * general struct to manage commands send to an IOMMU */ @@ -1593,6 +1597,10 @@ int __init amd_iommu_init_dma_ops(void) /* Make the driver finally visible to the drivers */ dma_ops = &amd_iommu_dma_ops; +#ifdef CONFIG_IOMMU_API + register_iommu(&amd_iommu_ops); +#endif + bus_register_notifier(&pci_bus_type, &device_nb); return 0; @@ -1819,4 +1827,14 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, return paddr; } +static struct iommu_ops amd_iommu_ops = { + .domain_init = amd_iommu_domain_init, + .domain_destroy = amd_iommu_domain_destroy, + .attach_dev = amd_iommu_attach_device, + .detach_dev = amd_iommu_detach_device, + .map = amd_iommu_map_range, + .unmap = amd_iommu_unmap_range, + .iova_to_phys = amd_iommu_iova_to_phys, +}; + #endif -- cgit v1.2.3 From e2dc14a2a6c9a83baaafc51f06b7e73cec2167be Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 10 Dec 2008 18:48:59 +0100 Subject: AMD IOMMU: add a domain flag for default domains Impact: adds a new protection domain flag Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 2 ++ arch/x86/kernel/amd_iommu.c | 1 + 2 files changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 1c769f4e6cdf..6adc7020ef97 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -192,6 +192,8 @@ /* Protection domain flags */ #define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ +#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops + domain for an IOMMU */ /* * This structure contains generic data for IOMMU protection domains diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index d9b651c01186..f2956546423b 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1574,6 +1574,7 @@ int __init amd_iommu_init_dma_ops(void) iommu->default_dom = dma_ops_domain_alloc(iommu, order); if (iommu->default_dom == NULL) return -ENOMEM; + iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; ret = iommu_init_unity_mappings(iommu); if (ret) goto free_domains; -- cgit v1.2.3 From 1ac4cbbc5eb56de96d264d10f464ba5222815b1b Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 10 Dec 2008 19:33:26 +0100 Subject: AMD IOMMU: allocate a new protection domain for hotplugged devices Impact: also hotplug devices benefit from device isolation Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index f2956546423b..f2260609eadc 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -923,6 +923,8 @@ static int device_change_notifier(struct notifier_block *nb, struct protection_domain *domain; struct dma_ops_domain *dma_domain; struct amd_iommu *iommu; + int order = amd_iommu_aperture_order; + unsigned long flags; if (devid > amd_iommu_last_bdf) 
goto out; @@ -954,6 +956,21 @@ static int device_change_notifier(struct notifier_block *nb, if (!domain) goto out; detach_device(domain, devid); + break; + case BUS_NOTIFY_ADD_DEVICE: + /* allocate a protection domain if a device is added */ + dma_domain = find_protection_domain(devid); + if (dma_domain) + goto out; + dma_domain = dma_ops_domain_alloc(iommu, order); + if (!dma_domain) + goto out; + dma_domain->target_dev = devid; + + spin_lock_irqsave(&iommu_pd_list_lock, flags); + list_add_tail(&dma_domain->list, &iommu_pd_list); + spin_unlock_irqrestore(&iommu_pd_list_lock, flags); + break; default: goto out; -- cgit v1.2.3 From ab896722867602eb0e836717e8b857ad513800d8 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 10 Dec 2008 19:43:07 +0100 Subject: AMD IOMMU: use dev_name instead of self-build print_devid Impact: use generic dev_name instead of own function Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 12 ------------ arch/x86/kernel/amd_iommu.c | 3 +-- 2 files changed, 1 insertion(+), 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 6adc7020ef97..ee8cfa00017d 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -389,18 +389,6 @@ extern int amd_iommu_isolate; */ extern bool amd_iommu_unmap_flush; -/* takes a PCI device id and prints it out in a readable form */ -static inline void print_devid(u16 devid, int nl) -{ - int bus = devid >> 8; - int dev = devid >> 3 & 0x1f; - int fn = devid & 0x07; - - printk("%02x:%02x.%x", bus, dev, fn); - if (nl) - printk("\n"); -} - /* takes bus and device/function and returns the device id * FIXME: should that be in generic PCI code? */ static inline u16 calc_devid(u8 bus, u8 devfn) diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index f2260609eadc..a53cf48d3be8 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1074,8 +1074,7 @@ static int get_device_resources(struct device *dev, *domain = &dma_dom->domain; attach_device(*iommu, *domain, *bdf); printk(KERN_INFO "AMD IOMMU: Using protection domain %d for " - "device ", (*domain)->id); - print_devid(_bdf, 1); + "device %s\n", (*domain)->id, dev_name(dev)); } if (domain_for_device(_bdf) == NULL) -- cgit v1.2.3 From 0cfd7aa90be83a4d278810d231f9ef03f189b4f0 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 10 Dec 2008 19:58:00 +0100 Subject: AMD IOMMU: convert iommu->need_sync to bool Impact: use bool instead of int for iommu->need_sync Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 2 +- arch/x86/kernel/amd_iommu.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index ee8cfa00017d..c4b144ece1f8 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -302,7 +302,7 @@ struct amd_iommu { bool int_enabled; /* if one, we need to send a completion wait command */ - int need_sync; + bool need_sync; /* default dma_ops domain for that IOMMU */ struct dma_ops_domain *default_dom; diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index a53cf48d3be8..e410e97e5b0a 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -198,7 +198,7 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) spin_lock_irqsave(&iommu->lock, flags); ret = 
__iommu_queue_command(iommu, cmd); if (!ret) - iommu->need_sync = 1; + iommu->need_sync = true; spin_unlock_irqrestore(&iommu->lock, flags); return ret; @@ -263,7 +263,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu) ret = __iommu_completion_wait(iommu); - iommu->need_sync = 0; + iommu->need_sync = false; if (ret) goto out; -- cgit v1.2.3 From c226f853091577e665ebc02c064af4834d8d4f28 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 13:53:54 +0100 Subject: AMD IOMMU: convert amd_iommu_isolate to bool Impact: cleanup Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 2 +- arch/x86/kernel/amd_iommu_init.c | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index c4b144ece1f8..7abf9cf0c1fe 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -381,7 +381,7 @@ extern struct protection_domain **amd_iommu_pd_table; extern unsigned long *amd_iommu_pd_alloc_bitmap; /* will be 1 if device isolation is enabled */ -extern int amd_iommu_isolate; +extern bool amd_iommu_isolate; /* * If true, the addresses will be flushed on unmap time, not when diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index c625800c55ca..47e163b4431e 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -122,7 +122,8 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings we find in ACPI */ unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */ -int amd_iommu_isolate = 1; /* if 1, device isolation is enabled */ +bool amd_iommu_isolate = true; /* if true, device isolation is + enabled */ bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the @@ -1218,9 +1219,9 @@ static int __init parse_amd_iommu_options(char *str) { for (; *str; ++str) { if (strncmp(str, "isolate", 7) == 0) - amd_iommu_isolate = 1; + amd_iommu_isolate = true; if (strncmp(str, "share", 5) == 0) - amd_iommu_isolate = 0; + amd_iommu_isolate = false; if (strncmp(str, "fullflush", 9) == 0) amd_iommu_unmap_flush = true; } -- cgit v1.2.3 From edcb34da259c503a2ffd37e51a658672ba3bc7a2 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 10 Dec 2008 20:01:45 +0100 Subject: AMD IOMMU: use calc_devid in prealloc_protection_domains Impact: cleanup Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index e410e97e5b0a..3011ea7a3f82 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1543,7 +1543,7 @@ void prealloc_protection_domains(void) u16 devid; while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - devid = (dev->bus->number << 8) | dev->devfn; + devid = calc_devid(dev->bus->number, dev->devfn); if (devid > amd_iommu_last_bdf) continue; devid = amd_iommu_alias_table[devid]; -- cgit v1.2.3 From a4e267c88b4acfc87ff2ab0cc8e9509878e9aaba Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 10 Dec 2008 20:04:18 +0100 Subject: AMD IOMMU: use dev_name in iommu_enable function Impact: cleanup Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu_init.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 
'arch/x86') diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 47e163b4431e..be81f6125c51 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -246,12 +246,8 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit) /* Function to enable the hardware */ void __init iommu_enable(struct amd_iommu *iommu) { - printk(KERN_INFO "AMD IOMMU: Enabling IOMMU " - "at %02x:%02x.%x cap 0x%hx\n", - iommu->dev->bus->number, - PCI_SLOT(iommu->dev->devfn), - PCI_FUNC(iommu->dev->devfn), - iommu->cap_ptr); + printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n", + dev_name(&iommu->dev->dev), iommu->cap_ptr); iommu_feature_enable(iommu, CONTROL_IOMMU_EN); } -- cgit v1.2.3 From 2e117604a4e8f3f9cee4aec3373b0382159e152a Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 11 Dec 2008 19:00:12 +0100 Subject: AMD IOMMU: add Kconfig entry for statistic collection code Impact: adds new Kconfig entry Signed-off-by: Joerg Roedel --- arch/x86/Kconfig | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 249d1e0824b5..f9998d276eb3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -586,6 +586,16 @@ config AMD_IOMMU your BIOS for an option to enable it or if you have an IVRS ACPI table. +config AMD_IOMMU_STATS + bool "Export AMD IOMMU statistics to debugfs" + depends on AMD_IOMMU + select DEBUG_FS + help + This option enables code in the AMD IOMMU driver to collect various + statistics about whats happening in the driver and exports that + information to userspace via debugfs. + If unsure, say N. + # need this always selected by IOMMU for the VIA workaround config SWIOTLB def_bool y if X86_64 -- cgit v1.2.3 From a9dddbe0497ab0df7ee729e8d4cb0ee2dec3e4ba Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 12:33:06 +0100 Subject: AMD IOMMU: add necessary header defines for stats counting Impact: add defines to make iommu stats collection configurable Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 7abf9cf0c1fe..1379c5fe86d0 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -396,4 +396,30 @@ static inline u16 calc_devid(u8 bus, u8 devfn) return (((u16)bus) << 8) | devfn; } +#ifdef CONFIG_AMD_IOMMU_STATS + +struct __iommu_counter { + char *name; + struct dentry *dent; + u64 value; +}; + +#define DECLARE_STATS_COUNTER(nm) \ + static struct __iommu_counter nm = { \ + .name = #nm, \ + } + +#define INC_STATS_COUNTER(name) name.value += 1 +#define ADD_STATS_COUNTER(name, x) name.value += (x) +#define SUB_STATS_COUNTER(name, x) name.value -= (x) + +#else /* CONFIG_AMD_IOMMU_STATS */ + +#define DECLARE_STATS_COUNTER(name) +#define INC_STATS_COUNTER(name) +#define ADD_STATS_COUNTER(name, x) +#define SUB_STATS_COUNTER(name, x) + +#endif /* CONFIG_AMD_IOMMU_STATS */ + #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ -- cgit v1.2.3 From 7f26508bbb76ce86aad1130ef6b7f1a4bb7de0c2 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 13:50:21 +0100 Subject: AMD IOMMU: add init code for statistic collection Impact: create a new debugfs directory Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu_types.h | 2 ++ arch/x86/kernel/amd_iommu.c | 37 ++++++++++++++++++++++++++++++++++ 
2 files changed, 39 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 1379c5fe86d0..95c8cd9d22b5 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -420,6 +420,8 @@ struct __iommu_counter { #define ADD_STATS_COUNTER(name, x) #define SUB_STATS_COUNTER(name, x) +static inline void amd_iommu_stats_init(void) { } + #endif /* CONFIG_AMD_IOMMU_STATS */ #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 3011ea7a3f82..f98f70626bc6 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #ifdef CONFIG_IOMMU_API @@ -57,6 +58,40 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, static struct dma_ops_domain *find_protection_domain(u16 devid); +#ifdef CONFIG_AMD_IOMMU_STATS + +/* + * Initialization code for statistics collection + */ + +static struct dentry *stats_dir; +static struct dentry *de_isolate; +static struct dentry *de_fflush; + +static void amd_iommu_stats_add(struct __iommu_counter *cnt) +{ + if (stats_dir == NULL) + return; + + cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir, + &cnt->value); +} + +static void amd_iommu_stats_init(void) +{ + stats_dir = debugfs_create_dir("amd-iommu", NULL); + if (stats_dir == NULL) + return; + + de_isolate = debugfs_create_bool("isolation", 0444, stats_dir, + (u32 *)&amd_iommu_isolate); + + de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir, + (u32 *)&amd_iommu_unmap_flush); +} + +#endif + /* returns !0 if the IOMMU is caching non-present entries in its TLB */ static int iommu_has_npcache(struct amd_iommu *iommu) { @@ -1620,6 +1655,8 @@ int __init amd_iommu_init_dma_ops(void) bus_register_notifier(&pci_bus_type, &device_nb); + amd_iommu_stats_init(); + return 0; free_domains: -- cgit v1.2.3 From da49f6df726ecaaee87757e8b65a560679d32f99 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 14:59:58 +0100 Subject: AMD IOMMU: add stats counter for completion wait events Impact: see number of completion wait events in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index f98f70626bc6..b21435748e0d 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -64,6 +64,8 @@ static struct dma_ops_domain *find_protection_domain(u16 devid); * Initialization code for statistics collection */ +DECLARE_STATS_COUNTER(compl_wait); + static struct dentry *stats_dir; static struct dentry *de_isolate; static struct dentry *de_fflush; @@ -88,6 +90,8 @@ static void amd_iommu_stats_init(void) de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir, (u32 *)&amd_iommu_unmap_flush); + + amd_iommu_stats_add(&compl_wait); } #endif @@ -249,6 +253,8 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu) unsigned status = 0; unsigned long i = 0; + INC_STATS_COUNTER(compl_wait); + while (!ready && (i < EXIT_LOOP_COUNT)) { ++i; /* wait for the bit to become one */ -- cgit v1.2.3 From 0f2a86f200bc97ae6cefc5d3ac879094b3fcde48 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 15:05:16 +0100 Subject: AMD IOMMU: add stats counter for map_single requests Impact: see number of map_single requests in debugfs Signed-off-by: Joerg Roedel --- 
arch/x86/kernel/amd_iommu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index b21435748e0d..ef377865eb76 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -65,6 +65,7 @@ static struct dma_ops_domain *find_protection_domain(u16 devid); */ DECLARE_STATS_COUNTER(compl_wait); +DECLARE_STATS_COUNTER(cnt_map_single); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -92,6 +93,7 @@ static void amd_iommu_stats_init(void) (u32 *)&amd_iommu_unmap_flush); amd_iommu_stats_add(&compl_wait); + amd_iommu_stats_add(&cnt_map_single); } #endif @@ -1278,6 +1280,8 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, dma_addr_t addr; u64 dma_mask; + INC_STATS_COUNTER(cnt_map_single); + if (!check_device(dev)) return bad_dma_address; -- cgit v1.2.3 From 146a6917fc30616401a090f55cff2b855ee5b2ab Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 15:07:12 +0100 Subject: AMD IOMMU: add stats counter for unmap_single requests Impact: see number of unmap_single requests in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index ef377865eb76..71646c81421a 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -66,6 +66,7 @@ static struct dma_ops_domain *find_protection_domain(u16 devid); DECLARE_STATS_COUNTER(compl_wait); DECLARE_STATS_COUNTER(cnt_map_single); +DECLARE_STATS_COUNTER(cnt_unmap_single); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -94,6 +95,7 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&compl_wait); amd_iommu_stats_add(&cnt_map_single); + amd_iommu_stats_add(&cnt_unmap_single); } #endif @@ -1321,6 +1323,8 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr, struct protection_domain *domain; u16 devid; + INC_STATS_COUNTER(cnt_unmap_single); + if (!check_device(dev) || !get_device_resources(dev, &iommu, &domain, &devid)) /* device not handled by any AMD IOMMU */ -- cgit v1.2.3 From d03f067a9d0a1cc09529427af9a15e15d32ba1de Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 15:09:48 +0100 Subject: AMD IOMMU: add stats counter for map_sg requests Impact: see number of map_sg requests in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 71646c81421a..859ca741745b 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -67,6 +67,7 @@ static struct dma_ops_domain *find_protection_domain(u16 devid); DECLARE_STATS_COUNTER(compl_wait); DECLARE_STATS_COUNTER(cnt_map_single); DECLARE_STATS_COUNTER(cnt_unmap_single); +DECLARE_STATS_COUNTER(cnt_map_sg); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -96,6 +97,7 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&compl_wait); amd_iommu_stats_add(&cnt_map_single); amd_iommu_stats_add(&cnt_unmap_single); + amd_iommu_stats_add(&cnt_map_sg); } #endif @@ -1377,6 +1379,8 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, int mapped_elems = 0; u64 dma_mask; + INC_STATS_COUNTER(cnt_map_sg); + if (!check_device(dev)) return 0; -- cgit v1.2.3 From 55877a6bcdf0843414eecc658550c6551f5b5e1d Mon Sep 17 00:00:00 2001 From: Joerg Roedel 
Date: Fri, 12 Dec 2008 15:12:14 +0100 Subject: AMD IOMMU: add stats counter for unmap_sg requests Impact: see number of unmap_sg requests in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 859ca741745b..49f2c8533e12 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -68,6 +68,7 @@ DECLARE_STATS_COUNTER(compl_wait); DECLARE_STATS_COUNTER(cnt_map_single); DECLARE_STATS_COUNTER(cnt_unmap_single); DECLARE_STATS_COUNTER(cnt_map_sg); +DECLARE_STATS_COUNTER(cnt_unmap_sg); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -98,6 +99,7 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&cnt_map_single); amd_iommu_stats_add(&cnt_unmap_single); amd_iommu_stats_add(&cnt_map_sg); + amd_iommu_stats_add(&cnt_unmap_sg); } #endif @@ -1443,6 +1445,8 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, u16 devid; int i; + INC_STATS_COUNTER(cnt_unmap_sg); + if (!check_device(dev) || !get_device_resources(dev, &iommu, &domain, &devid)) return; -- cgit v1.2.3 From c8f0fb36bffa9e21d214a2910b825567d52bfc2c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 15:14:21 +0100 Subject: AMD IOMMU: add stats counter for alloc_coherent requests Impact: see number of alloc_coherent requests in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 49f2c8533e12..ecc89f8857b6 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -69,6 +69,7 @@ DECLARE_STATS_COUNTER(cnt_map_single); DECLARE_STATS_COUNTER(cnt_unmap_single); DECLARE_STATS_COUNTER(cnt_map_sg); DECLARE_STATS_COUNTER(cnt_unmap_sg); +DECLARE_STATS_COUNTER(cnt_alloc_coherent); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -100,6 +101,7 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&cnt_unmap_single); amd_iommu_stats_add(&cnt_map_sg); amd_iommu_stats_add(&cnt_unmap_sg); + amd_iommu_stats_add(&cnt_alloc_coherent); } #endif @@ -1481,6 +1483,8 @@ static void *alloc_coherent(struct device *dev, size_t size, phys_addr_t paddr; u64 dma_mask = dev->coherent_dma_mask; + INC_STATS_COUNTER(cnt_alloc_coherent); + if (!check_device(dev)) return NULL; -- cgit v1.2.3 From 5d31ee7e08b7713596b999a42e67491bdf3665b3 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 15:16:38 +0100 Subject: AMD IOMMU: add stats counter for free_coherent requests Impact: see number of free_coherent requests in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index ecc89f8857b6..112412d733ab 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -70,6 +70,7 @@ DECLARE_STATS_COUNTER(cnt_unmap_single); DECLARE_STATS_COUNTER(cnt_map_sg); DECLARE_STATS_COUNTER(cnt_unmap_sg); DECLARE_STATS_COUNTER(cnt_alloc_coherent); +DECLARE_STATS_COUNTER(cnt_free_coherent); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -102,6 +103,7 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&cnt_map_sg); amd_iommu_stats_add(&cnt_unmap_sg); amd_iommu_stats_add(&cnt_alloc_coherent); + amd_iommu_stats_add(&cnt_free_coherent); } #endif @@ -1541,6 
+1543,8 @@ static void free_coherent(struct device *dev, size_t size, struct protection_domain *domain; u16 devid; + INC_STATS_COUNTER(cnt_free_coherent); + if (!check_device(dev)) return; -- cgit v1.2.3 From c1858976f5ef05122bb671f678beaf7e1fe1dd74 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 15:42:39 +0100 Subject: AMD IOMMU: add stats counter for cross-page request Impact: see number of requests for more than one page in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 112412d733ab..f5455039b609 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -71,6 +71,7 @@ DECLARE_STATS_COUNTER(cnt_map_sg); DECLARE_STATS_COUNTER(cnt_unmap_sg); DECLARE_STATS_COUNTER(cnt_alloc_coherent); DECLARE_STATS_COUNTER(cnt_free_coherent); +DECLARE_STATS_COUNTER(cross_page); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -104,6 +105,7 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&cnt_unmap_sg); amd_iommu_stats_add(&cnt_alloc_coherent); amd_iommu_stats_add(&cnt_free_coherent); + amd_iommu_stats_add(&cross_page); } #endif @@ -1217,6 +1219,9 @@ static dma_addr_t __map_single(struct device *dev, pages = iommu_num_pages(paddr, size, PAGE_SIZE); paddr &= PAGE_MASK; + if (pages > 1) + INC_STATS_COUNTER(cross_page); + if (align) align_mask = (1UL << get_order(size)) - 1; -- cgit v1.2.3 From f57d98ae6979f7bcbf758023b4716f485385f903 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 15:46:29 +0100 Subject: AMD IOMMU: add stats counter for single iommu domain tlb flushes Impact: see number of single iommu domain tlb flushes in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index f5455039b609..e99022d3a394 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -72,6 +72,7 @@ DECLARE_STATS_COUNTER(cnt_unmap_sg); DECLARE_STATS_COUNTER(cnt_alloc_coherent); DECLARE_STATS_COUNTER(cnt_free_coherent); DECLARE_STATS_COUNTER(cross_page); +DECLARE_STATS_COUNTER(domain_flush_single); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -106,6 +107,7 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&cnt_alloc_coherent); amd_iommu_stats_add(&cnt_free_coherent); amd_iommu_stats_add(&cross_page); + amd_iommu_stats_add(&domain_flush_single); } #endif @@ -413,6 +415,8 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid) { u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; + INC_STATS_COUNTER(domain_flush_single); + iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1); } -- cgit v1.2.3 From 18811f55d48e5f3ee70c4744c592f940022fa592 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 15:48:28 +0100 Subject: AMD IOMMU: add stats counter for domain tlb flushes Impact: see number of domain tlb flushes in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index e99022d3a394..a897c7246dca 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -73,6 +73,7 @@ DECLARE_STATS_COUNTER(cnt_alloc_coherent); DECLARE_STATS_COUNTER(cnt_free_coherent); DECLARE_STATS_COUNTER(cross_page); 
DECLARE_STATS_COUNTER(domain_flush_single); +DECLARE_STATS_COUNTER(domain_flush_all); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -108,6 +109,7 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&cnt_free_coherent); amd_iommu_stats_add(&cross_page); amd_iommu_stats_add(&domain_flush_single); + amd_iommu_stats_add(&domain_flush_all); } #endif @@ -431,6 +433,8 @@ static void iommu_flush_domain(u16 domid) struct amd_iommu *iommu; struct iommu_cmd cmd; + INC_STATS_COUNTER(domain_flush_all); + __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, domid, 1, 1); -- cgit v1.2.3 From 5774f7c5fef2526bfa58eab628fbe91dce5e07b1 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 15:57:30 +0100 Subject: AMD IOMMU: add statistics about allocated io memory Impact: see amount of allocated io memory in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index a897c7246dca..69f367b033a2 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -74,6 +74,7 @@ DECLARE_STATS_COUNTER(cnt_free_coherent); DECLARE_STATS_COUNTER(cross_page); DECLARE_STATS_COUNTER(domain_flush_single); DECLARE_STATS_COUNTER(domain_flush_all); +DECLARE_STATS_COUNTER(alloced_io_mem); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -110,6 +111,7 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&cross_page); amd_iommu_stats_add(&domain_flush_single); amd_iommu_stats_add(&domain_flush_all); + amd_iommu_stats_add(&alloced_io_mem); } #endif @@ -1246,6 +1248,8 @@ static dma_addr_t __map_single(struct device *dev, } address += offset; + ADD_STATS_COUNTER(alloced_io_mem, size); + if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { iommu_flush_tlb(iommu, dma_dom->domain.id); dma_dom->need_flush = false; @@ -1282,6 +1286,8 @@ static void __unmap_single(struct amd_iommu *iommu, start += PAGE_SIZE; } + SUB_STATS_COUNTER(alloced_io_mem, size); + dma_ops_free_addresses(dma_dom, dma_addr, pages); if (amd_iommu_unmap_flush || dma_dom->need_flush) { -- cgit v1.2.3 From 8ecaf8f19f0f0627d6ac6d69ed9472e7d307f35b Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 12 Dec 2008 16:13:04 +0100 Subject: AMD IOMMU: add statistics about total number of map requests Impact: see total number of map requests in debugfs Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 69f367b033a2..0c504b207bf9 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -75,6 +75,7 @@ DECLARE_STATS_COUNTER(cross_page); DECLARE_STATS_COUNTER(domain_flush_single); DECLARE_STATS_COUNTER(domain_flush_all); DECLARE_STATS_COUNTER(alloced_io_mem); +DECLARE_STATS_COUNTER(total_map_requests); static struct dentry *stats_dir; static struct dentry *de_isolate; @@ -112,6 +113,7 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&domain_flush_single); amd_iommu_stats_add(&domain_flush_all); amd_iommu_stats_add(&alloced_io_mem); + amd_iommu_stats_add(&total_map_requests); } #endif @@ -1229,6 +1231,8 @@ static dma_addr_t __map_single(struct device *dev, pages = iommu_num_pages(paddr, size, PAGE_SIZE); paddr &= PAGE_MASK; + INC_STATS_COUNTER(total_map_requests); + if (pages > 1) INC_STATS_COUNTER(cross_page); -- cgit v1.2.3 From 
0e93dd883537e628b809a2120854cd591c8935f1 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Mon, 29 Dec 2008 21:45:22 +0530 Subject: AMD IOMMU: prealloc_protection_domains should be static Impact: cleanup, reduce kernel size a bit, avoid sparse warning Fixes sparse warning: arch/x86/kernel/amd_iommu.c:1299:6: warning: symbol 'prealloc_protection_domains' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 0c504b207bf9..881c68ffdf2e 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1622,7 +1622,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask) * we don't need to preallocate the protection domains anymore. * For now we have to. */ -void prealloc_protection_domains(void) +static void prealloc_protection_domains(void) { struct pci_dev *dev = NULL; struct dma_ops_domain *dma_dom; -- cgit v1.2.3 From 065a6d68c71af2a3bdd080fa5aa353b76eede8f5 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Sat, 3 Jan 2009 14:16:35 +0100 Subject: AMD IOMMU: remove now unnecessary #ifdefs The #ifdef's are no longer necessary when the iommu-api and the amd iommu updates are merged together. Signed-off-by: Joerg Roedel --- arch/x86/kernel/amd_iommu.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 881c68ffdf2e..5113c080f0c4 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -426,7 +426,6 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid) iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1); } -#ifdef CONFIG_IOMMU_API /* * This function is used to flush the IO/TLB for a given protection domain * on every IOMMU in the system @@ -450,7 +449,6 @@ static void iommu_flush_domain(u16 domid) spin_unlock_irqrestore(&iommu->lock, flags); } } -#endif /**************************************************************************** * @@ -516,7 +514,6 @@ static int iommu_map_page(struct protection_domain *dom, return 0; } -#ifdef CONFIG_IOMMU_API static void iommu_unmap_page(struct protection_domain *dom, unsigned long bus_addr) { @@ -538,7 +535,6 @@ static void iommu_unmap_page(struct protection_domain *dom, *pte = 0; } -#endif /* * This function checks if a specific unity mapping entry is needed for @@ -723,7 +719,6 @@ static u16 domain_id_alloc(void) return id; } -#ifdef CONFIG_IOMMU_API static void domain_id_free(int id) { unsigned long flags; @@ -733,7 +728,6 @@ static void domain_id_free(int id) __clear_bit(id, amd_iommu_pd_alloc_bitmap); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); } -#endif /* * Used to reserve address ranges in the aperture (e.g. 
for exclusion @@ -1702,9 +1696,7 @@ int __init amd_iommu_init_dma_ops(void) /* Make the driver finally visible to the drivers */ dma_ops = &amd_iommu_dma_ops; -#ifdef CONFIG_IOMMU_API register_iommu(&amd_iommu_ops); -#endif bus_register_notifier(&pci_bus_type, &device_nb); @@ -1732,8 +1724,6 @@ free_domains: * *****************************************************************************/ -#ifdef CONFIG_IOMMU_API - static void cleanup_domain(struct protection_domain *domain) { unsigned long flags; @@ -1944,4 +1934,3 @@ static struct iommu_ops amd_iommu_ops = { .iova_to_phys = amd_iommu_iova_to_phys, }; -#endif -- cgit v1.2.3 From 730cf27246225d56ca1603b2f3c4fdbf882d4e51 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Wed, 31 Dec 2008 18:08:45 -0800 Subject: x86: enable cpus display of kernel_max and offlined cpus Impact: enables /sys/devices/system/cpu/{kernel_max,offline} user interface By setting total_cpus, the drivers/base/cpu.c will display the values of kernel_max (NR_CPUS-1) and the offlined cpu map. Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9e177a4077ee..f49c26bd7e2d 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1298,6 +1298,8 @@ __init void prefill_possible_map(void) else possible = setup_possible_cpus; + total_cpus = max_t(int, possible, num_processors + disabled_cpus); + if (possible > CONFIG_NR_CPUS) { printk(KERN_WARNING "%d Processors exceeds NR_CPUS limit of %d\n", -- cgit v1.2.3 From 9628937d5b37169151c5f6bbd40919c6ac958a46 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Wed, 31 Dec 2008 18:08:46 -0800 Subject: x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids Impact: Reduce future system panics due to cpumask operations using NR_CPUS Insure that code does not look at bits >= nr_cpu_ids as when cpumasks are allocated based on nr_cpu_ids, these extra bits will not be defined. Also some other minor updates: * change in to use cpu accessor function set_cpu_present() instead of directly accessing cpu_present_map w/cpu_clear() [arch/x86/kernel/reboot.c] * use cpumask_of() instead of &cpumask_of_cpu() [arch/x86/kernel/reboot.c] * optimize some cpu_mask_to_apicid_and functions. Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/apic.h | 32 +++------------------------- arch/x86/include/asm/lguest.h | 2 +- arch/x86/include/asm/numaq/apic.h | 4 ++-- arch/x86/include/asm/summit/apic.h | 42 +++++++------------------------------ arch/x86/kernel/acpi/boot.c | 2 +- arch/x86/kernel/apic.c | 4 ++-- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/cpuid.c | 2 +- arch/x86/kernel/msr.c | 2 +- arch/x86/kernel/reboot.c | 4 ++-- arch/x86/kernel/smpboot.c | 2 +- arch/x86/mach-voyager/voyager_smp.c | 7 +++---- 12 files changed, 26 insertions(+), 79 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index 51ac1230294e..bc53d5ef1386 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -157,7 +157,7 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) num_bits_set = cpumask_weight(cpumask); /* Return id to all */ - if (num_bits_set == NR_CPUS) + if (num_bits_set == nr_cpu_ids) return 0xFF; /* * The cpus in the mask must all be on the apic cluster. 
If are not @@ -190,7 +190,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) num_bits_set = cpus_weight(*cpumask); /* Return id to all */ - if (num_bits_set == NR_CPUS) + if (num_bits_set == nr_cpu_ids) return cpu_to_logical_apicid(0); /* * The cpus in the mask must all be on the apic cluster. If are not @@ -218,9 +218,6 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { - int num_bits_set; - int cpus_found = 0; - int cpu; int apicid = cpu_to_logical_apicid(0); cpumask_var_t cpumask; @@ -229,31 +226,8 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, cpumask_and(cpumask, inmask, andmask); cpumask_and(cpumask, cpumask, cpu_online_mask); + apicid = cpu_mask_to_apicid(cpumask); - num_bits_set = cpumask_weight(cpumask); - /* Return id to all */ - if (num_bits_set == NR_CPUS) - goto exit; - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of TARGET_CPUS. - */ - cpu = cpumask_first(cpumask); - apicid = cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpumask_test_cpu(cpu, cpumask)) { - int new_apicid = cpu_to_logical_apicid(cpu); - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)){ - printk ("%s: Not a valid mask!\n", __func__); - return cpu_to_logical_apicid(0); - } - apicid = new_apicid; - cpus_found++; - } - cpu++; - } -exit: free_cpumask_var(cpumask); return apicid; } diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h index d28a507cef39..1caf57628b9c 100644 --- a/arch/x86/include/asm/lguest.h +++ b/arch/x86/include/asm/lguest.h @@ -15,7 +15,7 @@ #define SHARED_SWITCHER_PAGES \ DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE) /* Pages for switcher itself, then two pages per cpu */ -#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS) +#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids) /* We map at -4M for ease of mapping into the guest (one PTE page). */ #define SWITCHER_ADDR 0xFFC00000 diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index c80f00d29965..bf37bc49bd8e 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h @@ -63,8 +63,8 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) extern u8 cpu_2_logical_apicid[]; static inline int cpu_to_logical_apicid(int cpu) { - if (cpu >= NR_CPUS) - return BAD_APICID; + if (cpu >= nr_cpu_ids) + return BAD_APICID; return (int)cpu_2_logical_apicid[cpu]; } diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 99327d1be49f..4bb5fb34f030 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -52,7 +52,7 @@ static inline void init_apic_ldr(void) int i; /* Create logical APIC IDs by counting CPUs already in cluster. 
*/ - for (count = 0, i = NR_CPUS; --i >= 0; ) { + for (count = 0, i = nr_cpu_ids; --i >= 0; ) { lid = cpu_2_logical_apicid[i]; if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) ++count; @@ -97,8 +97,8 @@ static inline int apicid_to_node(int logical_apicid) static inline int cpu_to_logical_apicid(int cpu) { #ifdef CONFIG_SMP - if (cpu >= NR_CPUS) - return BAD_APICID; + if (cpu >= nr_cpu_ids) + return BAD_APICID; return (int)cpu_2_logical_apicid[cpu]; #else return logical_smp_processor_id(); @@ -107,7 +107,7 @@ static inline int cpu_to_logical_apicid(int cpu) static inline int cpu_present_to_apicid(int mps_cpu) { - if (mps_cpu < NR_CPUS) + if (mps_cpu < nr_cpu_ids) return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); else return BAD_APICID; @@ -146,7 +146,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) num_bits_set = cpus_weight(*cpumask); /* Return id to all */ - if (num_bits_set == NR_CPUS) + if (num_bits_set >= nr_cpu_ids) return (int) 0xFF; /* * The cpus in the mask must all be on the apic cluster. If are not @@ -173,42 +173,16 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, const struct cpumask *andmask) { - int num_bits_set; - int cpus_found = 0; - int cpu; - int apicid = 0xFF; + int apicid = cpu_to_logical_apicid(0); cpumask_var_t cpumask; if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) - return (int) 0xFF; + return apicid; cpumask_and(cpumask, inmask, andmask); cpumask_and(cpumask, cpumask, cpu_online_mask); + apicid = cpu_mask_to_apicid(cpumask); - num_bits_set = cpumask_weight(cpumask); - /* Return id to all */ - if (num_bits_set == nr_cpu_ids) - goto exit; - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of TARGET_CPUS. 
- */ - cpu = cpumask_first(cpumask); - apicid = cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpumask_test_cpu(cpu, cpumask)) { - int new_apicid = cpu_to_logical_apicid(cpu); - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)){ - printk ("%s: Not a valid mask!\n", __func__); - return 0xFF; - } - apicid = apicid | new_apicid; - cpus_found++; - } - cpu++; - } -exit: free_cpumask_var(cpumask); return apicid; } diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 65d0b72777ea..fd24c55e4ae2 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -598,7 +598,7 @@ EXPORT_SYMBOL(acpi_map_lsapic); int acpi_unmap_lsapic(int cpu) { per_cpu(x86_cpu_to_apicid, cpu) = -1; - cpu_clear(cpu, cpu_present_map); + set_cpu_present(cpu, false); num_processors--; return (0); diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 6b7f824db160..99589245fd8d 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -140,7 +140,7 @@ static int lapic_next_event(unsigned long delta, struct clock_event_device *evt); static void lapic_timer_setup(enum clock_event_mode mode, struct clock_event_device *evt); -static void lapic_timer_broadcast(const cpumask_t *mask); +static void lapic_timer_broadcast(const struct cpumask *mask); static void apic_pm_activate(void); /* @@ -453,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode, /* * Local APIC timer broadcast function */ -static void lapic_timer_broadcast(const cpumask_t *mask) +static void lapic_timer_broadcast(const struct cpumask *mask) { #ifdef CONFIG_SMP send_IPI_mask(mask, LOCAL_TIMER_VECTOR); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 42e0853030cb..3f95a40f718a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -355,7 +355,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); } else if (smp_num_siblings > 1) { - if (smp_num_siblings > NR_CPUS) { + if (smp_num_siblings > nr_cpu_ids) { printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings); smp_num_siblings = 1; diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 72cefd1e649b..62a3c23bd703 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -121,7 +121,7 @@ static int cpuid_open(struct inode *inode, struct file *file) lock_kernel(); cpu = iminor(file->f_path.dentry->d_inode); - if (cpu >= NR_CPUS || !cpu_online(cpu)) { + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { ret = -ENXIO; /* No such CPU */ goto out; } diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 82a7c7ed6d45..726266695b2c 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -136,7 +136,7 @@ static int msr_open(struct inode *inode, struct file *file) lock_kernel(); cpu = iminor(file->f_path.dentry->d_inode); - if (cpu >= NR_CPUS || !cpu_online(cpu)) { + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { ret = -ENXIO; /* No such CPU */ goto out; } diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index ba7b9a0e6063..de4a9d643bee 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -449,7 +449,7 @@ void native_machine_shutdown(void) #ifdef CONFIG_X86_32 /* See if there has been given a command line override */ - if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && + if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) && cpu_online(reboot_cpu)) reboot_cpu_id = reboot_cpu; #endif @@ -459,7 +459,7 @@ void 
native_machine_shutdown(void) reboot_cpu_id = smp_processor_id(); /* Make certain I only run on the appropriate processor */ - set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); + set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); /* O.K Now that I'm on the appropriate processor, * stop all of the others. diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f49c26bd7e2d..6bd4d9b73870 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1154,7 +1154,7 @@ static void __init smp_cpu_index_default(void) for_each_possible_cpu(i) { c = &cpu_data(i); /* mark all to hotplug */ - c->cpu_index = NR_CPUS; + c->cpu_index = nr_cpu_ids; } } diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index a5bc05492b1e..9840b7ec749a 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -357,9 +357,8 @@ void __init find_smp_config(void) printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); /* initialize the CPU structures (moved from smp_boot_cpus) */ - for (i = 0; i < NR_CPUS; i++) { + for (i = 0; i < nr_cpu_ids; i++) cpu_irq_affinity[i] = ~0; - } cpu_online_map = cpumask_of_cpu(boot_cpu_id); /* The boot CPU must be extended */ @@ -1227,7 +1226,7 @@ int setup_profiling_timer(unsigned int multiplier) * new values until the next timer interrupt in which they do process * accounting. */ - for (i = 0; i < NR_CPUS; ++i) + for (i = 0; i < nr_cpu_ids; ++i) per_cpu(prof_multiplier, i) = multiplier; return 0; @@ -1257,7 +1256,7 @@ void __init voyager_smp_intr_init(void) int i; /* initialize the per cpu irq mask to all disabled */ - for (i = 0; i < NR_CPUS; i++) + for (i = 0; i < nr_cpu_ids; i++) vic_irq_mask[i] = 0xFFFF; VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); -- cgit v1.2.3 From ee943a82b697456f9d2ac46f1e6d230beedb4b6c Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 31 Dec 2008 18:08:47 -0800 Subject: x86: use cpumask_var_t in acpi/boot.c Impact: reduce stack size, use new API. Replace cpumask_t with cpumask_var_t. 
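In rough outline, and purely as an illustrative sketch rather than code from this patch (the helper name below is made up), the conversion pattern from an on-stack cpumask_t to a heap-backed cpumask_var_t looks like this:

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Hypothetical helper, for illustration only: count CPUs that are both
 * present and online without placing a cpumask on the kernel stack. */
static int count_present_online_cpus(void)
{
	cpumask_var_t tmp;
	int n;

	/* With CONFIG_CPUMASK_OFFSTACK=y this allocates the mask from the
	 * heap; otherwise cpumask_var_t is a one-element array and the
	 * allocation is a no-op that always succeeds. */
	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, cpu_present_mask, cpu_online_mask);
	n = cpumask_weight(tmp);

	free_cpumask_var(tmp);
	return n;
}

The same allocate/copy/free pattern recurs in the acpi/boot.c change below and in the later cpumask conversions in this series.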
Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/boot.c | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index fd24c55e4ae2..29dc0c89d4af 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -538,9 +538,10 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct acpi_madt_local_apic *lapic; - cpumask_t tmp_map, new_map; + cpumask_var_t tmp_map, new_map; u8 physid; int cpu; + int retval = -ENOMEM; if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) return -EINVAL; @@ -569,23 +570,37 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) buffer.length = ACPI_ALLOCATE_BUFFER; buffer.pointer = NULL; - tmp_map = cpu_present_map; + if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL)) + goto out; + + if (!alloc_cpumask_var(&new_map, GFP_KERNEL)) + goto free_tmp_map; + + cpumask_copy(tmp_map, cpu_present_mask); acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); /* * If mp_register_lapic successfully generates a new logical cpu * number, then the following will get us exactly what was mapped */ - cpus_andnot(new_map, cpu_present_map, tmp_map); - if (cpus_empty(new_map)) { + cpumask_andnot(new_map, cpu_present_mask, tmp_map); + if (cpumask_empty(new_map)) { printk ("Unable to map lapic to logical cpu number\n"); - return -EINVAL; + retval = -EINVAL; + goto free_new_map; } - cpu = first_cpu(new_map); + cpu = cpumask_first(new_map); *pcpu = cpu; - return 0; + retval = 0; + +free_new_map: + free_cpumask_var(new_map); +free_tmp_map: + free_cpumask_var(tmp_map); +out: + return retval; } /* wrapper to silence section mismatch warning */ -- cgit v1.2.3 From 2fdf66b491ac706657946442789ec644cc317e1a Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 31 Dec 2008 18:08:47 -0800 Subject: cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t Impact: Reduce memory usage, use new API. This is part of an effort to reduce structure sizes for machines configured with large NR_CPUS. cpumask_t gets replaced by cpumask_var_t, which is either struct cpumask[1] (small NR_CPUS) or struct cpumask * (large NR_CPUS). (Changes to powernow-k* by .) Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 27 ++++++++-- arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 9 ++++ arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 24 +++++---- drivers/acpi/processor_core.c | 14 ++++-- drivers/acpi/processor_perflib.c | 28 ++++++----- drivers/acpi/processor_throttling.c | 80 +++++++++++++++++++----------- include/acpi/processor.h | 4 +- 7 files changed, 128 insertions(+), 58 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 88ea02dcb622..d0a001093b2d 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -517,6 +517,17 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) } } +static void free_acpi_perf_data(void) +{ + unsigned int i; + + /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. 
*/ + for_each_possible_cpu(i) + free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) + ->shared_cpu_map); + free_percpu(acpi_perf_data); +} + /* * acpi_cpufreq_early_init - initialize ACPI P-States library * @@ -527,6 +538,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) */ static int __init acpi_cpufreq_early_init(void) { + unsigned int i; dprintk("acpi_cpufreq_early_init\n"); acpi_perf_data = alloc_percpu(struct acpi_processor_performance); @@ -534,6 +546,15 @@ static int __init acpi_cpufreq_early_init(void) dprintk("Memory allocation error for acpi_perf_data.\n"); return -ENOMEM; } + for_each_possible_cpu(i) { + if (!alloc_cpumask_var(&per_cpu_ptr(acpi_perf_data, i) + ->shared_cpu_map, GFP_KERNEL)) { + + /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ + free_acpi_perf_data(); + return -ENOMEM; + } + } /* Do initialization in ACPI core */ acpi_processor_preregister_performance(acpi_perf_data); @@ -604,9 +625,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) */ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { - policy->cpus = perf->shared_cpu_map; + cpumask_copy(&policy->cpus, perf->shared_cpu_map); } - policy->related_cpus = perf->shared_cpu_map; + cpumask_copy(&policy->related_cpus, perf->shared_cpu_map); #ifdef CONFIG_SMP dmi_check_system(sw_any_bug_dmi_table); @@ -795,7 +816,7 @@ static int __init acpi_cpufreq_init(void) ret = cpufreq_register_driver(&acpi_cpufreq_driver); if (ret) - free_percpu(acpi_perf_data); + free_acpi_perf_data(); return ret; } diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c index 7c7d56b43136..1b446d79a8fd 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c @@ -310,6 +310,12 @@ static int powernow_acpi_init(void) goto err0; } + if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, + GFP_KERNEL)) { + retval = -ENOMEM; + goto err05; + } + if (acpi_processor_register_performance(acpi_processor_perf, 0)) { retval = -EIO; goto err1; @@ -412,6 +418,8 @@ static int powernow_acpi_init(void) err2: acpi_processor_unregister_performance(acpi_processor_perf, 0); err1: + free_cpumask_var(acpi_processor_perf->shared_cpu_map); +err05: kfree(acpi_processor_perf); err0: printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n"); @@ -652,6 +660,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) { #ifdef CONFIG_X86_POWERNOW_K7_ACPI if (acpi_processor_perf) { acpi_processor_unregister_performance(acpi_processor_perf, 0); + free_cpumask_var(acpi_processor_perf->shared_cpu_map); kfree(acpi_processor_perf); } #endif diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 7f05f44b97e9..c3c9adbaa26f 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -766,7 +766,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { struct cpufreq_frequency_table *powernow_table; - int ret_val; + int ret_val = -ENODEV; if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { dprintk("register performance failed: bad ACPI data\n"); @@ -815,6 +815,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) /* notify BIOS that we exist */ acpi_processor_notify_smm(THIS_MODULE); + if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, 
GFP_KERNEL)) { + printk(KERN_ERR PFX + "unable to alloc powernow_k8_data cpumask\n"); + ret_val = -ENOMEM; + goto err_out_mem; + } + return 0; err_out_mem: @@ -826,7 +833,7 @@ err_out: /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ data->acpi_data.state_count = 0; - return -ENODEV; + return ret_val; } static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table) @@ -929,6 +936,7 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { if (data->acpi_data.state_count) acpi_processor_unregister_performance(&data->acpi_data, data->cpu); + free_cpumask_var(data->acpi_data.shared_cpu_map); } #else @@ -1134,7 +1142,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) data->cpu = pol->cpu; data->currpstate = HW_PSTATE_INVALID; - if (powernow_k8_cpu_init_acpi(data)) { + rc = powernow_k8_cpu_init_acpi(data); + if (rc) { /* * Use the PSB BIOS structure. This is only availabe on * an UP version, and is deprecated by AMD. @@ -1152,20 +1161,17 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) "ACPI maintainers and complain to your BIOS " "vendor.\n"); #endif - kfree(data); - return -ENODEV; + goto err_out; } if (pol->cpu != 0) { printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " "CPU other than CPU0. Complain to your BIOS " "vendor.\n"); - kfree(data); - return -ENODEV; + goto err_out; } rc = find_psb_table(data); if (rc) { - kfree(data); - return -ENODEV; + goto err_out; } } diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 34948362f41d..0cc2fd31e376 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -826,6 +826,11 @@ static int acpi_processor_add(struct acpi_device *device) if (!pr) return -ENOMEM; + if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { + kfree(pr); + return -ENOMEM; + } + pr->handle = device->handle; strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); @@ -845,10 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type) pr = acpi_driver_data(device); - if (pr->id >= nr_cpu_ids) { - kfree(pr); - return 0; - } + if (pr->id >= nr_cpu_ids) + goto free; if (type == ACPI_BUS_REMOVAL_EJECT) { if (acpi_processor_handle_eject(pr)) @@ -873,6 +876,9 @@ static int acpi_processor_remove(struct acpi_device *device, int type) per_cpu(processors, pr->id) = NULL; per_cpu(processor_device_array, pr->id) = NULL; + +free: + free_cpumask_var(pr->throttling.shared_cpu_map); kfree(pr); return 0; diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 0d7b772bef50..846e227592d4 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -588,12 +588,15 @@ int acpi_processor_preregister_performance( int count, count_target; int retval = 0; unsigned int i, j; - cpumask_t covered_cpus; + cpumask_var_t covered_cpus; struct acpi_processor *pr; struct acpi_psd_package *pdomain; struct acpi_processor *match_pr; struct acpi_psd_package *match_pdomain; + if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) + return -ENOMEM; + mutex_lock(&performance_mutex); retval = 0; @@ -617,7 +620,7 @@ int acpi_processor_preregister_performance( } pr->performance = percpu_ptr(performance, i); - cpu_set(i, pr->performance->shared_cpu_map); + cpumask_set_cpu(i, pr->performance->shared_cpu_map); if (acpi_processor_get_psd(pr)) { retval = -EINVAL; continue; @@ 
-650,18 +653,18 @@ int acpi_processor_preregister_performance( } } - cpus_clear(covered_cpus); + cpumask_clear(covered_cpus); for_each_possible_cpu(i) { pr = per_cpu(processors, i); if (!pr) continue; - if (cpu_isset(i, covered_cpus)) + if (cpumask_test_cpu(i, covered_cpus)) continue; pdomain = &(pr->performance->domain_info); - cpu_set(i, pr->performance->shared_cpu_map); - cpu_set(i, covered_cpus); + cpumask_set_cpu(i, pr->performance->shared_cpu_map); + cpumask_set_cpu(i, covered_cpus); if (pdomain->num_processors <= 1) continue; @@ -699,8 +702,8 @@ int acpi_processor_preregister_performance( goto err_ret; } - cpu_set(j, covered_cpus); - cpu_set(j, pr->performance->shared_cpu_map); + cpumask_set_cpu(j, covered_cpus); + cpumask_set_cpu(j, pr->performance->shared_cpu_map); count++; } @@ -718,8 +721,8 @@ int acpi_processor_preregister_performance( match_pr->performance->shared_type = pr->performance->shared_type; - match_pr->performance->shared_cpu_map = - pr->performance->shared_cpu_map; + cpumask_copy(match_pr->performance->shared_cpu_map, + pr->performance->shared_cpu_map); } } @@ -731,14 +734,15 @@ err_ret: /* Assume no coordination on any error parsing domain info */ if (retval) { - cpus_clear(pr->performance->shared_cpu_map); - cpu_set(i, pr->performance->shared_cpu_map); + cpumask_clear(pr->performance->shared_cpu_map); + cpumask_set_cpu(i, pr->performance->shared_cpu_map); pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL; } pr->performance = NULL; /* Will be set for real in register */ } mutex_unlock(&performance_mutex); + free_cpumask_var(covered_cpus); return retval; } EXPORT_SYMBOL(acpi_processor_preregister_performance); diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index a0c38c94a8a0..d27838171f4a 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -61,11 +61,14 @@ static int acpi_processor_update_tsd_coord(void) int count, count_target; int retval = 0; unsigned int i, j; - cpumask_t covered_cpus; + cpumask_var_t covered_cpus; struct acpi_processor *pr, *match_pr; struct acpi_tsd_package *pdomain, *match_pdomain; struct acpi_processor_throttling *pthrottling, *match_pthrottling; + if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL)) + return -ENOMEM; + /* * Now that we have _TSD data from all CPUs, lets setup T-state * coordination between all CPUs. @@ -91,19 +94,19 @@ static int acpi_processor_update_tsd_coord(void) if (retval) goto err_ret; - cpus_clear(covered_cpus); + cpumask_clear(covered_cpus); for_each_possible_cpu(i) { pr = per_cpu(processors, i); if (!pr) continue; - if (cpu_isset(i, covered_cpus)) + if (cpumask_test_cpu(i, covered_cpus)) continue; pthrottling = &pr->throttling; pdomain = &(pthrottling->domain_info); - cpu_set(i, pthrottling->shared_cpu_map); - cpu_set(i, covered_cpus); + cpumask_set_cpu(i, pthrottling->shared_cpu_map); + cpumask_set_cpu(i, covered_cpus); /* * If the number of processor in the TSD domain is 1, it is * unnecessary to parse the coordination for this CPU. @@ -144,8 +147,8 @@ static int acpi_processor_update_tsd_coord(void) goto err_ret; } - cpu_set(j, covered_cpus); - cpu_set(j, pthrottling->shared_cpu_map); + cpumask_set_cpu(j, covered_cpus); + cpumask_set_cpu(j, pthrottling->shared_cpu_map); count++; } for_each_possible_cpu(j) { @@ -165,12 +168,14 @@ static int acpi_processor_update_tsd_coord(void) * If some CPUS have the same domain, they * will have the same shared_cpu_map. 
*/ - match_pthrottling->shared_cpu_map = - pthrottling->shared_cpu_map; + cpumask_copy(match_pthrottling->shared_cpu_map, + pthrottling->shared_cpu_map); } } err_ret: + free_cpumask_var(covered_cpus); + for_each_possible_cpu(i) { pr = per_cpu(processors, i); if (!pr) @@ -182,8 +187,8 @@ err_ret: */ if (retval) { pthrottling = &(pr->throttling); - cpus_clear(pthrottling->shared_cpu_map); - cpu_set(i, pthrottling->shared_cpu_map); + cpumask_clear(pthrottling->shared_cpu_map); + cpumask_set_cpu(i, pthrottling->shared_cpu_map); pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; } } @@ -567,7 +572,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr) pthrottling = &pr->throttling; pthrottling->tsd_valid_flag = 1; pthrottling->shared_type = pdomain->coord_type; - cpu_set(pr->id, pthrottling->shared_cpu_map); + cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); /* * If the coordination type is not defined in ACPI spec, * the tsd_valid_flag will be clear and coordination type @@ -826,7 +831,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) static int acpi_processor_get_throttling(struct acpi_processor *pr) { - cpumask_t saved_mask; + cpumask_var_t saved_mask; int ret; if (!pr) @@ -834,14 +839,20 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr) if (!pr->flags.throttling) return -ENODEV; + + if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) + return -ENOMEM; + /* * Migrate task to the cpu pointed by pr. */ - saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); + cpumask_copy(saved_mask, ¤t->cpus_allowed); + /* FIXME: use work_on_cpu() */ + set_cpus_allowed_ptr(current, cpumask_of(pr->id)); ret = pr->throttling.acpi_processor_get_throttling(pr); /* restore the previous state */ - set_cpus_allowed_ptr(current, &saved_mask); + set_cpus_allowed_ptr(current, saved_mask); + free_cpumask_var(saved_mask); return ret; } @@ -986,13 +997,13 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, int acpi_processor_set_throttling(struct acpi_processor *pr, int state) { - cpumask_t saved_mask; + cpumask_var_t saved_mask; int ret = 0; unsigned int i; struct acpi_processor *match_pr; struct acpi_processor_throttling *p_throttling; struct throttling_tstate t_state; - cpumask_t online_throttling_cpus; + cpumask_var_t online_throttling_cpus; if (!pr) return -EINVAL; @@ -1003,17 +1014,25 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) if ((state < 0) || (state > (pr->throttling.state_count - 1))) return -EINVAL; - saved_mask = current->cpus_allowed; + if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) + return -ENOMEM; + + if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) { + free_cpumask_var(saved_mask); + return -ENOMEM; + } + + cpumask_copy(saved_mask, ¤t->cpus_allowed); t_state.target_state = state; p_throttling = &(pr->throttling); - cpus_and(online_throttling_cpus, cpu_online_map, - p_throttling->shared_cpu_map); + cpumask_and(online_throttling_cpus, cpu_online_mask, + p_throttling->shared_cpu_map); /* * The throttling notifier will be called for every * affected cpu in order to get one proper T-state. * The notifier event is THROTTLING_PRECHANGE. 
*/ - for_each_cpu_mask_nr(i, online_throttling_cpus) { + for_each_cpu(i, online_throttling_cpus) { t_state.cpu = i; acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, &t_state); @@ -1025,7 +1044,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) * it can be called only for the cpu pointed by pr. */ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); + /* FIXME: use work_on_cpu() */ + set_cpus_allowed_ptr(current, cpumask_of(pr->id)); ret = p_throttling->acpi_processor_set_throttling(pr, t_state.target_state); } else { @@ -1034,7 +1054,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) * it is necessary to set T-state for every affected * cpus. */ - for_each_cpu_mask_nr(i, online_throttling_cpus) { + for_each_cpu(i, online_throttling_cpus) { match_pr = per_cpu(processors, i); /* * If the pointer is invalid, we will report the @@ -1056,7 +1076,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) continue; } t_state.cpu = i; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); + /* FIXME: use work_on_cpu() */ + set_cpus_allowed_ptr(current, cpumask_of(i)); ret = match_pr->throttling. acpi_processor_set_throttling( match_pr, t_state.target_state); @@ -1068,13 +1089,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) * affected cpu to update the T-states. * The notifier event is THROTTLING_POSTCHANGE */ - for_each_cpu_mask_nr(i, online_throttling_cpus) { + for_each_cpu(i, online_throttling_cpus) { t_state.cpu = i; acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, &t_state); } /* restore the previous state */ - set_cpus_allowed_ptr(current, &saved_mask); + /* FIXME: use work_on_cpu() */ + set_cpus_allowed_ptr(current, saved_mask); + free_cpumask_var(online_throttling_cpus); + free_cpumask_var(saved_mask); return ret; } @@ -1120,7 +1144,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) if (acpi_processor_get_tsd(pr)) { pthrottling = &pr->throttling; pthrottling->tsd_valid_flag = 0; - cpu_set(pr->id, pthrottling->shared_cpu_map); + cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map); pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL; } diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 3795590e152a..0574add2a1e3 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -127,7 +127,7 @@ struct acpi_processor_performance { unsigned int state_count; struct acpi_processor_px *states; struct acpi_psd_package domain_info; - cpumask_t shared_cpu_map; + cpumask_var_t shared_cpu_map; unsigned int shared_type; }; @@ -172,7 +172,7 @@ struct acpi_processor_throttling { unsigned int state_count; struct acpi_processor_tx_tss *states_tss; struct acpi_tsd_package domain_info; - cpumask_t shared_cpu_map; + cpumask_var_t shared_cpu_map; int (*acpi_processor_get_throttling) (struct acpi_processor * pr); int (*acpi_processor_set_throttling) (struct acpi_processor * pr, int state); -- cgit v1.2.3 From 80855f7361eb68205e6bc1981928629d9b02d5c9 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Wed, 31 Dec 2008 18:08:47 -0800 Subject: cpumask: use alloc_cpumask_var_node where appropriate Impact: Reduce inter-node memory traffic. Reduces inter-node memory traffic (offloading the global system bus) by allocating referenced struct cpumasks on the same node as the referring struct. 
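As a minimal sketch of that pattern (illustrative only; the structure and function below are hypothetical, not taken from this patch), an object that embeds a cpumask_var_t would allocate the mask on its own NUMA node like so:

#include <linux/cpumask.h>
#include <linux/topology.h>	/* cpu_to_node() */
#include <linux/gfp.h>

struct example_cfg {			/* hypothetical structure */
	cpumask_var_t domain;
};

static int example_cfg_init(struct example_cfg *cfg, int cpu)
{
	int node = cpu_to_node(cpu);

	/* Place the mask on the same node as the CPU/object that will
	 * reference it, so later accesses stay node-local. */
	if (!alloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		return -ENOMEM;
	return 0;
}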
Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 5 +++-- arch/x86/kernel/io_apic.c | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index d0a001093b2d..28102ad1a363 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -547,8 +547,9 @@ static int __init acpi_cpufreq_early_init(void) return -ENOMEM; } for_each_possible_cpu(i) { - if (!alloc_cpumask_var(&per_cpu_ptr(acpi_perf_data, i) - ->shared_cpu_map, GFP_KERNEL)) { + if (!alloc_cpumask_var_node( + &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, + GFP_KERNEL, cpu_to_node(i))) { /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ free_acpi_perf_data(); diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 3e070bb961d7..a25c3f76b8ac 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -212,11 +212,11 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu) cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); if (cfg) { - /* FIXME: needs alloc_cpumask_var_node() */ - if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) { + if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { kfree(cfg); cfg = NULL; - } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) { + } else if (!alloc_cpumask_var_node(&cfg->old_domain, + GFP_ATOMIC, node)) { free_cpumask_var(cfg->domain); kfree(cfg); cfg = NULL; -- cgit v1.2.3 From ab14398abd195af91a744c320a52a1bce814dd1e Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Fri, 2 Jan 2009 21:51:32 +0300 Subject: x86: setup_per_cpu_areas() cleanup Impact: cleanup __alloc_bootmem and __alloc_bootmem_node do panic for us in case of fail so no need for additional checks here. Also lets use pr_*() macros for printing. 
Signed-off-by: Cyrill Gorcunov Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_percpu.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 49f3f709ee1f..a4b619c33106 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -153,12 +153,10 @@ void __init setup_per_cpu_areas(void) align = max_t(unsigned long, PAGE_SIZE, align); size = roundup(old_size, align); - printk(KERN_INFO - "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", + pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); - printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", - size); + pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size); for_each_possible_cpu(cpu) { #ifndef CONFIG_NEED_MULTIPLE_NODES @@ -169,22 +167,15 @@ void __init setup_per_cpu_areas(void) if (!node_online(node) || !NODE_DATA(node)) { ptr = __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); - printk(KERN_INFO - "cpu %d has no node %d or node-local memory\n", + pr_info("cpu %d has no node %d or node-local memory\n", cpu, node); - if (ptr) - printk(KERN_DEBUG - "per cpu data for cpu%d at %016lx\n", - cpu, __pa(ptr)); - } - else { + pr_debug("per cpu data for cpu%d at %016lx\n", + cpu, __pa(ptr)); + } else { ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, __pa(MAX_DMA_ADDRESS)); - if (ptr) - printk(KERN_DEBUG - "per cpu data for cpu%d on node%d " - "at %016lx\n", - cpu, node, __pa(ptr)); + pr_debug("per cpu data for cpu%d on node%d at %016lx\n", + cpu, node, __pa(ptr)); } #endif per_cpu_offset(cpu) = ptr - __per_cpu_start; -- cgit v1.2.3 From 7aed55d1085f71241284a30af0300feea48c36db Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Sun, 4 Jan 2009 00:27:09 +0100 Subject: x86: fix RIP printout in early_idt_handler Impact: fix debug/crash printout Since errorcode is popped out, RIP is on the top of the stack. Use real RIP value instead of wrong CS. Signed-off-by: Jiri Slaby Cc: Signed-off-by: Ingo Molnar --- arch/x86/kernel/head_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 26cfdc1d7c7f..0e275d495563 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -305,7 +305,7 @@ ENTRY(early_idt_handler) call dump_stack #ifdef CONFIG_KALLSYMS leaq early_idt_ripmsg(%rip),%rdi - movq 8(%rsp),%rsi # get rip again + movq 0(%rsp),%rsi # get rip again call __print_symbol #endif #endif /* EARLY_PRINTK */ -- cgit v1.2.3 From f29521e4ee394ca241df2ba546e9d671394927a2 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 3 Jan 2009 15:46:57 +0530 Subject: x86: rename mp_config_table to mpc_table Impact: cleanup, solve 80 columns wrap problems mp_config_table should be renamed to mpc_table. The reason: the 'c' in MPC already means 'config' - no need to repeat that in the type name. 
Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/mpparse.h | 3 +-- arch/x86/include/asm/genapic_32.h | 4 ++-- arch/x86/include/asm/mach-default/mach_mpparse.h | 4 ++-- arch/x86/include/asm/mach-generic/mach_mpparse.h | 5 ++--- arch/x86/include/asm/mach-generic/mach_mpspec.h | 4 ++-- arch/x86/include/asm/mpspec_def.h | 2 +- arch/x86/include/asm/numaq/mpparse.h | 3 +-- arch/x86/include/asm/summit/mpparse.h | 2 +- arch/x86/kernel/mpparse.c | 10 ++++------ arch/x86/kernel/numaq_32.c | 3 +-- arch/x86/mach-generic/es7000.c | 4 ++-- arch/x86/mach-generic/numaq.c | 3 +-- arch/x86/mach-generic/probe.c | 3 +-- 13 files changed, 21 insertions(+), 29 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h index ed5a3caae141..c1629b090ec2 100644 --- a/arch/x86/include/asm/es7000/mpparse.h +++ b/arch/x86/include/asm/es7000/mpparse.h @@ -10,8 +10,7 @@ extern void setup_unisys(void); #ifndef CONFIG_X86_GENERICARCH extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id); -extern int mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid); +extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid); #endif #ifdef CONFIG_ACPI diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index 746f37a7963a..db02be008f71 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -16,7 +16,7 @@ */ struct mpc_config_bus; -struct mp_config_table; +struct mpc_table; struct mpc_config_processor; struct genapic { @@ -51,7 +51,7 @@ struct genapic { /* When one of the next two hooks returns 1 the genapic is switched to this. Essentially they are additional probe functions. */ - int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, + int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid); int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h index 8c1ea21238a7..c70a263d68cd 100644 --- a/arch/x86/include/asm/mach-default/mach_mpparse.h +++ b/arch/x86/include/asm/mach-default/mach_mpparse.h @@ -1,8 +1,8 @@ #ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H #define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H -static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid) +static inline int +mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { return 0; } diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h index 048f1d468535..9444ab8dca94 100644 --- a/arch/x86/include/asm/mach-generic/mach_mpparse.h +++ b/arch/x86/include/asm/mach-generic/mach_mpparse.h @@ -2,9 +2,8 @@ #define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H -extern int mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid); +extern int mps_oem_check(struct mpc_table *, char *, char *); -extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id); +extern int acpi_madt_oem_check(char *, char *); #endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */ diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h index bbab5ccfd4fe..3bc407226578 100644 --- a/arch/x86/include/asm/mach-generic/mach_mpspec.h +++ b/arch/x86/include/asm/mach-generic/mach_mpspec.h @@ -7,6 +7,6 @@ /* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. 
*/ #define MAX_MP_BUSSES 260 -extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid); +extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); + #endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */ diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index e3ace7d1d35d..e260e6543b4c 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -39,7 +39,7 @@ struct intel_mp_floating { #define MPC_SIGNATURE "PCMP" -struct mp_config_table { +struct mpc_table { char mpc_signature[4]; unsigned short mpc_length; /* Size of table */ char mpc_spec; /* 0x01 */ diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h index 252292e077b6..a2eeefcd1cc7 100644 --- a/arch/x86/include/asm/numaq/mpparse.h +++ b/arch/x86/include/asm/numaq/mpparse.h @@ -1,7 +1,6 @@ #ifndef __ASM_NUMAQ_MPPARSE_H #define __ASM_NUMAQ_MPPARSE_H -extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid); +extern void numaq_mps_oem_check(struct mpc_table *, char *, char *); #endif /* __ASM_NUMAQ_MPPARSE_H */ diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h index 013ce6fab2d5..380e86c02363 100644 --- a/arch/x86/include/asm/summit/mpparse.h +++ b/arch/x86/include/asm/summit/mpparse.h @@ -11,7 +11,7 @@ extern void setup_summit(void); #define setup_summit() {} #endif -static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, +static inline int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { if (!strncmp(oem, "IBM ENSW", 8) && diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index c5c5b8df1dbc..154de681e8b2 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -246,8 +246,7 @@ static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) * Read/parse the MPC */ -static int __init smp_check_mpc(struct mp_config_table *mpc, char *oem, - char *str) +static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str) { if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { @@ -283,7 +282,7 @@ static int __init smp_check_mpc(struct mp_config_table *mpc, char *oem, return 1; } -static int __init smp_read_mpc(struct mp_config_table *mpc, unsigned early) +static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) { char str[16]; char oem[10]; @@ -843,7 +842,7 @@ static int __init get_MP_intsrc_index(struct mpc_config_intsrc *m) static struct mpc_config_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; #endif -static int __init replace_intsrc_all(struct mp_config_table *mpc, +static int __init replace_intsrc_all(struct mpc_table *mpc, unsigned long mpc_new_phys, unsigned long mpc_new_length) { @@ -1014,8 +1013,7 @@ static int __init update_mp_table(void) char str[16]; char oem[10]; struct intel_mp_floating *mpf; - struct mp_config_table *mpc; - struct mp_config_table *mpc_new; + struct mpc_table *mpc, *mpc_new; if (!enable_update_mptable) return 0; diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 0deea37a53cf..454c55b56748 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -260,8 +260,7 @@ static struct x86_quirks numaq_x86_quirks __initdata = { .update_genapic = numaq_update_genapic, }; -void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid) +void numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { if (strncmp(oem, "IBM NUMA", 8)) printk("Warning! 
Not a NUMA-Q system!\n"); diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 4ba5ccaa1584..3b580edf254b 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -43,8 +43,8 @@ static void __init enable_apic_mode(void) return; } -static __init int mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid) +static __init int +mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { if (mpc->mpc_oemptr) { struct mp_config_oemtable *oem_table = diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 511d7941364f..3679e2255645 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c @@ -19,8 +19,7 @@ #include #include -static int mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid) +static int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { numaq_mps_oem_check(mpc, oem, productid); return found_numaq; diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c index c346d9d0226f..15a38daef1a8 100644 --- a/arch/x86/mach-generic/probe.c +++ b/arch/x86/mach-generic/probe.c @@ -110,8 +110,7 @@ void __init generic_apic_probe(void) /* These functions can switch the APIC even after the initial ->probe() */ -int __init mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid) +int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { int i; for (i = 0; apic_probe[i]; ++i) { -- cgit v1.2.3 From 00fb8606e5621a79ca2fe788e461fe8a59eb1450 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 3 Jan 2009 15:47:32 +0530 Subject: x86: rename mpc_config_bus to mpc_bus Impact: cleanup, solve 80 columns wrap problems mpc_config_bus should be renamed to mpc_bus. The reason: the 'c' in MPC already means 'config' - no need to repeat that in the type name. Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic_32.h | 2 +- arch/x86/include/asm/mpspec_def.h | 2 +- arch/x86/include/asm/setup.h | 6 +++--- arch/x86/kernel/mpparse.c | 12 +++++------- arch/x86/kernel/numaq_32.c | 4 ++-- 5 files changed, 12 insertions(+), 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index db02be008f71..057e2c2360f2 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -15,7 +15,7 @@ * Copyright 2003 Andi Kleen, SuSE Labs. */ -struct mpc_config_bus; +struct mpc_bus; struct mpc_table; struct mpc_config_processor; diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index e260e6543b4c..daa3f4ab2ff3 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -80,7 +80,7 @@ struct mpc_config_processor { unsigned int mpc_reserved[2]; }; -struct mpc_config_bus { +struct mpc_bus { unsigned char mpc_type; unsigned char mpc_busid; unsigned char mpc_bustype[6]; diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 4fcd53fd5f43..26754ca35a60 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -26,7 +26,7 @@ extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); * Any setup quirks to be performed? 
*/ struct mpc_config_processor; -struct mpc_config_bus; +struct mpc_bus; struct mp_config_oemtable; struct x86_quirks { int (*arch_pre_time_init)(void); @@ -40,8 +40,8 @@ struct x86_quirks { int *mpc_record; int (*mpc_apic_id)(struct mpc_config_processor *m); - void (*mpc_oem_bus_info)(struct mpc_config_bus *m, char *name); - void (*mpc_oem_pci_bus)(struct mpc_config_bus *m); + void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); + void (*mpc_oem_pci_bus)(struct mpc_bus *m); void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable, unsigned short oemsize); int (*setup_ioapic_ids)(void); diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 154de681e8b2..edbd2b19c603 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -74,7 +74,7 @@ static void __init MP_processor_info(struct mpc_config_processor *m) } #ifdef CONFIG_X86_IO_APIC -static void __init MP_bus_info(struct mpc_config_bus *m) +static void __init MP_bus_info(struct mpc_bus *m) { char str[7]; memcpy(str, m->mpc_bustype, 6); @@ -338,8 +338,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) } case MP_BUS: { - struct mpc_config_bus *m = - (struct mpc_config_bus *)mpt; + struct mpc_bus *m = (struct mpc_bus *)mpt; #ifdef CONFIG_X86_IO_APIC MP_bus_info(m); #endif @@ -488,7 +487,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) static void __init construct_ioapic_table(int mpc_default_type) { struct mpc_config_ioapic ioapic; - struct mpc_config_bus bus; + struct mpc_bus bus; bus.mpc_type = MP_BUS; bus.mpc_busid = 0; @@ -656,7 +655,7 @@ static void __init __get_smp_config(unsigned int early) * ISA defaults and hope it will work. */ if (!mp_irq_entries) { - struct mpc_config_bus bus; + struct mpc_bus bus; printk(KERN_ERR "BIOS bug, no explicit IRQ entries, " "using default mptable. " @@ -867,8 +866,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, } case MP_BUS: { - struct mpc_config_bus *m = - (struct mpc_config_bus *)mpt; + struct mpc_bus *m = (struct mpc_bus *)mpt; mpt += sizeof(*m); count += sizeof(*m); break; diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 454c55b56748..813542692777 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -135,7 +135,7 @@ int mp_bus_id_to_node[MAX_MP_BUSSES]; int mp_bus_id_to_local[MAX_MP_BUSSES]; /* x86_quirks member */ -static void mpc_oem_bus_info(struct mpc_config_bus *m, char *name) +static void mpc_oem_bus_info(struct mpc_bus *m, char *name) { int quad = translation_table[mpc_record]->trans_quad; int local = translation_table[mpc_record]->trans_local; @@ -149,7 +149,7 @@ static void mpc_oem_bus_info(struct mpc_config_bus *m, char *name) int quad_local_to_mp_bus_id [NR_CPUS/4][4]; /* x86_quirks member */ -static void mpc_oem_pci_bus(struct mpc_config_bus *m) +static void mpc_oem_pci_bus(struct mpc_bus *m) { int quad = translation_table[mpc_record]->trans_quad; int local = translation_table[mpc_record]->trans_local; -- cgit v1.2.3 From f4f21b716bb997dcafca622d9e232f855a64c7b6 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 3 Jan 2009 15:48:52 +0530 Subject: x86: rename mpc_config_processor to mpc_cpu Impact: cleanup, solve 80 columns wrap problems mpc_config_processor should be renamed to mpc_cpu. The reason: the 'c' in MPC already means 'config' - no need to repeat that in the type name. Plus 'processor' is a lot longer than 'cpu' - so we try to use 'cpu' in all type names, as much as possible. 
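To make the 80-column benefit concrete, here is the before/after pair from the mpparse.c hunk below, pulled out side by side (an illustrative fragment only, not additional patch content):

	/* before: the long type name forces a line wrap */
	struct mpc_config_processor *m =
				(struct mpc_config_processor *)mpt;

	/* after: the same cast fits comfortably within 80 columns */
	struct mpc_cpu *m = (struct mpc_cpu *)mpt;
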
Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/genapic_32.h | 2 +- arch/x86/include/asm/mpspec_def.h | 2 +- arch/x86/include/asm/setup.h | 4 ++-- arch/x86/kernel/mpparse.c | 10 ++++------ arch/x86/kernel/numaq_32.c | 2 +- arch/x86/kernel/visws_quirks.c | 4 ++-- 6 files changed, 11 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index 057e2c2360f2..2c05b737ee22 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h @@ -17,7 +17,7 @@ struct mpc_bus; struct mpc_table; -struct mpc_config_processor; +struct mpc_cpu; struct genapic { char *name; diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index daa3f4ab2ff3..7e8ca5136b29 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -70,7 +70,7 @@ struct mpc_table { #define CPU_MODEL_MASK 0x00F0 #define CPU_FAMILY_MASK 0x0F00 -struct mpc_config_processor { +struct mpc_cpu { unsigned char mpc_type; unsigned char mpc_apicid; /* Local APIC number */ unsigned char mpc_apicver; /* Its versions */ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 26754ca35a60..8cf77ec2088a 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -25,7 +25,7 @@ extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); /* * Any setup quirks to be performed? */ -struct mpc_config_processor; +struct mpc_cpu; struct mpc_bus; struct mp_config_oemtable; struct x86_quirks { @@ -39,7 +39,7 @@ struct x86_quirks { int (*mach_find_smp_config)(unsigned int reserve); int *mpc_record; - int (*mpc_apic_id)(struct mpc_config_processor *m); + int (*mpc_apic_id)(struct mpc_cpu *m); void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); void (*mpc_oem_pci_bus)(struct mpc_bus *m); void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable, diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index edbd2b19c603..1ec13bb18730 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -49,7 +49,7 @@ static int __init mpf_checksum(unsigned char *mp, int len) return sum & 0xFF; } -static void __init MP_processor_info(struct mpc_config_processor *m) +static void __init MP_processor_info(struct mpc_cpu *m) { int apicid; char *bootup_cpu = ""; @@ -327,8 +327,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) switch (*mpt) { case MP_PROCESSOR: { - struct mpc_config_processor *m = - (struct mpc_config_processor *)mpt; + struct mpc_cpu *m = (struct mpc_cpu *)mpt; /* ACPI may have already provided this data */ if (!acpi_lapic) MP_processor_info(m); @@ -534,7 +533,7 @@ static inline void __init construct_ioapic_table(int mpc_default_type) { } static inline void __init construct_default_ISA_mptable(int mpc_default_type) { - struct mpc_config_processor processor; + struct mpc_cpu processor; struct mpc_config_lintsrc lintsrc; int linttypes[2] = { mp_ExtINT, mp_NMI }; int i; @@ -858,8 +857,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, switch (*mpt) { case MP_PROCESSOR: { - struct mpc_config_processor *m = - (struct mpc_config_processor *)mpt; + struct mpc_cpu *m = (struct mpc_cpu *)mpt; mpt += sizeof(*m); count += sizeof(*m); break; diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 813542692777..bcc3da1644fe 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -117,7 +117,7 @@ static 
inline int generate_logical_apicid(int quad, int phys_apicid) } /* x86_quirks member */ -static int mpc_apic_id(struct mpc_config_processor *m) +static int mpc_apic_id(struct mpc_cpu *m) { int quad = translation_table[mpc_record]->trans_quad; int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid); diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index 0c9667f0752a..ca12afa63475 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c @@ -176,7 +176,7 @@ static int __init visws_get_smp_config(unsigned int early) * No problem for Linux. */ -static void __init MP_processor_info(struct mpc_config_processor *m) +static void __init MP_processor_info(struct mpc_cpu *m) { int ver, logical_apicid; physid_mask_t apic_cpus; @@ -218,7 +218,7 @@ static void __init MP_processor_info(struct mpc_config_processor *m) static int __init visws_find_smp_config(unsigned int reserve) { - struct mpc_config_processor *mp = phys_to_virt(CO_CPU_TAB_PHYS); + struct mpc_cpu *mp = phys_to_virt(CO_CPU_TAB_PHYS); unsigned short ncpus = readw(phys_to_virt(CO_CPU_NUM_PHYS)); if (ncpus > CO_CPU_MAX) { -- cgit v1.2.3 From 2b85b5fb47c95cd4cbd15f0c0ef0242d8f36e2e0 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 3 Jan 2009 15:49:57 +0530 Subject: x86: rename mpc_config_ioapic to mpc_ioapic Impact: cleanup, solve 80 columns wrap problems mpc_config_ioapic should be renamed to mpc_ioapic. The reason: the 'c' in MPC already means 'config' - no need to repeat that in the type name. Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 2 +- arch/x86/kernel/mpparse.c | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index 7e8ca5136b29..79289c70afd6 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -108,7 +108,7 @@ struct mpc_bus { #define MPC_APIC_USABLE 0x01 -struct mpc_config_ioapic { +struct mpc_ioapic { unsigned char mpc_type; unsigned char mpc_apicid; unsigned char mpc_apicver; diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 1ec13bb18730..7ec4b0da9851 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -133,7 +133,7 @@ static int bad_ioapic(unsigned long address) return 0; } -static void __init MP_ioapic_info(struct mpc_config_ioapic *m) +static void __init MP_ioapic_info(struct mpc_ioapic *m) { if (!(m->mpc_flags & MPC_APIC_USABLE)) return; @@ -348,12 +348,11 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) case MP_IOAPIC: { #ifdef CONFIG_X86_IO_APIC - struct mpc_config_ioapic *m = - (struct mpc_config_ioapic *)mpt; + struct mpc_ioapic *m = (struct mpc_ioapic *)mpt; MP_ioapic_info(m); #endif - mpt += sizeof(struct mpc_config_ioapic); - count += sizeof(struct mpc_config_ioapic); + mpt += sizeof(struct mpc_ioapic); + count += sizeof(struct mpc_ioapic); break; } case MP_INTSRC: @@ -485,7 +484,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) static void __init construct_ioapic_table(int mpc_default_type) { - struct mpc_config_ioapic ioapic; + struct mpc_ioapic ioapic; struct mpc_bus bus; bus.mpc_type = MP_BUS; @@ -871,8 +870,8 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, } case MP_IOAPIC: { - mpt += sizeof(struct mpc_config_ioapic); - count += sizeof(struct mpc_config_ioapic); + mpt += sizeof(struct mpc_ioapic); + count += 
sizeof(struct mpc_ioapic); break; } case MP_INTSRC: -- cgit v1.2.3 From 540d4e72e1ec6f0d07c252abc97616173aa0382b Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 3 Jan 2009 15:50:52 +0530 Subject: x86: rename mpc_config_intsrc to mpc_intsrc Impact: cleanup, solve 80 columns wrap problems mpc_config_intsrc should be renamed to mpc_intsrc. The reason: the 'c' in MPC already means 'config' - no need to repeat that in the type name. Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 2 +- arch/x86/kernel/mpparse.c | 37 +++++++++++++++++-------------------- 2 files changed, 18 insertions(+), 21 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index 79289c70afd6..e5a8468c215b 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -116,7 +116,7 @@ struct mpc_ioapic { unsigned int mpc_apicaddr; }; -struct mpc_config_intsrc { +struct mpc_intsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 7ec4b0da9851..6b75d3143586 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -152,7 +152,7 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m) nr_ioapics++; } -static void print_MP_intsrc_info(struct mpc_config_intsrc *m) +static void print_MP_intsrc_info(struct mpc_intsrc *m) { apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC INT %02x\n", @@ -170,7 +170,7 @@ static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq); } -static void __init assign_to_mp_irq(struct mpc_config_intsrc *m, +static void __init assign_to_mp_irq(struct mpc_intsrc *m, struct mp_config_intsrc *mp_irq) { mp_irq->mp_dstapic = m->mpc_dstapic; @@ -183,7 +183,7 @@ static void __init assign_to_mp_irq(struct mpc_config_intsrc *m, } static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq, - struct mpc_config_intsrc *m) + struct mpc_intsrc *m) { m->mpc_dstapic = mp_irq->mp_dstapic; m->mpc_type = mp_irq->mp_type; @@ -195,7 +195,7 @@ static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq, } static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq, - struct mpc_config_intsrc *m) + struct mpc_intsrc *m) { if (mp_irq->mp_dstapic != m->mpc_dstapic) return 1; @@ -215,7 +215,7 @@ static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq, return 0; } -static void __init MP_intsrc_info(struct mpc_config_intsrc *m) +static void __init MP_intsrc_info(struct mpc_intsrc *m) { int i; @@ -358,13 +358,12 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) case MP_INTSRC: { #ifdef CONFIG_X86_IO_APIC - struct mpc_config_intsrc *m = - (struct mpc_config_intsrc *)mpt; + struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; MP_intsrc_info(m); #endif - mpt += sizeof(struct mpc_config_intsrc); - count += sizeof(struct mpc_config_intsrc); + mpt += sizeof(struct mpc_intsrc); + count += sizeof(struct mpc_intsrc); break; } case MP_LINTSRC: @@ -413,7 +412,7 @@ static int __init ELCR_trigger(unsigned int irq) static void __init construct_default_ioirq_mptable(int mpc_default_type) { - struct mpc_config_intsrc intsrc; + struct mpc_intsrc intsrc; int i; int ELCR_fallback = 0; @@ -799,7 +798,7 @@ void __init find_smp_config(void) #ifdef CONFIG_X86_IO_APIC static u8 __initdata 
irq_used[MAX_IRQ_SOURCES]; -static int __init get_MP_intsrc_index(struct mpc_config_intsrc *m) +static int __init get_MP_intsrc_index(struct mpc_intsrc *m) { int i; @@ -836,7 +835,7 @@ static int __init get_MP_intsrc_index(struct mpc_config_intsrc *m) #define SPARE_SLOT_NUM 20 -static struct mpc_config_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; +static struct mpc_intsrc __initdata *m_spare[SPARE_SLOT_NUM]; #endif static int __init replace_intsrc_all(struct mpc_table *mpc, @@ -877,8 +876,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, case MP_INTSRC: { #ifdef CONFIG_X86_IO_APIC - struct mpc_config_intsrc *m = - (struct mpc_config_intsrc *)mpt; + struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; printk(KERN_INFO "OLD "); print_MP_intsrc_info(m); @@ -899,8 +897,8 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, nr_m_spare++; } #endif - mpt += sizeof(struct mpc_config_intsrc); - count += sizeof(struct mpc_config_intsrc); + mpt += sizeof(struct mpc_intsrc); + count += sizeof(struct mpc_intsrc); break; } case MP_LINTSRC: @@ -938,9 +936,8 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]); m_spare[nr_m_spare] = NULL; } else { - struct mpc_config_intsrc *m = - (struct mpc_config_intsrc *)mpt; - count += sizeof(struct mpc_config_intsrc); + struct mpc_intsrc *m = (struct mpc_intsrc *)mpt; + count += sizeof(struct mpc_intsrc); if (!mpc_new_phys) { printk(KERN_INFO "No spare slots, try to append...take your risk, new mpc_length %x\n", count); } else { @@ -953,7 +950,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, } assign_to_mpc_intsrc(&mp_irqs[i], m); mpc->mpc_length = count; - mpt += sizeof(struct mpc_config_intsrc); + mpt += sizeof(struct mpc_intsrc); } print_mp_irq_info(&mp_irqs[i]); } -- cgit v1.2.3 From 8fb2952b8a8b8b3f982297f0fca288ce04f419a8 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 3 Jan 2009 15:51:54 +0530 Subject: x86: rename mpc_config_lintsrc to mpc_lintsrc Impact: cleanup, solve 80 columns wrap problems mpc_config_lintsrc should be renamed to mpc_lintsrc. The reason: the 'c' in MPC already means 'config' - no need to repeat that in the type name. 
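With the cpu, bus, ioapic, intsrc and lintsrc record types all renamed, the MP-table walking code reads noticeably tighter. The following is a minimal sketch of how the renamed types are used together; it is modelled on the smp_read_mpc()/replace_intsrc_all() loops in the hunks above, with the per-record handlers and error handling stripped out, and the function name is illustrative only:

#include <linux/init.h>
#include <asm/mpspec_def.h>

/* Simplified sketch of an MP configuration table walk. */
static void __init walk_mpc_records(struct mpc_table *mpc)
{
	unsigned char *mpt = ((unsigned char *)mpc) + sizeof(*mpc);
	unsigned int count = sizeof(*mpc);

	while (count < mpc->mpc_length) {
		switch (*mpt) {
		case MP_PROCESSOR:
			mpt += sizeof(struct mpc_cpu);
			count += sizeof(struct mpc_cpu);
			break;
		case MP_BUS:
			mpt += sizeof(struct mpc_bus);
			count += sizeof(struct mpc_bus);
			break;
		case MP_IOAPIC:
			mpt += sizeof(struct mpc_ioapic);
			count += sizeof(struct mpc_ioapic);
			break;
		case MP_INTSRC:
			mpt += sizeof(struct mpc_intsrc);
			count += sizeof(struct mpc_intsrc);
			break;
		case MP_LINTSRC:
			mpt += sizeof(struct mpc_lintsrc);
			count += sizeof(struct mpc_lintsrc);
			break;
		default:
			return;		/* unknown record type: stop */
		}
	}
}
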
Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 2 +- arch/x86/kernel/mpparse.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index e5a8468c215b..fabf970813bf 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -139,7 +139,7 @@ enum mp_irq_source_types { #define MP_APIC_ALL 0xFF -struct mpc_config_lintsrc { +struct mpc_lintsrc { unsigned char mpc_type; unsigned char mpc_irqtype; unsigned short mpc_irqflag; diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 6b75d3143586..defe87e12056 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -233,7 +233,7 @@ static void __init MP_intsrc_info(struct mpc_intsrc *m) #endif -static void __init MP_lintsrc_info(struct mpc_config_lintsrc *m) +static void __init MP_lintsrc_info(struct mpc_lintsrc *m) { apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC LINT %02x\n", @@ -368,8 +368,8 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) } case MP_LINTSRC: { - struct mpc_config_lintsrc *m = - (struct mpc_config_lintsrc *)mpt; + struct mpc_lintsrc *m = + (struct mpc_lintsrc *)mpt; MP_lintsrc_info(m); mpt += sizeof(*m); count += sizeof(*m); @@ -532,7 +532,7 @@ static inline void __init construct_ioapic_table(int mpc_default_type) { } static inline void __init construct_default_ISA_mptable(int mpc_default_type) { struct mpc_cpu processor; - struct mpc_config_lintsrc lintsrc; + struct mpc_lintsrc lintsrc; int linttypes[2] = { mp_ExtINT, mp_NMI }; int i; @@ -903,8 +903,8 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, } case MP_LINTSRC: { - struct mpc_config_lintsrc *m = - (struct mpc_config_lintsrc *)mpt; + struct mpc_lintsrc *m = + (struct mpc_lintsrc *)mpt; mpt += sizeof(*m); count += sizeof(*m); break; -- cgit v1.2.3 From b0e239ffad5aff50823c91c10cf6b72a1a651c2f Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 3 Jan 2009 15:52:54 +0530 Subject: x86: rename mpc_config_oemtable to mpc_oemtable Impact: cleanup, solve 80 columns wrap problems mpc_config_oemtable should be renamed to mpc_oemtable. The reason: the 'c' in MPC already means 'config' - no need to repeat that in the type name. 
Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 2 +- arch/x86/include/asm/setup.h | 4 ++-- arch/x86/kernel/mpparse.c | 2 +- arch/x86/kernel/numaq_32.c | 2 +- arch/x86/mach-generic/es7000.c | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index fabf970813bf..ba8ed0cf658d 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -151,7 +151,7 @@ struct mpc_lintsrc { #define MPC_OEM_SIGNATURE "_OEM" -struct mp_config_oemtable { +struct mpc_oemtable { char oem_signature[4]; unsigned short oem_length; /* Size of table */ char oem_rev; /* 0x01 */ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 8cf77ec2088a..ebe858cdc8a3 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -27,7 +27,7 @@ extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip); */ struct mpc_cpu; struct mpc_bus; -struct mp_config_oemtable; +struct mpc_oemtable; struct x86_quirks { int (*arch_pre_time_init)(void); int (*arch_time_init)(void); @@ -42,7 +42,7 @@ struct x86_quirks { int (*mpc_apic_id)(struct mpc_cpu *m); void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); void (*mpc_oem_pci_bus)(struct mpc_bus *m); - void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable, + void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, unsigned short oemsize); int (*setup_ioapic_ids)(void); int (*update_genapic)(void); diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index defe87e12056..1ccb2537e3fd 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -313,7 +313,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) return 1; if (mpc->mpc_oemptr && x86_quirks->smp_read_mpc_oem) { - struct mp_config_oemtable *oem_table = (struct mp_config_oemtable *)(unsigned long)mpc->mpc_oemptr; + struct mpc_oemtable *oem_table = (void *)(long)mpc->mpc_oemptr; x86_quirks->smp_read_mpc_oem(oem_table, mpc->mpc_oemsize); } diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index bcc3da1644fe..aa1e5a068551 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -186,7 +186,7 @@ static int __init mpf_checksum(unsigned char *mp, int len) * Read/parse the MPC oem tables */ -static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, +static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable, unsigned short oemsize) { int count = sizeof(*oemtable); /* the header size */ diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 3b580edf254b..39243a796e18 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -47,8 +47,8 @@ static __init int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { if (mpc->mpc_oemptr) { - struct mp_config_oemtable *oem_table = - (struct mp_config_oemtable *)mpc->mpc_oemptr; + struct mpc_oemtable *oem_table = + (struct mpc_oemtable *)mpc->mpc_oemptr; if (!strncmp(oem, "UNISYS", 6)) return parse_unisys_oem((char *)oem_table); } -- cgit v1.2.3 From e423e33ec100a4a21943f6535e9971bb3aacfe38 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:16:25 +0530 Subject: x86: apic.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of WARNING: Use #include instead of WARNING: Use #include instead of WARNING: line over 80 characters 
ERROR: else should follow close brace '}' total: 2 errors, 4 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index d652515e2855..f314304ed06d 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -31,9 +31,11 @@ #include #include #include +#include +#include +#include #include -#include #include #include #include @@ -41,10 +43,8 @@ #include #include #include -#include #include #include -#include #include #include @@ -687,7 +687,7 @@ static int __init calibrate_APIC_clock(void) local_irq_enable(); if (levt->features & CLOCK_EVT_FEAT_DUMMY) { - pr_warning("APIC timer disabled due to verification failure.\n"); + pr_warning("APIC timer disabled due to verification failure\n"); return -1; } @@ -2087,14 +2087,12 @@ __cpuinit int apic_is_clustered_box(void) /* are we being called early in kernel startup? */ if (bios_cpu_apicid) { id = bios_cpu_apicid[i]; - } - else if (i < nr_cpu_ids) { + } else if (i < nr_cpu_ids) { if (cpu_present(i)) id = per_cpu(x86_bios_cpu_apicid, i); else continue; - } - else + } else break; if (id != BAD_APICID) -- cgit v1.2.3 From befa9e780d49f23b051a1ad96881274ceb275ae5 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:18:56 +0530 Subject: x86: process_32.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of WARNING: Use #include instead of WARNING: Use #include instead of WARNING: Use #include instead of ERROR: "foo * bar" should be "foo *bar" ERROR: trailing whitespace ERROR: spaces required around that ':' (ctx:WxO) ERROR: spaces required around that ':' (ctx:OxW) total: 7 errors, 4 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/process_32.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 3ba155d24884..a546f55c77b4 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -39,11 +39,12 @@ #include #include #include +#include +#include +#include -#include #include #include -#include #include #include #include @@ -56,10 +57,8 @@ #include #include -#include #include #include -#include #include asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); @@ -205,7 +204,7 @@ extern void kernel_thread_helper(void); /* * Create a kernel thread */ -int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) +int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { struct pt_regs regs; @@ -266,7 +265,7 @@ void flush_thread(void) tsk->thread.debugreg3 = 0; tsk->thread.debugreg6 = 0; tsk->thread.debugreg7 = 0; - memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); + memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); clear_tsk_thread_flag(tsk, TIF_DEBUG); /* * Forget coprocessor state.. 
@@ -293,9 +292,9 @@ void prepare_to_copy(struct task_struct *tsk) int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, unsigned long unused, - struct task_struct * p, struct pt_regs * regs) + struct task_struct *p, struct pt_regs *regs) { - struct pt_regs * childregs; + struct pt_regs *childregs; struct task_struct *tsk; int err; @@ -347,7 +346,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { - __asm__("movl %0, %%gs" :: "r"(0)); + __asm__("movl %0, %%gs" : : "r"(0)); regs->fs = 0; set_fs(USER_DS); regs->ds = __USER_DS; @@ -638,7 +637,7 @@ asmlinkage int sys_vfork(struct pt_regs regs) asmlinkage int sys_execve(struct pt_regs regs) { int error; - char * filename; + char *filename; filename = getname((char __user *) regs.bx); error = PTR_ERR(filename); -- cgit v1.2.3 From 60d53c305805fcb612d3982dfef5c9e678e27f42 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:20:40 +0530 Subject: x86: traps.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of WARNING: Use #include instead of WARNING: Use #include instead of total: 0 errors, 3 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/traps.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ce6650eb64e9..25d5c3073582 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -65,9 +65,6 @@ #else #include #include -#include -#include -#include #include #include "cpu/mcheck/mce.h" -- cgit v1.2.3 From 6a02e71099a87d13f2aa49cb491b5bfb5e662fae Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:22:17 +0530 Subject: x86: irq.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of total: 0 errors, 1 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index bce53e1352a0..3973e2df7f87 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -5,10 +5,10 @@ #include #include #include +#include #include #include -#include #include atomic_t irq_err_count; -- cgit v1.2.3 From 5f66b2a0d919e84ee921dc21c9dbfddd1215c0e9 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:25:19 +0530 Subject: x86: irq_64.c fix style problems Impact: cleanup, fix style problems, more readable Fix: WARNING: Use #include instead of WARNING: Use #include instead of ERROR: code indent should use tabs where possible total: 9 errors, 2 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/irq_64.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 6383d50f82ea..63c88e6ec025 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -14,10 +14,10 @@ #include #include #include -#include +#include +#include #include #include -#include /* * Probabilistic stack overflow check: @@ -142,18 +142,18 @@ extern void call_softirq(void); asmlinkage void do_softirq(void) { - __u32 pending; - unsigned long flags; + __u32 pending; + unsigned long flags; - if (in_interrupt()) - return; + if (in_interrupt()) + return; - local_irq_save(flags); - pending = 
local_softirq_pending(); - /* Switch to interrupt stack */ - if (pending) { + local_irq_save(flags); + pending = local_softirq_pending(); + /* Switch to interrupt stack */ + if (pending) { call_softirq(); WARN_ON_ONCE(softirq_count()); } - local_irq_restore(flags); + local_irq_restore(flags); } -- cgit v1.2.3 From 69036c8cc29465f866f352199534be603acc79dd Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:26:52 +0530 Subject: x86: time_32.c fix style problems Impact: cleanup Fix: ERROR: space prohibited after that open parenthesis '(' ERROR: space prohibited before that close parenthesis ')' total: 4 errors, 0 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/time_32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c index 65309e4cb1c0..3985cac0ed47 100644 --- a/arch/x86/kernel/time_32.c +++ b/arch/x86/kernel/time_32.c @@ -105,8 +105,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) high bit of the PPI port B (0x61). Note that some PS/2s, notably the 55SX, work fine if this is removed. */ - u8 irq_v = inb_p( 0x61 ); /* read the current state */ - outb_p( irq_v|0x80, 0x61 ); /* reset the IRQ */ + u8 irq_v = inb_p(0x61); /* read the current state */ + outb_p(irq_v | 0x80, 0x61); /* reset the IRQ */ } #endif -- cgit v1.2.3 From fe331184a33bf0458a04e8fb3d5c006e6520a871 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:28:22 +0530 Subject: x86: time_64.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of total: 0 errors, 1 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/time_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c index 891e7a7c4334..e6e695acd725 100644 --- a/arch/x86/kernel/time_64.c +++ b/arch/x86/kernel/time_64.c @@ -17,10 +17,10 @@ #include #include #include +#include #include #include -#include #include #include #include -- cgit v1.2.3 From 5866e1b49dd2cdc9024e2269e7b1201a57bf9e19 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:29:32 +0530 Subject: x86: ioport.c fix style problems Impact: cleanup Fix: ERROR: "foo * bar" should be "foo *bar" total: 2 errors, 0 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/ioport.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 191914302744..b12208f4dfee 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -35,8 +35,8 @@ static void set_bitmap(unsigned long *bitmap, unsigned int base, */ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) { - struct thread_struct * t = ¤t->thread; - struct tss_struct * tss; + struct thread_struct *t = ¤t->thread; + struct tss_struct *tss; unsigned int i, max_long, bytes, bytes_updated; if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) -- cgit v1.2.3 From c2d1cec1c77f7714672c1efeae075424c929e0d5 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 4 Jan 2009 05:18:03 -0800 Subject: x86: cleanup remaining cpumask_t ops in smpboot code Impact: use new cpumask API to reduce memory and stack usage Allocate the following local cpumasks based on the number of cpus that are present. References will use new cpumask API. 
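As a quick illustration of that API before the details below: the conversion replaces on-stack cpumask_t temporaries with cpumask_var_t, which (with CONFIG_CPUMASK_OFFSTACK) is allocated and sized at runtime rather than by the compile-time NR_CPUS limit. A minimal sketch follows — not part of the patch; the function name is made up for illustration, and real call sites such as native_send_call_func_ipi() below use GFP_ATOMIC where required:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

/* Sketch: compute 'all online cpus except @cpu' without an on-stack cpumask_t. */
static int count_other_online_cpus(int cpu)
{
	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp, cpu_online_mask);	/* start from the online map */
	cpumask_clear_cpu(cpu, tmp);		/* drop the cpu in question */

	pr_info("%u other cpus online\n", cpumask_weight(tmp));

	free_cpumask_var(tmp);
	return 0;
}

The specific masks and accessor helpers this patch introduces are listed next.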
(Currently only modified for x86_64, x86_32 continues to use the *_map variants.) cpu_callin_mask cpu_callout_mask cpu_initialized_mask cpu_sibling_setup_mask Provide the following accessor functions: struct cpumask *cpu_sibling_mask(int cpu) struct cpumask *cpu_core_mask(int cpu) Other changes are when setting or clearing the cpu online, possible or present maps, use the accessor functions. Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/include/asm/smp.h | 32 ++++++++++- arch/x86/kernel/cpu/common.c | 26 +++++++-- arch/x86/kernel/setup_percpu.c | 25 +++++++- arch/x86/kernel/smp.c | 17 ++++-- arch/x86/kernel/smpboot.c | 128 +++++++++++++++++++++-------------------- 5 files changed, 152 insertions(+), 76 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 830b9fcb6427..19953df61c52 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -18,9 +18,26 @@ #include #include +#ifdef CONFIG_X86_64 + +extern cpumask_var_t cpu_callin_mask; +extern cpumask_var_t cpu_callout_mask; +extern cpumask_var_t cpu_initialized_mask; +extern cpumask_var_t cpu_sibling_setup_mask; + +#else /* CONFIG_X86_32 */ + +extern cpumask_t cpu_callin_map; extern cpumask_t cpu_callout_map; extern cpumask_t cpu_initialized; -extern cpumask_t cpu_callin_map; +extern cpumask_t cpu_sibling_setup_map; + +#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map) +#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map) +#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized) +#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map) + +#endif /* CONFIG_X86_32 */ extern void (*mtrr_hook)(void); extern void zap_low_mappings(void); @@ -29,7 +46,6 @@ extern int __cpuinit get_local_pda(int cpu); extern int smp_num_siblings; extern unsigned int num_processors; -extern cpumask_t cpu_initialized; DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_t, cpu_core_map); @@ -38,6 +54,16 @@ DECLARE_PER_CPU(u16, cpu_llc_id); DECLARE_PER_CPU(int, cpu_number); #endif +static inline struct cpumask *cpu_sibling_mask(int cpu) +{ + return &per_cpu(cpu_sibling_map, cpu); +} + +static inline struct cpumask *cpu_core_mask(int cpu) +{ + return &per_cpu(cpu_core_map, cpu); +} + DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); @@ -149,7 +175,7 @@ void smp_store_cpu_info(int id); /* We don't mark CPUs online until __cpu_up(), so we need another measure */ static inline int num_booting_cpus(void) { - return cpus_weight(cpu_callout_map); + return cpumask_weight(cpu_callout_mask); } #else static inline void prefill_possible_map(void) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3f95a40f718a..83492b1f93b1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -40,6 +40,26 @@ #include "cpu.h" +#ifdef CONFIG_X86_64 + +/* all of these masks are initialized in setup_cpu_local_masks() */ +cpumask_var_t cpu_callin_mask; +cpumask_var_t cpu_callout_mask; +cpumask_var_t cpu_initialized_mask; + +/* representing cpus for which sibling maps can be computed */ +cpumask_var_t cpu_sibling_setup_mask; + +#else /* CONFIG_X86_32 */ + +cpumask_t cpu_callin_map; +cpumask_t cpu_callout_map; +cpumask_t cpu_initialized; +cpumask_t cpu_sibling_setup_map; + +#endif /* CONFIG_X86_32 */ + + static struct cpu_dev *this_cpu __cpuinitdata; #ifdef CONFIG_X86_64 @@ -856,8 +876,6 @@ static __init int 
setup_disablecpuid(char *arg) } __setup("clearcpuid=", setup_disablecpuid); -cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; - #ifdef CONFIG_X86_64 struct x8664_pda **_cpu_pda __read_mostly; EXPORT_SYMBOL(_cpu_pda); @@ -976,7 +994,7 @@ void __cpuinit cpu_init(void) me = current; - if (cpu_test_and_set(cpu, cpu_initialized)) + if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) panic("CPU#%d already initialized!\n", cpu); printk(KERN_INFO "Initializing CPU#%d\n", cpu); @@ -1085,7 +1103,7 @@ void __cpuinit cpu_init(void) struct tss_struct *t = &per_cpu(init_tss, cpu); struct thread_struct *thread = &curr->thread; - if (cpu_test_and_set(cpu, cpu_initialized)) { + if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); for (;;) local_irq_enable(); } diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index a4b619c33106..aa55764602b1 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -131,7 +131,27 @@ static void __init setup_cpu_pda_map(void) /* point to new pointer table */ _cpu_pda = new_cpu_pda; } -#endif + +#endif /* CONFIG_SMP && CONFIG_X86_64 */ + +#ifdef CONFIG_X86_64 + +/* correctly size the local cpu masks */ +static void setup_cpu_local_masks(void) +{ + alloc_bootmem_cpumask_var(&cpu_initialized_mask); + alloc_bootmem_cpumask_var(&cpu_callin_mask); + alloc_bootmem_cpumask_var(&cpu_callout_mask); + alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); +} + +#else /* CONFIG_X86_32 */ + +static inline void setup_cpu_local_masks(void) +{ +} + +#endif /* CONFIG_X86_32 */ /* * Great future plan: @@ -187,6 +207,9 @@ void __init setup_per_cpu_areas(void) /* Setup node to cpumask map */ setup_node_to_cpumask_map(); + + /* Setup cpu initialized, callin, callout masks */ + setup_cpu_local_masks(); } #endif diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index beea2649a240..182135ba1eaf 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -128,16 +128,23 @@ void native_send_call_func_single_ipi(int cpu) void native_send_call_func_ipi(const struct cpumask *mask) { - cpumask_t allbutself; + cpumask_var_t allbutself; - allbutself = cpu_online_map; - cpu_clear(smp_processor_id(), allbutself); + if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) { + send_IPI_mask(mask, CALL_FUNCTION_VECTOR); + return; + } - if (cpus_equal(*mask, allbutself) && - cpus_equal(cpu_online_map, cpu_callout_map)) + cpumask_copy(allbutself, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), allbutself); + + if (cpumask_equal(mask, allbutself) && + cpumask_equal(cpu_online_mask, cpu_callout_mask)) send_IPI_allbutself(CALL_FUNCTION_VECTOR); else send_IPI_mask(mask, CALL_FUNCTION_VECTOR); + + free_cpumask_var(allbutself); } /* diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6bd4d9b73870..00e17e589482 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -102,9 +102,6 @@ EXPORT_SYMBOL(smp_num_siblings); /* Last level cache ID of each logical CPU */ DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; -cpumask_t cpu_callin_map; -cpumask_t cpu_callout_map; - /* representing HT siblings of each logical CPU */ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); @@ -120,9 +117,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); static atomic_t init_deasserted; -/* representing cpus for which sibling maps can be computed */ -static cpumask_t cpu_sibling_setup_map; - /* Set if we find a B stepping CPU */ static int 
__cpuinitdata smp_b_stepping; @@ -140,7 +134,7 @@ EXPORT_SYMBOL(cpu_to_node_map); static void map_cpu_to_node(int cpu, int node) { printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node); - cpu_set(cpu, node_to_cpumask_map[node]); + cpumask_set_cpu(cpu, &node_to_cpumask_map[node]); cpu_to_node_map[cpu] = node; } @@ -151,7 +145,7 @@ static void unmap_cpu_to_node(int cpu) printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu); for (node = 0; node < MAX_NUMNODES; node++) - cpu_clear(cpu, node_to_cpumask_map[node]); + cpumask_clear_cpu(cpu, &node_to_cpumask_map[node]); cpu_to_node_map[cpu] = 0; } #else /* !(CONFIG_NUMA && CONFIG_X86_32) */ @@ -209,7 +203,7 @@ static void __cpuinit smp_callin(void) */ phys_id = read_apic_id(); cpuid = smp_processor_id(); - if (cpu_isset(cpuid, cpu_callin_map)) { + if (cpumask_test_cpu(cpuid, cpu_callin_mask)) { panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, phys_id, cpuid); } @@ -231,7 +225,7 @@ static void __cpuinit smp_callin(void) /* * Has the boot CPU finished it's STARTUP sequence? */ - if (cpu_isset(cpuid, cpu_callout_map)) + if (cpumask_test_cpu(cpuid, cpu_callout_mask)) break; cpu_relax(); } @@ -274,7 +268,7 @@ static void __cpuinit smp_callin(void) /* * Allow the master to continue. */ - cpu_set(cpuid, cpu_callin_map); + cpumask_set_cpu(cpuid, cpu_callin_mask); } static int __cpuinitdata unsafe_smp; @@ -332,7 +326,7 @@ notrace static void __cpuinit start_secondary(void *unused) ipi_call_lock(); lock_vector_lock(); __setup_vector_irq(smp_processor_id()); - cpu_set(smp_processor_id(), cpu_online_map); + set_cpu_online(smp_processor_id(), true); unlock_vector_lock(); ipi_call_unlock(); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; @@ -438,50 +432,52 @@ void __cpuinit set_cpu_sibling_map(int cpu) int i; struct cpuinfo_x86 *c = &cpu_data(cpu); - cpu_set(cpu, cpu_sibling_setup_map); + cpumask_set_cpu(cpu, cpu_sibling_setup_mask); if (smp_num_siblings > 1) { - for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { - if (c->phys_proc_id == cpu_data(i).phys_proc_id && - c->cpu_core_id == cpu_data(i).cpu_core_id) { - cpu_set(i, per_cpu(cpu_sibling_map, cpu)); - cpu_set(cpu, per_cpu(cpu_sibling_map, i)); - cpu_set(i, per_cpu(cpu_core_map, cpu)); - cpu_set(cpu, per_cpu(cpu_core_map, i)); - cpu_set(i, c->llc_shared_map); - cpu_set(cpu, cpu_data(i).llc_shared_map); + for_each_cpu(i, cpu_sibling_setup_mask) { + struct cpuinfo_x86 *o = &cpu_data(i); + + if (c->phys_proc_id == o->phys_proc_id && + c->cpu_core_id == o->cpu_core_id) { + cpumask_set_cpu(i, cpu_sibling_mask(cpu)); + cpumask_set_cpu(cpu, cpu_sibling_mask(i)); + cpumask_set_cpu(i, cpu_core_mask(cpu)); + cpumask_set_cpu(cpu, cpu_core_mask(i)); + cpumask_set_cpu(i, &c->llc_shared_map); + cpumask_set_cpu(cpu, &o->llc_shared_map); } } } else { - cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); + cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); } - cpu_set(cpu, c->llc_shared_map); + cpumask_set_cpu(cpu, &c->llc_shared_map); if (current_cpu_data.x86_max_cores == 1) { - per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); + cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); c->booted_cores = 1; return; } - for_each_cpu_mask_nr(i, cpu_sibling_setup_map) { + for_each_cpu(i, cpu_sibling_setup_mask) { if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { - cpu_set(i, c->llc_shared_map); - cpu_set(cpu, cpu_data(i).llc_shared_map); + cpumask_set_cpu(i, &c->llc_shared_map); + cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map); } if (c->phys_proc_id 
== cpu_data(i).phys_proc_id) { - cpu_set(i, per_cpu(cpu_core_map, cpu)); - cpu_set(cpu, per_cpu(cpu_core_map, i)); + cpumask_set_cpu(i, cpu_core_mask(cpu)); + cpumask_set_cpu(cpu, cpu_core_mask(i)); /* * Does this new cpu bringup a new core? */ - if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) { + if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) { /* * for each core in package, increment * the booted_cores for this new cpu */ - if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) + if (cpumask_first(cpu_sibling_mask(i)) == i) c->booted_cores++; /* * increment the core count for all @@ -504,7 +500,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu) * And for power savings, we return cpu_core_map */ if (sched_mc_power_savings || sched_smt_power_savings) - return &per_cpu(cpu_core_map, cpu); + return cpu_core_mask(cpu); else return &c->llc_shared_map; } @@ -523,7 +519,7 @@ static void impress_friends(void) */ pr_debug("Before bogomips.\n"); for_each_possible_cpu(cpu) - if (cpu_isset(cpu, cpu_callout_map)) + if (cpumask_test_cpu(cpu, cpu_callout_mask)) bogosum += cpu_data(cpu).loops_per_jiffy; printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", @@ -904,19 +900,19 @@ do_rest: * allow APs to start initializing. */ pr_debug("Before Callout %d.\n", cpu); - cpu_set(cpu, cpu_callout_map); + cpumask_set_cpu(cpu, cpu_callout_mask); pr_debug("After Callout %d.\n", cpu); /* * Wait 5s total for a response */ for (timeout = 0; timeout < 50000; timeout++) { - if (cpu_isset(cpu, cpu_callin_map)) + if (cpumask_test_cpu(cpu, cpu_callin_mask)) break; /* It has booted */ udelay(100); } - if (cpu_isset(cpu, cpu_callin_map)) { + if (cpumask_test_cpu(cpu, cpu_callin_mask)) { /* number CPUs logically, starting from 1 (BSP is 0) */ pr_debug("OK.\n"); printk(KERN_INFO "CPU%d: ", cpu); @@ -941,9 +937,14 @@ restore_state: if (boot_error) { /* Try to put things back the way they were before ... */ numa_remove_cpu(cpu); /* was set by numa_add_cpu */ - cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */ - cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ - cpu_clear(cpu, cpu_present_map); + + /* was set by do_boot_cpu() */ + cpumask_clear_cpu(cpu, cpu_callout_mask); + + /* was set by cpu_init() */ + cpumask_clear_cpu(cpu, cpu_initialized_mask); + + set_cpu_present(cpu, false); per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; } @@ -977,7 +978,7 @@ int __cpuinit native_cpu_up(unsigned int cpu) /* * Already booted CPU? 
*/ - if (cpu_isset(cpu, cpu_callin_map)) { + if (cpumask_test_cpu(cpu, cpu_callin_mask)) { pr_debug("do_boot_cpu %d Already started\n", cpu); return -ENOSYS; } @@ -1032,8 +1033,9 @@ int __cpuinit native_cpu_up(unsigned int cpu) */ static __init void disable_smp(void) { - cpu_present_map = cpumask_of_cpu(0); - cpu_possible_map = cpumask_of_cpu(0); + /* use the read/write pointers to the present and possible maps */ + cpumask_copy(&cpu_present_map, cpumask_of(0)); + cpumask_copy(&cpu_possible_map, cpumask_of(0)); smpboot_clear_io_apic_irqs(); if (smp_found_config) @@ -1041,8 +1043,8 @@ static __init void disable_smp(void) else physid_set_mask_of_physid(0, &phys_cpu_present_map); map_cpu_to_logical_apicid(); - cpu_set(0, per_cpu(cpu_sibling_map, 0)); - cpu_set(0, per_cpu(cpu_core_map, 0)); + cpumask_set_cpu(0, cpu_sibling_mask(0)); + cpumask_set_cpu(0, cpu_core_mask(0)); } /* @@ -1064,14 +1066,14 @@ static int __init smp_sanity_check(unsigned max_cpus) nr = 0; for_each_present_cpu(cpu) { if (nr >= 8) - cpu_clear(cpu, cpu_present_map); + set_cpu_present(cpu, false); nr++; } nr = 0; for_each_possible_cpu(cpu) { if (nr >= 8) - cpu_clear(cpu, cpu_possible_map); + set_cpu_possible(cpu, false); nr++; } @@ -1167,7 +1169,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) preempt_disable(); smp_cpu_index_default(); current_cpu_data = boot_cpu_data; - cpu_callin_map = cpumask_of_cpu(0); + cpumask_copy(cpu_callin_mask, cpumask_of(0)); mb(); /* * Setup boot CPU information @@ -1242,8 +1244,8 @@ void __init native_smp_prepare_boot_cpu(void) init_gdt(me); #endif switch_to_new_gdt(); - /* already set me in cpu_online_map in boot_cpu_init() */ - cpu_set(me, cpu_callout_map); + /* already set me in cpu_online_mask in boot_cpu_init() */ + cpumask_set_cpu(me, cpu_callout_mask); per_cpu(cpu_state, me) = CPU_ONLINE; } @@ -1311,7 +1313,7 @@ __init void prefill_possible_map(void) possible, max_t(int, possible - num_processors, 0)); for (i = 0; i < possible; i++) - cpu_set(i, cpu_possible_map); + set_cpu_possible(i, true); nr_cpu_ids = possible; } @@ -1323,31 +1325,31 @@ static void remove_siblinginfo(int cpu) int sibling; struct cpuinfo_x86 *c = &cpu_data(cpu); - for_each_cpu_mask_nr(sibling, per_cpu(cpu_core_map, cpu)) { - cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); + for_each_cpu(sibling, cpu_core_mask(cpu)) { + cpumask_clear_cpu(cpu, cpu_core_mask(sibling)); /*/ * last thread sibling in this cpu core going down */ - if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) + if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) cpu_data(sibling).booted_cores--; } - for_each_cpu_mask_nr(sibling, per_cpu(cpu_sibling_map, cpu)) - cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); - cpus_clear(per_cpu(cpu_sibling_map, cpu)); - cpus_clear(per_cpu(cpu_core_map, cpu)); + for_each_cpu(sibling, cpu_sibling_mask(cpu)) + cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); + cpumask_clear(cpu_sibling_mask(cpu)); + cpumask_clear(cpu_core_mask(cpu)); c->phys_proc_id = 0; c->cpu_core_id = 0; - cpu_clear(cpu, cpu_sibling_setup_map); + cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); } static void __ref remove_cpu_from_maps(int cpu) { - cpu_clear(cpu, cpu_online_map); - cpu_clear(cpu, cpu_callout_map); - cpu_clear(cpu, cpu_callin_map); + set_cpu_online(cpu, false); + cpumask_clear_cpu(cpu, cpu_callout_mask); + cpumask_clear_cpu(cpu, cpu_callin_mask); /* was set by cpu_init() */ - cpu_clear(cpu, cpu_initialized); + cpumask_clear_cpu(cpu, cpu_initialized_mask); numa_remove_cpu(cpu); } -- cgit v1.2.3 From 
72ade5f9ca7b384a180ae6b42987f627acd9fe95 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:32:36 +0530 Subject: x86: irq_32.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of ERROR: "(foo*)" should be "(foo *)" ERROR: space required after that ',' (ctx:VxV) total: 5 errors, 1 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/irq_32.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 9dc5588f336a..74b9ff7341e9 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -15,9 +15,9 @@ #include #include #include +#include #include -#include DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); EXPORT_PER_CPU_SYMBOL(irq_stat); @@ -93,7 +93,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) return 0; /* build the stack frame on the IRQ stack */ - isp = (u32 *) ((char*)irqctx + sizeof(*irqctx)); + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); irqctx->tinfo.task = curctx->tinfo.task; irqctx->tinfo.previous_esp = current_stack_pointer; @@ -137,7 +137,7 @@ void __cpuinit irq_ctx_init(int cpu) hardirq_ctx[cpu] = irqctx; - irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE]; + irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE]; irqctx->tinfo.task = NULL; irqctx->tinfo.exec_domain = NULL; irqctx->tinfo.cpu = cpu; @@ -147,7 +147,7 @@ void __cpuinit irq_ctx_init(int cpu) softirq_ctx[cpu] = irqctx; printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", - cpu,hardirq_ctx[cpu],softirq_ctx[cpu]); + cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); } void irq_ctx_exit(int cpu) @@ -174,7 +174,7 @@ asmlinkage void do_softirq(void) irqctx->tinfo.previous_esp = current_stack_pointer; /* build the stack frame on the softirq stack */ - isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); call_on_stack(__do_softirq, isp); /* -- cgit v1.2.3 From 7bafaf306769348723471e133adaa57238770492 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:33:52 +0530 Subject: x86: i8259.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of WARNING: Use #include instead of WARNING: Use #include instead of ERROR: code indent should use tabs where possible total: 1 errors, 3 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/i8259.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 4b8a53d841f7..11d5093eb281 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -11,15 +11,15 @@ #include #include #include +#include +#include +#include -#include #include #include -#include #include #include #include -#include #include #include #include @@ -323,7 +323,7 @@ void init_8259A(int auto_eoi) outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64, - to 0x20-0x27 on i386 */ + to 0x20-0x27 on i386 */ outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */ -- cgit v1.2.3 From aa09e6cdae4c60c3070176498abf99b81e1b40fe Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:35:17 +0530 Subject: x86: irqinit_32.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of WARNING: Use #include instead of ERROR: trailing 
whitespace WARNING: externs should be avoided in .c files ERROR: space required after that ',' (ctx:VxV) WARNING: space prohibited between function name and open parenthesis '(' total: 2 errors, 4 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/irqinit_32.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 84723295f88a..1507ad4e674d 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -9,18 +9,18 @@ #include #include #include +#include +#include #include #include -#include #include #include -#include #include #include #include #include - +#include /* @@ -34,12 +34,10 @@ * leads to races. IBM designers who came up with it should * be shot. */ - static irqreturn_t math_error_irq(int cpl, void *dev_id) { - extern void math_error(void __user *); - outb(0,0xF0); + outb(0, 0xF0); if (ignore_fpu_irq || !boot_cpu_data.hard_math) return IRQ_NONE; math_error((void __user *)get_irq_regs()->ip); @@ -56,7 +54,7 @@ static struct irqaction fpu_irq = { .name = "fpu", }; -void __init init_ISA_irqs (void) +void __init init_ISA_irqs(void) { int i; -- cgit v1.2.3 From f50cec36403674ca39663f44b6a491daa1731920 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 16:36:25 +0530 Subject: x86: irqinit_64.c fix style problems Impact: cleanup, fix style problems Fix: WARNING: Use #include instead of WARNING: Use #include instead of WARNING: Use #include instead of total: 0 errors, 3 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/irqinit_64.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 31ebfe38e96c..cb875a63867d 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -11,14 +11,14 @@ #include #include #include +#include +#include +#include -#include #include #include -#include #include #include -#include #include #include #include -- cgit v1.2.3 From 8a87dd9a20ca895527b039e64e54adcbc64b256a Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 17:04:26 +0530 Subject: x86: setup_percpu.c fix style problems Impact: cleanup Fix: WARNING: Use #include instead of WARNING: Use #include instead of WARNING: Use #include instead of WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable ERROR: spaces required around that '?' 
(ctx:VxW) ERROR: spaces required around that ':' (ctx:VxV) total: 2 errors, 4 warnings Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_percpu.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 0b63b08e7530..c73d9a815b6d 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -5,12 +5,11 @@ #include #include #include -#include -#include +#include +#include #include #include #include -#include #include #include #include @@ -20,8 +19,8 @@ unsigned int num_processors; unsigned disabled_cpus __cpuinitdata; /* Processor that is doing the boot up */ unsigned int boot_cpu_physical_apicid = -1U; -unsigned int max_physical_apicid; EXPORT_SYMBOL(boot_cpu_physical_apicid); +unsigned int max_physical_apicid; /* Bitmask of physically existing CPUs */ physid_mask_t phys_cpu_present_map; @@ -289,8 +288,8 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable) cpulist_scnprintf(buf, sizeof(buf), mask); printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", - enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf); - } + enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf); +} void __cpuinit numa_add_cpu(int cpu) { -- cgit v1.2.3 From dd399dcb4830cd0e9f51d014ec098421171876d4 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Tue, 30 Dec 2008 21:10:13 +0530 Subject: x86: irqinit_64.c init_ISA_irqs should be static Signed-off-by: Jaswinder Singh Signed-off-by: Ingo Molnar --- arch/x86/kernel/irqinit_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index cb875a63867d..da481a1e3f30 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -81,7 +81,7 @@ int vector_used_by_percpu_irq(unsigned int vector) return 0; } -void __init init_ISA_irqs(void) +static void __init init_ISA_irqs(void) { int i; -- cgit v1.2.3 From 5df82c7d18f2dd51aeb763f2a3c1d7714a28218c Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 21:54:39 +0530 Subject: x86: rename all fields of mpc_iopic mpc_X to X Impact: cleanup, solve 80 columns wrap problems It would be cleaner to rename all the mpc->mpc_X fields to mpc->X - that alone would give 4 characters per usage site. 
(we already know that it's an 'mpc' entity - no need to duplicate that in the field too) Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 10 +++++----- arch/x86/kernel/mpparse.c | 26 +++++++++++++------------- 2 files changed, 18 insertions(+), 18 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index ba8ed0cf658d..f805682d80dc 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -109,11 +109,11 @@ struct mpc_bus { #define MPC_APIC_USABLE 0x01 struct mpc_ioapic { - unsigned char mpc_type; - unsigned char mpc_apicid; - unsigned char mpc_apicver; - unsigned char mpc_flags; - unsigned int mpc_apicaddr; + unsigned char type; + unsigned char apicid; + unsigned char apicver; + unsigned char flags; + unsigned int apicaddr; }; struct mpc_intsrc { diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 1ccb2537e3fd..f36d9daaf12c 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -135,20 +135,20 @@ static int bad_ioapic(unsigned long address) static void __init MP_ioapic_info(struct mpc_ioapic *m) { - if (!(m->mpc_flags & MPC_APIC_USABLE)) + if (!(m->flags & MPC_APIC_USABLE)) return; printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n", - m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr); + m->apicid, m->apicver, m->apicaddr); - if (bad_ioapic(m->mpc_apicaddr)) + if (bad_ioapic(m->apicaddr)) return; - mp_ioapics[nr_ioapics].mp_apicaddr = m->mpc_apicaddr; - mp_ioapics[nr_ioapics].mp_apicid = m->mpc_apicid; - mp_ioapics[nr_ioapics].mp_type = m->mpc_type; - mp_ioapics[nr_ioapics].mp_apicver = m->mpc_apicver; - mp_ioapics[nr_ioapics].mp_flags = m->mpc_flags; + mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr; + mp_ioapics[nr_ioapics].mp_apicid = m->apicid; + mp_ioapics[nr_ioapics].mp_type = m->type; + mp_ioapics[nr_ioapics].mp_apicver = m->apicver; + mp_ioapics[nr_ioapics].mp_flags = m->flags; nr_ioapics++; } @@ -513,11 +513,11 @@ static void __init construct_ioapic_table(int mpc_default_type) MP_bus_info(&bus); } - ioapic.mpc_type = MP_IOAPIC; - ioapic.mpc_apicid = 2; - ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; - ioapic.mpc_flags = MPC_APIC_USABLE; - ioapic.mpc_apicaddr = 0xFEC00000; + ioapic.type = MP_IOAPIC; + ioapic.apicid = 2; + ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; + ioapic.flags = MPC_APIC_USABLE; + ioapic.apicaddr = 0xFEC00000; MP_ioapic_info(&ioapic); /* -- cgit v1.2.3 From b5ced7cdb018b8728ddcfb175c26f30cc185cf66 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 21:55:53 +0530 Subject: x86: rename all fields of mpc_lintsrc mpc_X to X Impact: cleanup, solve 80 columns wrap problems It would be cleaner to rename all the mpc->mpc_X fields to mpc->X - that alone would give 4 characters per usage site. 
(we already know that it's an 'mpc' entity - no need to duplicate that in the field too) Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 14 +++++++------- arch/x86/kernel/mpparse.c | 19 +++++++++---------- 2 files changed, 16 insertions(+), 17 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index f805682d80dc..c8bbb3212bd7 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -140,13 +140,13 @@ enum mp_irq_source_types { #define MP_APIC_ALL 0xFF struct mpc_lintsrc { - unsigned char mpc_type; - unsigned char mpc_irqtype; - unsigned short mpc_irqflag; - unsigned char mpc_srcbusid; - unsigned char mpc_srcbusirq; - unsigned char mpc_destapic; - unsigned char mpc_destapiclint; + unsigned char type; + unsigned char irqtype; + unsigned short irqflag; + unsigned char srcbusid; + unsigned char srcbusirq; + unsigned char destapic; + unsigned char destapiclint; }; #define MPC_OEM_SIGNATURE "_OEM" diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index f36d9daaf12c..a5c728be41cd 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -237,9 +237,8 @@ static void __init MP_lintsrc_info(struct mpc_lintsrc *m) { apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC LINT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) & 3, m->mpc_srcbusid, - m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint); + m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbusid, + m->srcbusirq, m->destapic, m->destapiclint); } /* @@ -560,14 +559,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) construct_ioapic_table(mpc_default_type); - lintsrc.mpc_type = MP_LINTSRC; - lintsrc.mpc_irqflag = 0; /* conforming */ - lintsrc.mpc_srcbusid = 0; - lintsrc.mpc_srcbusirq = 0; - lintsrc.mpc_destapic = MP_APIC_ALL; + lintsrc.type = MP_LINTSRC; + lintsrc.irqflag = 0; /* conforming */ + lintsrc.srcbusid = 0; + lintsrc.srcbusirq = 0; + lintsrc.destapic = MP_APIC_ALL; for (i = 0; i < 2; i++) { - lintsrc.mpc_irqtype = linttypes[i]; - lintsrc.mpc_destapiclint = i; + lintsrc.irqtype = linttypes[i]; + lintsrc.destapiclint = i; MP_lintsrc_info(&lintsrc); } } -- cgit v1.2.3 From e253b396b145311477b23c176e868e5af6f79174 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 21:56:44 +0530 Subject: x86: rename all fields of mpc_intsrc mpc_X to X Impact: cleanup, solve 80 columns wrap problems It would be cleaner to rename all the mpc->mpc_X fields to mpc->X - that alone would give 4 characters per usage site. 
(we already know that it's an 'mpc' entity - no need to duplicate that in the field too) Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 14 +++---- arch/x86/kernel/mpparse.c | 79 +++++++++++++++++++-------------------- 2 files changed, 46 insertions(+), 47 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index c8bbb3212bd7..e24e1bc9c15e 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -117,13 +117,13 @@ struct mpc_ioapic { }; struct mpc_intsrc { - unsigned char mpc_type; - unsigned char mpc_irqtype; - unsigned short mpc_irqflag; - unsigned char mpc_srcbus; - unsigned char mpc_srcbusirq; - unsigned char mpc_dstapic; - unsigned char mpc_dstirq; + unsigned char type; + unsigned char irqtype; + unsigned short irqflag; + unsigned char srcbus; + unsigned char srcbusirq; + unsigned char dstapic; + unsigned char dstirq; }; enum mp_irq_source_types { diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index a5c728be41cd..a804f107db8a 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -156,9 +156,8 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m) { apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," " IRQ %02x, APIC ID %x, APIC INT %02x\n", - m->mpc_irqtype, m->mpc_irqflag & 3, - (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus, - m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq); + m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus, + m->srcbusirq, m->dstapic, m->dstirq); } static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) @@ -173,43 +172,43 @@ static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) static void __init assign_to_mp_irq(struct mpc_intsrc *m, struct mp_config_intsrc *mp_irq) { - mp_irq->mp_dstapic = m->mpc_dstapic; - mp_irq->mp_type = m->mpc_type; - mp_irq->mp_irqtype = m->mpc_irqtype; - mp_irq->mp_irqflag = m->mpc_irqflag; - mp_irq->mp_srcbus = m->mpc_srcbus; - mp_irq->mp_srcbusirq = m->mpc_srcbusirq; - mp_irq->mp_dstirq = m->mpc_dstirq; + mp_irq->mp_dstapic = m->dstapic; + mp_irq->mp_type = m->type; + mp_irq->mp_irqtype = m->irqtype; + mp_irq->mp_irqflag = m->irqflag; + mp_irq->mp_srcbus = m->srcbus; + mp_irq->mp_srcbusirq = m->srcbusirq; + mp_irq->mp_dstirq = m->dstirq; } static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq, struct mpc_intsrc *m) { - m->mpc_dstapic = mp_irq->mp_dstapic; - m->mpc_type = mp_irq->mp_type; - m->mpc_irqtype = mp_irq->mp_irqtype; - m->mpc_irqflag = mp_irq->mp_irqflag; - m->mpc_srcbus = mp_irq->mp_srcbus; - m->mpc_srcbusirq = mp_irq->mp_srcbusirq; - m->mpc_dstirq = mp_irq->mp_dstirq; + m->dstapic = mp_irq->mp_dstapic; + m->type = mp_irq->mp_type; + m->irqtype = mp_irq->mp_irqtype; + m->irqflag = mp_irq->mp_irqflag; + m->srcbus = mp_irq->mp_srcbus; + m->srcbusirq = mp_irq->mp_srcbusirq; + m->dstirq = mp_irq->mp_dstirq; } static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq, struct mpc_intsrc *m) { - if (mp_irq->mp_dstapic != m->mpc_dstapic) + if (mp_irq->mp_dstapic != m->dstapic) return 1; - if (mp_irq->mp_type != m->mpc_type) + if (mp_irq->mp_type != m->type) return 2; - if (mp_irq->mp_irqtype != m->mpc_irqtype) + if (mp_irq->mp_irqtype != m->irqtype) return 3; - if (mp_irq->mp_irqflag != m->mpc_irqflag) + if (mp_irq->mp_irqflag != m->irqflag) return 4; - if (mp_irq->mp_srcbus != m->mpc_srcbus) + if (mp_irq->mp_srcbus != m->srcbus) return 5; - if 
(mp_irq->mp_srcbusirq != m->mpc_srcbusirq) + if (mp_irq->mp_srcbusirq != m->srcbusirq) return 6; - if (mp_irq->mp_dstirq != m->mpc_dstirq) + if (mp_irq->mp_dstirq != m->dstirq) return 7; return 0; @@ -415,12 +414,12 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) int i; int ELCR_fallback = 0; - intsrc.mpc_type = MP_INTSRC; - intsrc.mpc_irqflag = 0; /* conforming */ - intsrc.mpc_srcbus = 0; - intsrc.mpc_dstapic = mp_ioapics[0].mp_apicid; + intsrc.type = MP_INTSRC; + intsrc.irqflag = 0; /* conforming */ + intsrc.srcbus = 0; + intsrc.dstapic = mp_ioapics[0].mp_apicid; - intsrc.mpc_irqtype = mp_INT; + intsrc.irqtype = mp_INT; /* * If true, we have an ISA/PCI system with no IRQ entries @@ -463,19 +462,19 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type) * irqflag field (level sensitive, active high polarity). */ if (ELCR_trigger(i)) - intsrc.mpc_irqflag = 13; + intsrc.irqflag = 13; else - intsrc.mpc_irqflag = 0; + intsrc.irqflag = 0; } - intsrc.mpc_srcbusirq = i; - intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ + intsrc.srcbusirq = i; + intsrc.dstirq = i ? i : 2; /* IRQ0 to INTIN2 */ MP_intsrc_info(&intsrc); } - intsrc.mpc_irqtype = mp_ExtINT; - intsrc.mpc_srcbusirq = 0; - intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */ + intsrc.irqtype = mp_ExtINT; + intsrc.srcbusirq = 0; + intsrc.dstirq = 0; /* 8259A to INTIN0 */ MP_intsrc_info(&intsrc); } @@ -801,10 +800,10 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m) { int i; - if (m->mpc_irqtype != mp_INT) + if (m->irqtype != mp_INT) return 0; - if (m->mpc_irqflag != 0x0f) + if (m->irqflag != 0x0f) return 0; /* not legacy */ @@ -816,9 +815,9 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m) if (mp_irqs[i].mp_irqflag != 0x0f) continue; - if (mp_irqs[i].mp_srcbus != m->mpc_srcbus) + if (mp_irqs[i].mp_srcbus != m->srcbus) continue; - if (mp_irqs[i].mp_srcbusirq != m->mpc_srcbusirq) + if (mp_irqs[i].mp_srcbusirq != m->srcbusirq) continue; if (irq_used[i]) { /* already claimed */ -- cgit v1.2.3 From c4563826b72b0d12500260093179a660e79bf412 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 21:58:25 +0530 Subject: x86: rename all fields of mpc_cpu mpc_X to X Impact: cleanup, solve 80 columns wrap problems It would be cleaner to rename all the mpc->mpc_X fields to mpc->X - that alone would give 4 characters per usage site. 
(we already know that it's an 'mpc' entity - no need to duplicate that in the field too) Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 14 +++++++------- arch/x86/kernel/mpparse.c | 28 ++++++++++++++-------------- arch/x86/kernel/numaq_32.c | 9 ++++----- arch/x86/kernel/visws_quirks.c | 28 +++++++++++++--------------- 4 files changed, 38 insertions(+), 41 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index e24e1bc9c15e..78057aaba259 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -71,13 +71,13 @@ struct mpc_table { #define CPU_FAMILY_MASK 0x0F00 struct mpc_cpu { - unsigned char mpc_type; - unsigned char mpc_apicid; /* Local APIC number */ - unsigned char mpc_apicver; /* Its versions */ - unsigned char mpc_cpuflag; - unsigned int mpc_cpufeature; - unsigned int mpc_featureflag; /* CPUID feature value */ - unsigned int mpc_reserved[2]; + unsigned char type; + unsigned char apicid; /* Local APIC number */ + unsigned char apicver; /* Its versions */ + unsigned char cpuflag; + unsigned int cpufeature; + unsigned int featureflag; /* CPUID feature value */ + unsigned int reserved[2]; }; struct mpc_bus { diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index a804f107db8a..a8b9ba9709fa 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -54,7 +54,7 @@ static void __init MP_processor_info(struct mpc_cpu *m) int apicid; char *bootup_cpu = ""; - if (!(m->mpc_cpuflag & CPU_ENABLED)) { + if (!(m->cpuflag & CPU_ENABLED)) { disabled_cpus++; return; } @@ -62,15 +62,15 @@ static void __init MP_processor_info(struct mpc_cpu *m) if (x86_quirks->mpc_apic_id) apicid = x86_quirks->mpc_apic_id(m); else - apicid = m->mpc_apicid; + apicid = m->apicid; - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { + if (m->cpuflag & CPU_BOOTPROCESSOR) { bootup_cpu = " (Bootup-CPU)"; - boot_cpu_physical_apicid = m->mpc_apicid; + boot_cpu_physical_apicid = m->apicid; } - printk(KERN_INFO "Processor #%d%s\n", m->mpc_apicid, bootup_cpu); - generic_processor_info(apicid, m->mpc_apicver); + printk(KERN_INFO "Processor #%d%s\n", m->apicid, bootup_cpu); + generic_processor_info(apicid, m->apicver); } #ifdef CONFIG_X86_IO_APIC @@ -542,17 +542,17 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) /* * 2 CPUs, numbered 0 & 1. */ - processor.mpc_type = MP_PROCESSOR; + processor.type = MP_PROCESSOR; /* Either an integrated APIC or a discrete 82489DX. */ - processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01; - processor.mpc_cpuflag = CPU_ENABLED; - processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | + processor.apicver = mpc_default_type > 4 ? 
0x10 : 0x01; + processor.cpuflag = CPU_ENABLED; + processor.cpufeature = (boot_cpu_data.x86 << 8) | (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; - processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; - processor.mpc_reserved[0] = 0; - processor.mpc_reserved[1] = 0; + processor.featureflag = boot_cpu_data.x86_capability[0]; + processor.reserved[0] = 0; + processor.reserved[1] = 0; for (i = 0; i < 2; i++) { - processor.mpc_apicid = i; + processor.apicid = i; MP_processor_info(&processor); } diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index aa1e5a068551..a53831c64c58 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -120,13 +120,12 @@ static inline int generate_logical_apicid(int quad, int phys_apicid) static int mpc_apic_id(struct mpc_cpu *m) { int quad = translation_table[mpc_record]->trans_quad; - int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid); + int logical_apicid = generate_logical_apicid(quad, m->apicid); printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver, quad, logical_apicid); + m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8, + (m->cpufeature & CPU_MODEL_MASK) >> 4, + m->apicver, quad, logical_apicid); return logical_apicid; } diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index ca12afa63475..d801d06af068 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c @@ -181,28 +181,26 @@ static void __init MP_processor_info(struct mpc_cpu *m) int ver, logical_apicid; physid_mask_t apic_cpus; - if (!(m->mpc_cpuflag & CPU_ENABLED)) + if (!(m->cpuflag & CPU_ENABLED)) return; - logical_apicid = m->mpc_apicid; + logical_apicid = m->apicid; printk(KERN_INFO "%sCPU #%d %u:%u APIC version %d\n", - m->mpc_cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); + m->cpuflag & CPU_BOOTPROCESSOR ? "Bootup " : "", + m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8, + (m->cpufeature & CPU_MODEL_MASK) >> 4, m->apicver); - if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) - boot_cpu_physical_apicid = m->mpc_apicid; + if (m->cpuflag & CPU_BOOTPROCESSOR) + boot_cpu_physical_apicid = m->apicid; - ver = m->mpc_apicver; - if ((ver >= 0x14 && m->mpc_apicid >= 0xff) || m->mpc_apicid >= 0xf) { + ver = m->apicver; + if ((ver >= 0x14 && m->apicid >= 0xff) || m->apicid >= 0xf) { printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n", - m->mpc_apicid, MAX_APICS); + m->apicid, MAX_APICS); return; } - apic_cpus = apicid_to_cpu_present(m->mpc_apicid); + apic_cpus = apicid_to_cpu_present(m->apicid); physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); /* * Validate version @@ -210,10 +208,10 @@ static void __init MP_processor_info(struct mpc_cpu *m) if (ver == 0x0) { printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! " "fixing up to 0x10. 
(tell your hw vendor)\n", - m->mpc_apicid); + m->apicid); ver = 0x10; } - apic_version[m->mpc_apicid] = ver; + apic_version[m->apicid] = ver; } static int __init visws_find_smp_config(unsigned int reserve) -- cgit v1.2.3 From d4c715fad5604f25ea1e5c90f280cb818851c10b Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 21:59:26 +0530 Subject: x86: rename all fields of mpc_bus mpc_X to X Impact: cleanup, solve 80 columns wrap problems It would be cleaner to rename all the mpc->mpc_X fields to mpc->X - that alone would give 4 characters per usage site. (we already know that it's an 'mpc' entity - no need to duplicate that in the field too) Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 6 +++--- arch/x86/kernel/mpparse.c | 40 +++++++++++++++++++-------------------- arch/x86/kernel/numaq_32.c | 8 ++++---- 3 files changed, 27 insertions(+), 27 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index 78057aaba259..60ba5439e9f2 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -81,9 +81,9 @@ struct mpc_cpu { }; struct mpc_bus { - unsigned char mpc_type; - unsigned char mpc_busid; - unsigned char mpc_bustype[6]; + unsigned char type; + unsigned char busid; + unsigned char bustype[6]; }; /* List of Bus Type string values, Intel MP Spec. */ diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index a8b9ba9709fa..d780ddddbc37 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -77,39 +77,39 @@ static void __init MP_processor_info(struct mpc_cpu *m) static void __init MP_bus_info(struct mpc_bus *m) { char str[7]; - memcpy(str, m->mpc_bustype, 6); + memcpy(str, m->bustype, 6); str[6] = 0; if (x86_quirks->mpc_oem_bus_info) x86_quirks->mpc_oem_bus_info(m, str); else - apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->mpc_busid, str); + apic_printk(APIC_VERBOSE, "Bus #%d is %s\n", m->busid, str); #if MAX_MP_BUSSES < 256 - if (m->mpc_busid >= MAX_MP_BUSSES) { + if (m->busid >= MAX_MP_BUSSES) { printk(KERN_WARNING "MP table busid value (%d) for bustype %s " " is too large, max. 
supported is %d\n", - m->mpc_busid, str, MAX_MP_BUSSES - 1); + m->busid, str, MAX_MP_BUSSES - 1); return; } #endif if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { - set_bit(m->mpc_busid, mp_bus_not_pci); + set_bit(m->busid, mp_bus_not_pci); #if defined(CONFIG_EISA) || defined(CONFIG_MCA) - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; + mp_bus_id_to_type[m->busid] = MP_BUS_ISA; #endif } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { if (x86_quirks->mpc_oem_pci_bus) x86_quirks->mpc_oem_pci_bus(m); - clear_bit(m->mpc_busid, mp_bus_not_pci); + clear_bit(m->busid, mp_bus_not_pci); #if defined(CONFIG_EISA) || defined(CONFIG_MCA) - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; + mp_bus_id_to_type[m->busid] = MP_BUS_PCI; } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; + mp_bus_id_to_type[m->busid] = MP_BUS_EISA; } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) { - mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA; + mp_bus_id_to_type[m->busid] = MP_BUS_MCA; #endif } else printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str); @@ -484,8 +484,8 @@ static void __init construct_ioapic_table(int mpc_default_type) struct mpc_ioapic ioapic; struct mpc_bus bus; - bus.mpc_type = MP_BUS; - bus.mpc_busid = 0; + bus.type = MP_BUS; + bus.busid = 0; switch (mpc_default_type) { default: printk(KERN_ERR "???\nUnknown standard configuration %d\n", @@ -493,21 +493,21 @@ static void __init construct_ioapic_table(int mpc_default_type) /* fall through */ case 1: case 5: - memcpy(bus.mpc_bustype, "ISA ", 6); + memcpy(bus.bustype, "ISA ", 6); break; case 2: case 6: case 3: - memcpy(bus.mpc_bustype, "EISA ", 6); + memcpy(bus.bustype, "EISA ", 6); break; case 4: case 7: - memcpy(bus.mpc_bustype, "MCA ", 6); + memcpy(bus.bustype, "MCA ", 6); } MP_bus_info(&bus); if (mpc_default_type > 4) { - bus.mpc_busid = 1; - memcpy(bus.mpc_bustype, "PCI ", 6); + bus.busid = 1; + memcpy(bus.bustype, "PCI ", 6); MP_bus_info(&bus); } @@ -656,9 +656,9 @@ static void __init __get_smp_config(unsigned int early) "using default mptable. 
" "(tell your hw vendor)\n"); - bus.mpc_type = MP_BUS; - bus.mpc_busid = 0; - memcpy(bus.mpc_bustype, "ISA ", 6); + bus.type = MP_BUS; + bus.busid = 0; + memcpy(bus.bustype, "ISA ", 6); MP_bus_info(&bus); construct_default_ioirq_mptable(0); diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index a53831c64c58..8242fef6d0e5 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -139,10 +139,10 @@ static void mpc_oem_bus_info(struct mpc_bus *m, char *name) int quad = translation_table[mpc_record]->trans_quad; int local = translation_table[mpc_record]->trans_local; - mp_bus_id_to_node[m->mpc_busid] = quad; - mp_bus_id_to_local[m->mpc_busid] = local; + mp_bus_id_to_node[m->busid] = quad; + mp_bus_id_to_local[m->busid] = local; printk(KERN_INFO "Bus #%d is %s (node %d)\n", - m->mpc_busid, name, quad); + m->busid, name, quad); } int quad_local_to_mp_bus_id [NR_CPUS/4][4]; @@ -153,7 +153,7 @@ static void mpc_oem_pci_bus(struct mpc_bus *m) int quad = translation_table[mpc_record]->trans_quad; int local = translation_table[mpc_record]->trans_local; - quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; + quad_local_to_mp_bus_id[quad][local] = m->busid; } static void __init MP_translation_info(struct mpc_config_translation *m) -- cgit v1.2.3 From a1d0272a4676460787bcd7352414e0286763968d Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 22:00:46 +0530 Subject: x86: rename all fields of mpc_oemtable oem_X to X Impact: cleanup, solve 80 columns wrap problems It would be cleaner to rename all the mpc->oem_X fields to mpc->X - that alone would give 4 characters per usage site. (we already know that it's an 'oem' entity - no need to duplicate that in the field too) Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 10 +++++----- arch/x86/kernel/numaq_32.c | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index 60ba5439e9f2..6ea62ea2dc21 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -152,11 +152,11 @@ struct mpc_lintsrc { #define MPC_OEM_SIGNATURE "_OEM" struct mpc_oemtable { - char oem_signature[4]; - unsigned short oem_length; /* Size of table */ - char oem_rev; /* 0x01 */ - char oem_checksum; - char mpc_oem[8]; + char signature[4]; + unsigned short length; /* Size of table */ + char rev; /* 0x01 */ + char checksum; + char mpc[8]; }; /* diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c index 8242fef6d0e5..f2191d4f2717 100644 --- a/arch/x86/kernel/numaq_32.c +++ b/arch/x86/kernel/numaq_32.c @@ -194,18 +194,18 @@ static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable, mpc_record = 0; printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... 
\n", oemtable); - if (memcmp(oemtable->oem_signature, MPC_OEM_SIGNATURE, 4)) { + if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) { printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n", - oemtable->oem_signature[0], oemtable->oem_signature[1], - oemtable->oem_signature[2], oemtable->oem_signature[3]); + oemtable->signature[0], oemtable->signature[1], + oemtable->signature[2], oemtable->signature[3]); return; } - if (mpf_checksum((unsigned char *)oemtable, oemtable->oem_length)) { + if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) { printk(KERN_WARNING "SMP oem mptable: checksum error!\n"); return; } - while (count < oemtable->oem_length) { + while (count < oemtable->length) { switch (*oemptr) { case MP_TRANSLATION: { -- cgit v1.2.3 From 6c65da50bd4589b6b627d4842b8e6705a0ccaab5 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 4 Jan 2009 22:22:56 +0530 Subject: x86: rename all fields of mpc_table mpc_X to X Impact: cleanup, solve 80 columns wrap problems It would be cleaner to rename all the mpc->mpc_X fields to mpc->X - that alone would give 4 characters per usage site. (we already know that it's an 'mpc' entity - no need to duplicate that in the field too) Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mpspec_def.h | 20 ++++++------- arch/x86/kernel/mpparse.c | 59 +++++++++++++++++++-------------------- arch/x86/mach-generic/es7000.c | 4 +-- 3 files changed, 41 insertions(+), 42 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h index 6ea62ea2dc21..59568bc4767f 100644 --- a/arch/x86/include/asm/mpspec_def.h +++ b/arch/x86/include/asm/mpspec_def.h @@ -40,16 +40,16 @@ struct intel_mp_floating { #define MPC_SIGNATURE "PCMP" struct mpc_table { - char mpc_signature[4]; - unsigned short mpc_length; /* Size of table */ - char mpc_spec; /* 0x01 */ - char mpc_checksum; - char mpc_oem[8]; - char mpc_productid[12]; - unsigned int mpc_oemptr; /* 0 if not present */ - unsigned short mpc_oemsize; /* 0 if not present */ - unsigned short mpc_oemcount; - unsigned int mpc_lapic; /* APIC address */ + char signature[4]; + unsigned short length; /* Size of table */ + char spec; /* 0x01 */ + char checksum; + char oem[8]; + char productid[12]; + unsigned int oemptr; /* 0 if not present */ + unsigned short oemsize; /* 0 if not present */ + unsigned short oemcount; + unsigned int lapic; /* APIC address */ unsigned int reserved; }; diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index d780ddddbc37..e6a9724fefc1 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -247,35 +247,35 @@ static void __init MP_lintsrc_info(struct mpc_lintsrc *m) static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str) { - if (memcmp(mpc->mpc_signature, MPC_SIGNATURE, 4)) { + if (memcmp(mpc->signature, MPC_SIGNATURE, 4)) { printk(KERN_ERR "MPTABLE: bad signature [%c%c%c%c]!\n", - mpc->mpc_signature[0], mpc->mpc_signature[1], - mpc->mpc_signature[2], mpc->mpc_signature[3]); + mpc->signature[0], mpc->signature[1], + mpc->signature[2], mpc->signature[3]); return 0; } - if (mpf_checksum((unsigned char *)mpc, mpc->mpc_length)) { + if (mpf_checksum((unsigned char *)mpc, mpc->length)) { printk(KERN_ERR "MPTABLE: checksum error!\n"); return 0; } - if (mpc->mpc_spec != 0x01 && mpc->mpc_spec != 0x04) { + if (mpc->spec != 0x01 && mpc->spec != 0x04) { printk(KERN_ERR "MPTABLE: bad table version (%d)!!\n", - mpc->mpc_spec); + 
mpc->spec); return 0; } - if (!mpc->mpc_lapic) { + if (!mpc->lapic) { printk(KERN_ERR "MPTABLE: null local APIC address!\n"); return 0; } - memcpy(oem, mpc->mpc_oem, 8); + memcpy(oem, mpc->oem, 8); oem[8] = 0; printk(KERN_INFO "MPTABLE: OEM ID: %s\n", oem); - memcpy(str, mpc->mpc_productid, 12); + memcpy(str, mpc->productid, 12); str[12] = 0; printk(KERN_INFO "MPTABLE: Product ID: %s\n", str); - printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->mpc_lapic); + printk(KERN_INFO "MPTABLE: APIC at: 0x%X\n", mpc->lapic); return 1; } @@ -305,14 +305,14 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) #endif /* save the local APIC address, it might be non-default */ if (!acpi_lapic) - mp_lapic_addr = mpc->mpc_lapic; + mp_lapic_addr = mpc->lapic; if (early) return 1; - if (mpc->mpc_oemptr && x86_quirks->smp_read_mpc_oem) { - struct mpc_oemtable *oem_table = (void *)(long)mpc->mpc_oemptr; - x86_quirks->smp_read_mpc_oem(oem_table, mpc->mpc_oemsize); + if (mpc->oemptr && x86_quirks->smp_read_mpc_oem) { + struct mpc_oemtable *oem_table = (void *)(long)mpc->oemptr; + x86_quirks->smp_read_mpc_oem(oem_table, mpc->oemsize); } /* @@ -321,7 +321,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) if (x86_quirks->mpc_record) *x86_quirks->mpc_record = 0; - while (count < mpc->mpc_length) { + while (count < mpc->length) { switch (*mpt) { case MP_PROCESSOR: { @@ -378,8 +378,8 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"); printk(KERN_ERR "type %x\n", *mpt); print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, - 1, mpc, mpc->mpc_length, 1); - count = mpc->mpc_length; + 1, mpc, mpc->length, 1); + count = mpc->length; break; } if (x86_quirks->mpc_record) @@ -848,8 +848,8 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, int count = sizeof(*mpc); unsigned char *mpt = ((unsigned char *)mpc) + count; - printk(KERN_INFO "mpc_length %x\n", mpc->mpc_length); - while (count < mpc->mpc_length) { + printk(KERN_INFO "mpc_length %x\n", mpc->length); + while (count < mpc->length) { switch (*mpt) { case MP_PROCESSOR: { @@ -912,7 +912,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, printk(KERN_ERR "Your mptable is wrong, contact your HW vendor!\n"); printk(KERN_ERR "type %x\n", *mpt); print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_ADDRESS, 16, - 1, mpc, mpc->mpc_length, 1); + 1, mpc, mpc->length, 1); goto out; } } @@ -947,7 +947,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, } } assign_to_mpc_intsrc(&mp_irqs[i], m); - mpc->mpc_length = count; + mpc->length = count; mpt += sizeof(struct mpc_intsrc); } print_mp_irq_info(&mp_irqs[i]); @@ -955,9 +955,8 @@ static int __init replace_intsrc_all(struct mpc_table *mpc, #endif out: /* update checksum */ - mpc->mpc_checksum = 0; - mpc->mpc_checksum -= mpf_checksum((unsigned char *)mpc, - mpc->mpc_length); + mpc->checksum = 0; + mpc->checksum -= mpf_checksum((unsigned char *)mpc, mpc->length); return 0; } @@ -1029,7 +1028,7 @@ static int __init update_mp_table(void) printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf)); printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr); - if (mpc_new_phys && mpc->mpc_length > mpc_new_length) { + if (mpc_new_phys && mpc->length > mpc_new_length) { mpc_new_phys = 0; printk(KERN_INFO "mpc_new_length is %ld, please use alloc_mptable=8k\n", mpc_new_length); @@ -1038,10 +1037,10 @@ static int __init update_mp_table(void) if (!mpc_new_phys) { unsigned char old, new; /* check if 
we can change the postion */ - mpc->mpc_checksum = 0; - old = mpf_checksum((unsigned char *)mpc, mpc->mpc_length); - mpc->mpc_checksum = 0xff; - new = mpf_checksum((unsigned char *)mpc, mpc->mpc_length); + mpc->checksum = 0; + old = mpf_checksum((unsigned char *)mpc, mpc->length); + mpc->checksum = 0xff; + new = mpf_checksum((unsigned char *)mpc, mpc->length); if (old == new) { printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n"); return 0; @@ -1050,7 +1049,7 @@ static int __init update_mp_table(void) } else { mpf->mpf_physptr = mpc_new_phys; mpc_new = phys_to_virt(mpc_new_phys); - memcpy(mpc_new, mpc, mpc->mpc_length); + memcpy(mpc_new, mpc, mpc->length); mpc = mpc_new; /* check if we can modify that */ if (mpc_new_phys - mpf->mpf_physptr) { diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 39243a796e18..c2ded1448024 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c @@ -46,9 +46,9 @@ static void __init enable_apic_mode(void) static __init int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) { - if (mpc->mpc_oemptr) { + if (mpc->oemptr) { struct mpc_oemtable *oem_table = - (struct mpc_oemtable *)mpc->mpc_oemptr; + (struct mpc_oemtable *)mpc->oemptr; if (!strncmp(oem, "UNISYS", 6)) return parse_unisys_oem((char *)oem_table); } -- cgit v1.2.3 From 87c6fe26186d734e932426cc8ab9fd8cf9aeed94 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Mon, 5 Jan 2009 14:08:04 +0000 Subject: x86: update Alan Cox's email addresses Signed-off-by: Alan Cox Signed-off-by: Ingo Molnar --- arch/x86/kernel/apm_32.c | 4 ++-- arch/x86/kernel/cpu/mcheck/mce_32.c | 2 +- arch/x86/kernel/cpu/mcheck/p5.c | 2 +- arch/x86/kernel/cpu/mcheck/p6.c | 2 +- arch/x86/kernel/cpu/mcheck/winchip.c | 2 +- arch/x86/kernel/mpparse.c | 2 +- arch/x86/kernel/smp.c | 2 +- arch/x86/kernel/smpboot.c | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 3a26525a3f31..98807bb095ad 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -160,9 +160,9 @@ * Work around byte swap bug in one of the Vaio's BIOS's * (Marc Boucher ). * Exposed the disable flag to dmi so that we can handle known - * broken APM (Alan Cox ). + * broken APM (Alan Cox ). * 1.14ac: If the BIOS says "I slowed the CPU down" then don't spin - * calling it - instead idle. (Alan Cox ) + * calling it - instead idle. (Alan Cox ) * If an APM idle fails log it and idle sensibly * 1.15: Don't queue events to clients who open the device O_WRONLY. * Don't expect replies from clients who open the device O_RDONLY. 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c index 0ebf3fc6a610..dfaebce3633e 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_32.c +++ b/arch/x86/kernel/cpu/mcheck/mce_32.c @@ -1,6 +1,6 @@ /* * mce.c - x86 Machine Check Exception Reporting - * (c) 2002 Alan Cox , Dave Jones + * (c) 2002 Alan Cox , Dave Jones */ #include diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c index bfa5817afdda..c9f77ea69edc 100644 --- a/arch/x86/kernel/cpu/mcheck/p5.c +++ b/arch/x86/kernel/cpu/mcheck/p5.c @@ -1,6 +1,6 @@ /* * P5 specific Machine Check Exception Reporting - * (C) Copyright 2002 Alan Cox + * (C) Copyright 2002 Alan Cox */ #include diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c index 62efc9c2b3af..2ac52d7b434b 100644 --- a/arch/x86/kernel/cpu/mcheck/p6.c +++ b/arch/x86/kernel/cpu/mcheck/p6.c @@ -1,6 +1,6 @@ /* * P6 specific Machine Check Exception Reporting - * (C) Copyright 2002 Alan Cox + * (C) Copyright 2002 Alan Cox */ #include diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c index f2be3e190c6b..2a043d89811d 100644 --- a/arch/x86/kernel/cpu/mcheck/winchip.c +++ b/arch/x86/kernel/cpu/mcheck/winchip.c @@ -1,6 +1,6 @@ /* * IDT Winchip specific Machine Check Exception Reporting - * (C) Copyright 2002 Alan Cox + * (C) Copyright 2002 Alan Cox */ #include diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index c5c5b8df1dbc..cead7e27585d 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -2,7 +2,7 @@ * Intel Multiprocessor Specification 1.1 and 1.4 * compliant MP-table parsing routines. * - * (c) 1995 Alan Cox, Building #3 + * (c) 1995 Alan Cox, Building #3 * (c) 1998, 1999, 2000 Ingo Molnar * (c) 2008 Alexey Starikovskiy */ diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index beea2649a240..cf1f075886b4 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -1,7 +1,7 @@ /* * Intel SMP support routines. * - * (c) 1995 Alan Cox, Building #3 + * (c) 1995 Alan Cox, Building #3 * (c) 1998-99, 2000 Ingo Molnar * (c) 2002,2003 Andi Kleen, SuSE Labs. * diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 31869bf5fabd..c628f9178cd8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1,7 +1,7 @@ /* * x86 SMP booting functions * - * (c) 1995 Alan Cox, Building #3 + * (c) 1995 Alan Cox, Building #3 * (c) 1998, 1999, 2000 Ingo Molnar * Copyright 2001 Andi Kleen, SuSE Labs. 
* -- cgit v1.2.3 From 5641f1fde074651ce2488e93944cf05dedd9bf74 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 5 Jan 2009 17:19:02 +0000 Subject: X86_DEBUGCTLMSR won't work on uml Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- arch/x86/Kconfig.cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 85a78575956c..8078955845ae 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -408,7 +408,7 @@ config X86_MINIMUM_CPU_FAMILY config X86_DEBUGCTLMSR def_bool y - depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) + depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) && !UML menuconfig PROCESSOR_SELECT bool "Supported processor vendors" if EMBEDDED -- cgit v1.2.3 From 5cb0535f1713b51610f2881b17d0fe3656114364 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sun, 4 Jan 2009 05:18:05 -0800 Subject: cpumask: replace CPUMASK_ALLOC etc with cpumask_var_t Impact: cleanup There's only one user, and it's a fairly easy conversion. Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Acked-by: Dave Jones Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index f0ea6fa2f53c..d2cc4991cbaa 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -458,11 +458,6 @@ static int centrino_verify (struct cpufreq_policy *policy) * * Sets a new CPUFreq policy. */ -struct allmasks { - cpumask_t saved_mask; - cpumask_t covered_cpus; -}; - static int centrino_target (struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) @@ -472,12 +467,15 @@ static int centrino_target (struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int retval = 0; unsigned int j, k, first_cpu, tmp; - CPUMASK_ALLOC(allmasks); - CPUMASK_PTR(saved_mask, allmasks); - CPUMASK_PTR(covered_cpus, allmasks); + cpumask_var_t saved_mask, covered_cpus; - if (unlikely(allmasks == NULL)) + if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL))) return -ENOMEM; + if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { + free_cpumask_var(saved_mask); + return -ENOMEM; + } + cpumask_copy(saved_mask, ¤t->cpus_allowed); if (unlikely(per_cpu(centrino_model, cpu) == NULL)) { retval = -ENODEV; @@ -493,9 +491,7 @@ static int centrino_target (struct cpufreq_policy *policy, goto out; } - *saved_mask = current->cpus_allowed; first_cpu = 1; - cpus_clear(*covered_cpus); for_each_cpu_mask_nr(j, policy->cpus) { const cpumask_t *mask; @@ -605,7 +601,8 @@ migrate_end: preempt_enable(); set_cpus_allowed_ptr(current, saved_mask); out: - CPUMASK_FREE(allmasks); + free_cpumask_var(saved_mask); + free_cpumask_var(covered_cpus); return retval; } -- cgit v1.2.3 From 835481d9bcd65720b473db6b38746a74a3964218 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sun, 4 Jan 2009 05:18:06 -0800 Subject: cpumask: convert struct cpufreq_policy to cpumask_var_t Impact: use new cpumask API to reduce memory usage This is part of an effort to reduce structure sizes for machines configured with large NR_CPUS. cpumask_t gets replaced by cpumask_var_t, which is either struct cpumask[1] (small NR_CPUS) or struct cpumask * (large NR_CPUS). 
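For illustration only, a minimal sketch of the allocate/use/free pattern this conversion relies on (not taken from the patch below; the function name and the use of cpu_online_mask are made up for the example):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	/*
	 * Hypothetical helper showing the cpumask_var_t lifecycle.
	 * With CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is struct cpumask[1]
	 * and alloc/free are no-ops; with =y it is a kmalloc'ed pointer.
	 */
	static int example_cpumask_var_usage(void)
	{
		cpumask_var_t tmp;

		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(tmp, cpu_online_mask);
		/* ... operate on tmp ... */

		free_cpumask_var(tmp);
		return 0;
	}

Either way the rest of the code only ever sees a struct cpumask pointer, which is what allows struct cpufreq_policy to drop the full cpumask_t members.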
Signed-off-by: Rusty Russell Signed-off-by: Mike Travis Acked-by: Dave Jones Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 10 +++--- arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 8 ++--- arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 6 ++-- arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 2 +- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 14 ++++---- arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 18 +++++----- drivers/cpufreq/cpufreq.c | 42 ++++++++++++++++-------- drivers/cpufreq/cpufreq_conservative.c | 2 +- drivers/cpufreq/cpufreq_ondemand.c | 4 +-- include/linux/cpufreq.h | 4 +-- 10 files changed, 62 insertions(+), 48 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 28102ad1a363..0b31939862d6 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -411,7 +411,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, #ifdef CONFIG_HOTPLUG_CPU /* cpufreq holds the hotplug lock, so we are safe from here on */ - cpus_and(online_policy_cpus, cpu_online_map, policy->cpus); + cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus); #else online_policy_cpus = policy->cpus; #endif @@ -626,15 +626,15 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) */ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { - cpumask_copy(&policy->cpus, perf->shared_cpu_map); + cpumask_copy(policy->cpus, perf->shared_cpu_map); } - cpumask_copy(&policy->related_cpus, perf->shared_cpu_map); + cpumask_copy(policy->related_cpus, perf->shared_cpu_map); #ifdef CONFIG_SMP dmi_check_system(sw_any_bug_dmi_table); - if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) { + if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) { policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; - policy->cpus = per_cpu(cpu_core_map, cpu); + cpumask_copy(policy->cpus, cpu_core_mask(cpu)); } #endif diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index beea4466b063..b585e04cbc9e 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c @@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, return 0; /* notifiers */ - for_each_cpu_mask_nr(i, policy->cpus) { + for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy, /* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software * Developer's Manual, Volume 3 */ - for_each_cpu_mask_nr(i, policy->cpus) + for_each_cpu(i, policy->cpus) cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); /* notifiers */ - for_each_cpu_mask_nr(i, policy->cpus) { + for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -203,7 +203,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) unsigned int i; #ifdef CONFIG_SMP - policy->cpus = per_cpu(cpu_sibling_map, policy->cpu); + cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); #endif /* Errata workaround */ diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index c3c9adbaa26f..5c28b37dea11 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1199,10 +1199,10 @@ 
static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) set_cpus_allowed_ptr(current, &oldmask); if (cpu_family == CPU_HW_PSTATE) - pol->cpus = cpumask_of_cpu(pol->cpu); + cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); else - pol->cpus = per_cpu(cpu_core_map, pol->cpu); - data->available_cores = &(pol->cpus); + cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu)); + data->available_cores = pol->cpus; /* Take a crude guess here. * That guess was in microseconds, so multiply with 1000 */ diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index 65cfb5d7f77f..8ecc75b6c7c3 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h @@ -53,7 +53,7 @@ struct powernow_k8_data { /* we need to keep track of associated cores, but let cpufreq * handle hotplug events - so just point at cpufreq pol->cpus * structure */ - cpumask_t *available_cores; + struct cpumask *available_cores; }; diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index d2cc4991cbaa..f08998278a3a 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -492,8 +492,8 @@ static int centrino_target (struct cpufreq_policy *policy, } first_cpu = 1; - for_each_cpu_mask_nr(j, policy->cpus) { - const cpumask_t *mask; + for_each_cpu(j, policy->cpus) { + const struct cpumask *mask; /* cpufreq holds the hotplug lock, so we are safe here */ if (!cpu_online(j)) @@ -504,9 +504,9 @@ static int centrino_target (struct cpufreq_policy *policy, * Make sure we are running on CPU that wants to change freq */ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) - mask = &policy->cpus; + mask = policy->cpus; else - mask = &cpumask_of_cpu(j); + mask = cpumask_of(j); set_cpus_allowed_ptr(current, mask); preempt_disable(); @@ -538,7 +538,7 @@ static int centrino_target (struct cpufreq_policy *policy, dprintk("target=%dkHz old=%d new=%d msr=%04x\n", target_freq, freqs.old, freqs.new, msr); - for_each_cpu_mask_nr(k, policy->cpus) { + for_each_cpu(k, policy->cpus) { if (!cpu_online(k)) continue; freqs.cpu = k; @@ -563,7 +563,7 @@ static int centrino_target (struct cpufreq_policy *policy, preempt_enable(); } - for_each_cpu_mask_nr(k, policy->cpus) { + for_each_cpu(k, policy->cpus) { if (!cpu_online(k)) continue; freqs.cpu = k; @@ -586,7 +586,7 @@ static int centrino_target (struct cpufreq_policy *policy, tmp = freqs.new; freqs.new = freqs.old; freqs.old = tmp; - for_each_cpu_mask_nr(j, policy->cpus) { + for_each_cpu(j, policy->cpus) { if (!cpu_online(j)) continue; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 04d0376b64b0..dedc1e98f168 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -229,7 +229,7 @@ static unsigned int speedstep_detect_chipset (void) return 0; } -static unsigned int _speedstep_get(const cpumask_t *cpus) +static unsigned int _speedstep_get(const struct cpumask *cpus) { unsigned int speed; cpumask_t cpus_allowed; @@ -244,7 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus) static unsigned int speedstep_get(unsigned int cpu) { - return _speedstep_get(&cpumask_of_cpu(cpu)); + return _speedstep_get(cpumask_of(cpu)); } /** @@ -267,7 +267,7 @@ static int speedstep_target (struct cpufreq_policy *policy, if 
(cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) return -EINVAL; - freqs.old = _speedstep_get(&policy->cpus); + freqs.old = _speedstep_get(policy->cpus); freqs.new = speedstep_freqs[newstate].frequency; freqs.cpu = policy->cpu; @@ -279,20 +279,20 @@ static int speedstep_target (struct cpufreq_policy *policy, cpus_allowed = current->cpus_allowed; - for_each_cpu_mask_nr(i, policy->cpus) { + for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } /* switch to physical CPU where state is to be changed */ - set_cpus_allowed_ptr(current, &policy->cpus); + set_cpus_allowed_ptr(current, policy->cpus); speedstep_set_state(newstate); /* allow to be run on all CPUs */ set_cpus_allowed_ptr(current, &cpus_allowed); - for_each_cpu_mask_nr(i, policy->cpus) { + for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -322,11 +322,11 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) /* only run on CPU to be set, or on its sibling */ #ifdef CONFIG_SMP - policy->cpus = per_cpu(cpu_sibling_map, policy->cpu); + cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); #endif cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, &policy->cpus); + set_cpus_allowed_ptr(current, policy->cpus); /* detect low and high frequency and transition latency */ result = speedstep_get_freqs(speedstep_processor, @@ -339,7 +339,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) return result; /* get current speed setting */ - speed = _speedstep_get(&policy->cpus); + speed = _speedstep_get(policy->cpus); if (!speed) return -EIO; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 01dde80597f7..b55cb67435bd 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -584,12 +584,12 @@ out: return i; } -static ssize_t show_cpus(cpumask_t mask, char *buf) +static ssize_t show_cpus(const struct cpumask *mask, char *buf) { ssize_t i = 0; unsigned int cpu; - for_each_cpu_mask_nr(cpu, mask) { + for_each_cpu(cpu, mask) { if (i) i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); @@ -606,7 +606,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf) */ static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) { - if (cpus_empty(policy->related_cpus)) + if (cpumask_empty(policy->related_cpus)) return show_cpus(policy->cpus, buf); return show_cpus(policy->related_cpus, buf); } @@ -806,9 +806,20 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) ret = -ENOMEM; goto nomem_out; } + if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) { + kfree(policy); + ret = -ENOMEM; + goto nomem_out; + } + if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) { + free_cpumask_var(policy->cpus); + kfree(policy); + ret = -ENOMEM; + goto nomem_out; + } policy->cpu = cpu; - policy->cpus = cpumask_of_cpu(cpu); + cpumask_copy(policy->cpus, cpumask_of(cpu)); /* Initially set CPU itself as the policy_cpu */ per_cpu(policy_cpu, cpu) = cpu; @@ -843,7 +854,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) } #endif - for_each_cpu_mask_nr(j, policy->cpus) { + for_each_cpu(j, policy->cpus) { if (cpu == j) continue; @@ -861,7 +872,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) goto err_out_driver_exit; spin_lock_irqsave(&cpufreq_driver_lock, flags); - managed_policy->cpus = policy->cpus; + cpumask_copy(managed_policy->cpus, 
policy->cpus); per_cpu(cpufreq_cpu_data, cpu) = managed_policy; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); @@ -916,14 +927,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) } spin_lock_irqsave(&cpufreq_driver_lock, flags); - for_each_cpu_mask_nr(j, policy->cpus) { + for_each_cpu(j, policy->cpus) { per_cpu(cpufreq_cpu_data, j) = policy; per_cpu(policy_cpu, j) = policy->cpu; } spin_unlock_irqrestore(&cpufreq_driver_lock, flags); /* symlink affected CPUs */ - for_each_cpu_mask_nr(j, policy->cpus) { + for_each_cpu(j, policy->cpus) { if (j == cpu) continue; if (!cpu_online(j)) @@ -963,7 +974,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) err_out_unregister: spin_lock_irqsave(&cpufreq_driver_lock, flags); - for_each_cpu_mask_nr(j, policy->cpus) + for_each_cpu(j, policy->cpus) per_cpu(cpufreq_cpu_data, j) = NULL; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); @@ -1024,7 +1035,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) */ if (unlikely(cpu != data->cpu)) { dprintk("removing link\n"); - cpu_clear(cpu, data->cpus); + cpumask_clear_cpu(cpu, data->cpus); spin_unlock_irqrestore(&cpufreq_driver_lock, flags); sysfs_remove_link(&sys_dev->kobj, "cpufreq"); cpufreq_cpu_put(data); @@ -1045,8 +1056,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) * per_cpu(cpufreq_cpu_data) while holding the lock, and remove * the sysfs links afterwards. */ - if (unlikely(cpus_weight(data->cpus) > 1)) { - for_each_cpu_mask_nr(j, data->cpus) { + if (unlikely(cpumask_weight(data->cpus) > 1)) { + for_each_cpu(j, data->cpus) { if (j == cpu) continue; per_cpu(cpufreq_cpu_data, j) = NULL; @@ -1055,8 +1066,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - if (unlikely(cpus_weight(data->cpus) > 1)) { - for_each_cpu_mask_nr(j, data->cpus) { + if (unlikely(cpumask_weight(data->cpus) > 1)) { + for_each_cpu(j, data->cpus) { if (j == cpu) continue; dprintk("removing link for cpu %u\n", j); @@ -1090,7 +1101,10 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) if (cpufreq_driver->exit) cpufreq_driver->exit(data); + free_cpumask_var(data->related_cpus); + free_cpumask_var(data->cpus); kfree(data); + per_cpu(cpufreq_cpu_data, cpu) = NULL; cpufreq_debug_enable_ratelimit(); return 0; diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index e2657837d954..0320962c4ec5 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -498,7 +498,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, return rc; } - for_each_cpu_mask_nr(j, policy->cpus) { + for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(cpu_dbs_info, j); j_dbs_info->cur_policy = policy; diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 2ab3c12b88af..6a2b036c9389 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -400,7 +400,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) /* Get Absolute Load - in terms of freq */ max_load_freq = 0; - for_each_cpu_mask_nr(j, policy->cpus) { + for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; cputime64_t cur_wall_time, cur_idle_time; unsigned int idle_time, wall_time; @@ -568,7 +568,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, return rc; } - for_each_cpu_mask_nr(j, policy->cpus) { + for_each_cpu(j, policy->cpus) { struct 
cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(cpu_dbs_info, j); j_dbs_info->cur_policy = policy; diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 484b3abf61bb..384b38d3e8e2 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -80,8 +80,8 @@ struct cpufreq_real_policy { }; struct cpufreq_policy { - cpumask_t cpus; /* CPUs requiring sw coordination */ - cpumask_t related_cpus; /* CPUs with any coordination */ + cpumask_var_t cpus; /* CPUs requiring sw coordination */ + cpumask_var_t related_cpus; /* CPUs with any coordination */ unsigned int shared_type; /* ANY or ALL affected CPUs should set cpufreq */ unsigned int cpu; /* cpu nr of registered CPU */ -- cgit v1.2.3 From c74f31c035f46a095a0c72f80246a65b314205a5 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 4 Jan 2009 05:18:07 -0800 Subject: cpumask: use work_on_cpu in acpi/cstate.c Impact: use new cpumask API to reduce stack usage Replace the saving of current->cpus_allowed and set_cpus_allowed_ptr() with a work_on_cpu function for the acpi_processor_ffh_cstate_probe() function. Basically splits acpi_processor_ffh_cstate_probe() into two functions, the other being acpi_processor_ffh_cstate_probe_cpu which is the work function run on the designated cpu. Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/acpi/cstate.c | 70 +++++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 33 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index c2502eb9aa83..cf5ec586f220 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -66,35 +66,15 @@ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; #define NATIVE_CSTATE_BEYOND_HALT (2) -int acpi_processor_ffh_cstate_probe(unsigned int cpu, - struct acpi_processor_cx *cx, struct acpi_power_register *reg) +static long acpi_processor_ffh_cstate_probe_cpu(void *_cx) { - struct cstate_entry *percpu_entry; - struct cpuinfo_x86 *c = &cpu_data(cpu); - - cpumask_t saved_mask; - int retval; + struct acpi_processor_cx *cx = _cx; + long retval; unsigned int eax, ebx, ecx, edx; unsigned int edx_part; unsigned int cstate_type; /* C-state type and not ACPI C-state type */ unsigned int num_cstate_subtype; - if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF ) - return -1; - - if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT) - return -1; - - percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); - percpu_entry->states[cx->index].eax = 0; - percpu_entry->states[cx->index].ecx = 0; - - /* Make sure we are running on right CPU */ - saved_mask = current->cpus_allowed; - retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); - if (retval) - return -1; - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); /* Check whether this particular cx_type (in CST) is supported or not */ @@ -114,21 +94,45 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, retval = -1; goto out; } - percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; - - /* Use the hint in CST */ - percpu_entry->states[cx->index].eax = cx->address; if (!mwait_supported[cstate_type]) { mwait_supported[cstate_type] = 1; - printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d " - "state\n", cx->type); + printk(KERN_DEBUG + "Monitor-Mwait will be used to enter C-%d " + "state\n", cx->type); } - snprintf(cx->desc, ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x", - cx->address); - + snprintf(cx->desc, + ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 
0x%x", + cx->address); out: - set_cpus_allowed_ptr(current, &saved_mask); + return retval; +} + +int acpi_processor_ffh_cstate_probe(unsigned int cpu, + struct acpi_processor_cx *cx, struct acpi_power_register *reg) +{ + struct cstate_entry *percpu_entry; + struct cpuinfo_x86 *c = &cpu_data(cpu); + long retval; + + if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF) + return -1; + + if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT) + return -1; + + percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); + percpu_entry->states[cx->index].eax = 0; + percpu_entry->states[cx->index].ecx = 0; + + /* Make sure we are running on right CPU */ + + retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx); + if (retval == 0) { + /* Use the hint in CST */ + percpu_entry->states[cx->index].eax = cx->address; + percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK; + } return retval; } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); -- cgit v1.2.3 From 4d8bb5374924fc736ce275bfd1619b87a2106860 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 4 Jan 2009 05:18:08 -0800 Subject: cpumask: use cpumask_var_t in acpi-cpufreq.c Impact: cleanup, reduce stack usage, use new cpumask API. Replace the cpumask_t in struct drv_cmd with a cpumask_var_t. Remove unneeded online_policy_cpus cpumask_t in acpi_cpufreq_target. Update refs to use new cpumask API. Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 58 +++++++++++++++--------------- 1 file changed, 29 insertions(+), 29 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 0b31939862d6..fb594170dc53 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -145,7 +145,7 @@ typedef union { struct drv_cmd { unsigned int type; - cpumask_t mask; + cpumask_var_t mask; drv_addr_union addr; u32 val; }; @@ -193,7 +193,7 @@ static void drv_read(struct drv_cmd *cmd) cpumask_t saved_mask = current->cpus_allowed; cmd->val = 0; - set_cpus_allowed_ptr(current, &cmd->mask); + set_cpus_allowed_ptr(current, cmd->mask); do_drv_read(cmd); set_cpus_allowed_ptr(current, &saved_mask); } @@ -203,8 +203,8 @@ static void drv_write(struct drv_cmd *cmd) cpumask_t saved_mask = current->cpus_allowed; unsigned int i; - for_each_cpu_mask_nr(i, cmd->mask) { - set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); + for_each_cpu(i, cmd->mask) { + set_cpus_allowed_ptr(current, cpumask_of(i)); do_drv_write(cmd); } @@ -212,22 +212,22 @@ static void drv_write(struct drv_cmd *cmd) return; } -static u32 get_cur_val(const cpumask_t *mask) +static u32 get_cur_val(const struct cpumask *mask) { struct acpi_processor_performance *perf; struct drv_cmd cmd; - if (unlikely(cpus_empty(*mask))) + if (unlikely(cpumask_empty(mask))) return 0; - switch (per_cpu(drv_data, first_cpu(*mask))->cpu_feature) { + switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) { case SYSTEM_INTEL_MSR_CAPABLE: cmd.type = SYSTEM_INTEL_MSR_CAPABLE; cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; break; case SYSTEM_IO_CAPABLE: cmd.type = SYSTEM_IO_CAPABLE; - perf = per_cpu(drv_data, first_cpu(*mask))->acpi_data; + perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data; cmd.addr.io.port = perf->control_register.address; cmd.addr.io.bit_width = perf->control_register.bit_width; break; @@ -235,7 +235,7 @@ static u32 get_cur_val(const cpumask_t *mask) return 0; } - cmd.mask = *mask; + 
cpumask_copy(cmd.mask, mask); drv_read(&cmd); @@ -386,7 +386,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu); struct acpi_processor_performance *perf; struct cpufreq_freqs freqs; - cpumask_t online_policy_cpus; struct drv_cmd cmd; unsigned int next_state = 0; /* Index into freq_table */ unsigned int next_perf_state = 0; /* Index into perf table */ @@ -401,20 +400,18 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, return -ENODEV; } + if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL))) + return -ENOMEM; + perf = data->acpi_data; result = cpufreq_frequency_table_target(policy, data->freq_table, target_freq, relation, &next_state); - if (unlikely(result)) - return -ENODEV; - -#ifdef CONFIG_HOTPLUG_CPU - /* cpufreq holds the hotplug lock, so we are safe from here on */ - cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus); -#else - online_policy_cpus = policy->cpus; -#endif + if (unlikely(result)) { + result = -ENODEV; + goto out; + } next_perf_state = data->freq_table[next_state].index; if (perf->state == next_perf_state) { @@ -425,7 +422,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, } else { dprintk("Already at target state (P%d)\n", next_perf_state); - return 0; + goto out; } } @@ -444,19 +441,19 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, cmd.val = (u32) perf->states[next_perf_state].control; break; default: - return -ENODEV; + result = -ENODEV; + goto out; } - cpus_clear(cmd.mask); - + /* cpufreq holds the hotplug lock, so we are safe from here on */ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) - cmd.mask = online_policy_cpus; + cpumask_and(cmd.mask, cpu_online_mask, policy->cpus); else - cpu_set(policy->cpu, cmd.mask); + cpumask_copy(cmd.mask, cpumask_of(policy->cpu)); freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; - for_each_cpu_mask_nr(i, cmd.mask) { + for_each_cpu(i, cmd.mask) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -464,19 +461,22 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, drv_write(&cmd); if (acpi_pstate_strict) { - if (!check_freqs(&cmd.mask, freqs.new, data)) { + if (!check_freqs(cmd.mask, freqs.new, data)) { dprintk("acpi_cpufreq_target failed (%d)\n", policy->cpu); - return -EAGAIN; + result = -EAGAIN; + goto out; } } - for_each_cpu_mask_nr(i, cmd.mask) { + for_each_cpu(i, cmd.mask) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } perf->state = next_perf_state; +out: + free_cpumask_var(cmd.mask); return result; } -- cgit v1.2.3 From 7503bfbae89eba07b46441a5d1594647f6b8ab7d Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 4 Jan 2009 05:18:09 -0800 Subject: cpumask: use work_on_cpu in acpi-cpufreq.c for drv_read and drv_write Impact: use new cpumask API to reduce stack usage Replace the saving of current->cpus_allowed and set_cpus_allowed_ptr() with a work_on_cpu function for drv_read() and drv_write(). Basically converts do_drv_{read,write} into "work_on_cpu" functions that are now called by drv_read and drv_write. 
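
The conversion described above follows a recurring pattern in this series: rather than saving current->cpus_allowed, rebinding the calling task with set_cpus_allowed_ptr() and restoring the mask afterwards, the per-CPU work is wrapped in a long (*fn)(void *) callback and handed to work_on_cpu(), which runs it synchronously on the target CPU. A minimal sketch of the two styles follows; the poke_cpu_* names are hypothetical and this only illustrates the idiom, it is not the driver's actual code.

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

/* Old style: rebind the calling task, do the per-CPU work, rebind back. */
static void poke_cpu_old(unsigned int cpu, void *data)
{
	cpumask_t saved_mask = current->cpus_allowed;

	set_cpus_allowed_ptr(current, cpumask_of(cpu));
	/* ... access per-CPU MSRs or I/O ports via 'data' here ... */
	set_cpus_allowed_ptr(current, &saved_mask);
}

/* New style: the body becomes a callback that work_on_cpu() runs remotely. */
static long poke_cpu_fn(void *data)
{
	/* executes on the CPU passed to work_on_cpu(); use 'data' here */
	return 0;
}

static long poke_cpu_new(unsigned int cpu, void *data)
{
	/* synchronous: queues the callback on 'cpu' and waits for it */
	return work_on_cpu(cpu, poke_cpu_fn, data);
}

Besides shrinking the stack (no on-stack cpumask_t copy of cpus_allowed), this also avoids temporarily changing the caller's affinity, which the old style briefly exposed to the rest of the system.
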
Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index fb594170dc53..4e4f2b04dac2 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -150,8 +150,9 @@ struct drv_cmd { u32 val; }; -static void do_drv_read(struct drv_cmd *cmd) +static long do_drv_read(void *_cmd) { + struct drv_cmd *cmd = _cmd; u32 h; switch (cmd->type) { @@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd) default: break; } + return 0; } -static void do_drv_write(struct drv_cmd *cmd) +static long do_drv_write(void *_cmd) { + struct drv_cmd *cmd = _cmd; u32 lo, hi; switch (cmd->type) { @@ -186,30 +189,23 @@ static void do_drv_write(struct drv_cmd *cmd) default: break; } + return 0; } static void drv_read(struct drv_cmd *cmd) { - cpumask_t saved_mask = current->cpus_allowed; cmd->val = 0; - set_cpus_allowed_ptr(current, cmd->mask); - do_drv_read(cmd); - set_cpus_allowed_ptr(current, &saved_mask); + work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd); } static void drv_write(struct drv_cmd *cmd) { - cpumask_t saved_mask = current->cpus_allowed; unsigned int i; for_each_cpu(i, cmd->mask) { - set_cpus_allowed_ptr(current, cpumask_of(i)); - do_drv_write(cmd); + work_on_cpu(i, do_drv_write, cmd); } - - set_cpus_allowed_ptr(current, &saved_mask); - return; } static u32 get_cur_val(const struct cpumask *mask) @@ -235,10 +231,15 @@ static u32 get_cur_val(const struct cpumask *mask) return 0; } + if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL))) + return 0; + cpumask_copy(cmd.mask, mask); drv_read(&cmd); + free_cpumask_var(cmd.mask); + dprintk("get_cur_val = %u\n", cmd.val); return cmd.val; -- cgit v1.2.3 From e39ad415ac15116df213dfa2aa2a4f1b0857af9c Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Sun, 4 Jan 2009 05:18:10 -0800 Subject: cpumask: use work_on_cpu in acpi-cpufreq.c for read_measured_perf_ctrs Impact: use new cpumask API to reduce stack usage Replace the saving of current->cpus_allowed and set_cpus_allowed_ptr() with a work_on_cpu function for read_measured_perf_ctrs(). Basically splits off the work function from get_measured_perf which is run on the designated cpu. Moves definition of struct perf_cur out of function local namespace, and is used as the work function argument. References in get_measured_perf use values in the perf_cur struct. 
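
As the message above notes, a work_on_cpu() callback receives a single void * and can only return a long status code, so any per-CPU measurements have to travel back through a caller-provided structure; that is why struct perf_cur is hoisted out of the function's local scope and used as the work argument. A minimal sketch of that idiom, with hypothetical names (msr_sample, sample_aperf) rather than the patch's actual code:

#include <linux/types.h>
#include <linux/workqueue.h>
#include <asm/msr.h>

/* Results come back through the argument struct, since the callback's
 * return value is reserved for a status code. */
struct msr_sample {
	u32 lo;
	u32 hi;
};

static long sample_aperf(void *_s)
{
	struct msr_sample *s = _s;

	rdmsr(MSR_IA32_APERF, s->lo, s->hi);	/* runs on the target CPU */
	return 0;
}

static long sample_on(unsigned int cpu, struct msr_sample *s)
{
	return work_on_cpu(cpu, sample_aperf, s);
}
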
Signed-off-by: Mike Travis Acked-by: Rusty Russell Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 83 ++++++++++++++++-------------- 1 file changed, 43 insertions(+), 40 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 4e4f2b04dac2..06fcd8f9323c 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -245,6 +245,30 @@ static u32 get_cur_val(const struct cpumask *mask) return cmd.val; } +struct perf_cur { + union { + struct { + u32 lo; + u32 hi; + } split; + u64 whole; + } aperf_cur, mperf_cur; +}; + + +static long read_measured_perf_ctrs(void *_cur) +{ + struct perf_cur *cur = _cur; + + rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi); + rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi); + + wrmsr(MSR_IA32_APERF, 0, 0); + wrmsr(MSR_IA32_MPERF, 0, 0); + + return 0; +} + /* * Return the measured active (C0) frequency on this CPU since last call * to this function. @@ -261,31 +285,12 @@ static u32 get_cur_val(const struct cpumask *mask) static unsigned int get_measured_perf(struct cpufreq_policy *policy, unsigned int cpu) { - union { - struct { - u32 lo; - u32 hi; - } split; - u64 whole; - } aperf_cur, mperf_cur; - - cpumask_t saved_mask; + struct perf_cur cur; unsigned int perf_percent; unsigned int retval; - saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); - if (get_cpu() != cpu) { - /* We were not able to run on requested processor */ - put_cpu(); + if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur)) return 0; - } - - rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi); - rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi); - - wrmsr(MSR_IA32_APERF, 0,0); - wrmsr(MSR_IA32_MPERF, 0,0); #ifdef __i386__ /* @@ -293,37 +298,39 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, * Get an approximate value. Return failure in case we cannot get * an approximate value. 
*/ - if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) { + if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) { int shift_count; u32 h; - h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi); + h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi); shift_count = fls(h); - aperf_cur.whole >>= shift_count; - mperf_cur.whole >>= shift_count; + cur.aperf_cur.whole >>= shift_count; + cur.mperf_cur.whole >>= shift_count; } - if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) { + if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) { int shift_count = 7; - aperf_cur.split.lo >>= shift_count; - mperf_cur.split.lo >>= shift_count; + cur.aperf_cur.split.lo >>= shift_count; + cur.mperf_cur.split.lo >>= shift_count; } - if (aperf_cur.split.lo && mperf_cur.split.lo) - perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo; + if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo) + perf_percent = (cur.aperf_cur.split.lo * 100) / + cur.mperf_cur.split.lo; else perf_percent = 0; #else - if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) { + if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) { int shift_count = 7; - aperf_cur.whole >>= shift_count; - mperf_cur.whole >>= shift_count; + cur.aperf_cur.whole >>= shift_count; + cur.mperf_cur.whole >>= shift_count; } - if (aperf_cur.whole && mperf_cur.whole) - perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole; + if (cur.aperf_cur.whole && cur.mperf_cur.whole) + perf_percent = (cur.aperf_cur.whole * 100) / + cur.mperf_cur.whole; else perf_percent = 0; @@ -331,10 +338,6 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy, retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100; - put_cpu(); - set_cpus_allowed_ptr(current, &saved_mask); - - dprintk("cpu %d: performance percent %d\n", cpu, perf_percent); return retval; } @@ -352,7 +355,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) } cached_freq = data->freq_table[data->acpi_data->state].frequency; - freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data); + freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); if (freq != cached_freq) { /* * The dreaded BIOS frequency change behind our back. -- cgit v1.2.3 From 025dfdafe77f20b3890981a394774baab7b9c827 Mon Sep 17 00:00:00 2001 From: Frederik Schwarzer Date: Thu, 16 Oct 2008 19:02:37 +0200 Subject: trivial: fix then -> than typos in comments and documentation - (better, more, bigger ...) then -> (...) 
than Signed-off-by: Frederik Schwarzer Signed-off-by: Jiri Kosina --- Documentation/hwmon/abituguru-datasheet | 6 +++--- Documentation/networking/rxrpc.txt | 2 +- Documentation/scsi/ChangeLog.lpfc | 2 +- arch/blackfin/kernel/kgdb.c | 2 +- arch/ia64/kernel/kprobes.c | 2 +- arch/m68k/Kconfig | 2 +- arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c | 2 +- arch/powerpc/kernel/kprobes.c | 2 +- arch/powerpc/oprofile/cell/spu_profiler.c | 2 +- arch/s390/Kconfig | 2 +- arch/s390/kernel/kprobes.c | 2 +- arch/sparc/kernel/kprobes.c | 2 +- arch/x86/kernel/kprobes.c | 2 +- arch/x86/kernel/mfgpt_32.c | 2 +- drivers/hwmon/fschmd.c | 2 +- drivers/infiniband/hw/mlx4/cq.c | 2 +- drivers/message/i2o/i2o_scsi.c | 2 +- drivers/mtd/devices/pmc551.c | 2 +- drivers/mtd/ubi/eba.c | 2 +- drivers/mtd/ubi/io.c | 2 +- drivers/mtd/ubi/scan.c | 2 +- drivers/mtd/ubi/ubi-media.h | 4 ++-- drivers/mtd/ubi/vtbl.c | 2 +- drivers/mtd/ubi/wl.c | 4 ++-- drivers/net/bnx2x_link.c | 2 +- drivers/net/e1000/e1000_hw.c | 4 ++-- drivers/net/slip.h | 2 +- drivers/net/tehuti.c | 4 ++-- drivers/net/tokenring/smctr.c | 2 +- drivers/net/wireless/ipw2x00/ipw2100.c | 2 +- drivers/net/wireless/rt2x00/rt2x00crypto.c | 4 ++-- drivers/net/wireless/strip.c | 2 +- drivers/s390/block/dasd_eer.c | 4 ++-- drivers/s390/char/vmlogrdr.c | 4 ++-- drivers/scsi/lpfc/lpfc_hbadisc.c | 4 ++-- drivers/scsi/lpfc/lpfc_sli.c | 10 +++++----- drivers/serial/crisv10.c | 4 ++-- drivers/video/console/vgacon.c | 2 +- fs/ocfs2/cluster/heartbeat.c | 2 +- fs/proc/task_nommu.c | 2 +- fs/ubifs/Kconfig | 2 +- fs/ubifs/budget.c | 4 ++-- fs/ubifs/gc.c | 2 +- fs/ubifs/journal.c | 2 +- fs/ubifs/shrinker.c | 2 +- fs/xfs/linux-2.6/xfs_super.c | 2 +- include/linux/mtd/mtd.h | 2 +- include/linux/spi/spi.h | 4 ++-- include/mtd/ubi-user.h | 2 +- kernel/pid.c | 2 +- kernel/time/jiffies.c | 2 +- net/sctp/auth.c | 4 ++-- net/sctp/sm_statefuns.c | 6 +++--- net/sctp/socket.c | 2 +- net/sctp/tsnmap.c | 2 +- sound/usb/usx2y/usbusx2y.c | 2 +- 56 files changed, 76 insertions(+), 76 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/hwmon/abituguru-datasheet b/Documentation/hwmon/abituguru-datasheet index aef5a9b36846..4d184f2db0ea 100644 --- a/Documentation/hwmon/abituguru-datasheet +++ b/Documentation/hwmon/abituguru-datasheet @@ -74,7 +74,7 @@ a sensor. Notice that some banks have both a read and a write address this is how the uGuru determines if a read from or a write to the bank is taking place, thus when reading you should always use the read address and when writing the -write address. The write address is always one (1) more then the read address. +write address. The write address is always one (1) more than the read address. uGuru ready @@ -224,7 +224,7 @@ Bit 3: Beep if alarm (RW) Bit 4: 1 if alarm cause measured temp is over the warning threshold (R) Bit 5: 1 if alarm cause measured volt is over the max threshold (R) Bit 6: 1 if alarm cause measured volt is under the min threshold (R) -Bit 7: Volt sensor: Shutdown if alarm persist for more then 4 seconds (RW) +Bit 7: Volt sensor: Shutdown if alarm persist for more than 4 seconds (RW) Temp sensor: Shutdown if temp is over the shutdown threshold (RW) * This bit is only honored/used by the uGuru if a temp sensor is connected @@ -293,7 +293,7 @@ Byte 0: Alarm behaviour for the selected sensor. A 1 enables the described behaviour. 
Bit 0: Give an alarm if measured rpm is under the min threshold (RW) Bit 3: Beep if alarm (RW) -Bit 7: Shutdown if alarm persist for more then 4 seconds (RW) +Bit 7: Shutdown if alarm persist for more than 4 seconds (RW) Byte 1: min threshold (scale as bank 0x26) diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt index c3669a3fb4af..60d05eb77c64 100644 --- a/Documentation/networking/rxrpc.txt +++ b/Documentation/networking/rxrpc.txt @@ -540,7 +540,7 @@ A client would issue an operation by: MSG_MORE should be set in msghdr::msg_flags on all but the last part of the request. Multiple requests may be made simultaneously. - If a call is intended to go to a destination other then the default + If a call is intended to go to a destination other than the default specified through connect(), then msghdr::msg_name should be set on the first request message of that call. diff --git a/Documentation/scsi/ChangeLog.lpfc b/Documentation/scsi/ChangeLog.lpfc index ae3f962a7cfc..ff19a52fe004 100644 --- a/Documentation/scsi/ChangeLog.lpfc +++ b/Documentation/scsi/ChangeLog.lpfc @@ -733,7 +733,7 @@ Changes from 20040920 to 20041018 I/O completion path a little more, especially taking care of fast-pathing the non-error case. Also removes tons of dead members and defines from lpfc_scsi.h - e.g. lpfc_target is down - to nothing more then the lpfc_nodelist pointer. + to nothing more than the lpfc_nodelist pointer. * Added binary sysfs file to issue mbox commands * Replaced #if __BIG_ENDIAN with #if __BIG_ENDIAN_BITFIELD for compatibility with the user space applications. diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c index b795a207742c..1c5afaeb9504 100644 --- a/arch/blackfin/kernel/kgdb.c +++ b/arch/blackfin/kernel/kgdb.c @@ -105,7 +105,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) * Extracts ebp, esp and eip values understandable by gdb from the values * saved by switch_to. * thread.esp points to ebp. flags and ebp are pushed in switch_to hence esp - * prior to entering switch_to is 8 greater then the value that is saved. + * prior to entering switch_to is 8 greater than the value that is saved. * If switch_to changes, change following code appropriately. */ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index f07688da947c..0017b9de2ddf 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -434,7 +434,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) /* * It is possible to have multiple instances associated with a given * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more then one return + * have a return probe installed on them, and/or more than one return * return probe was registered for a target function. * * We can handle this because: diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index c825bde17cb3..fb87c08c6b57 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -303,7 +303,7 @@ config M68KFPU_EMU_EXTRAPREC correct rounding, the emulator can (often) do the same but this extra calculation can cost quite some time, so you can disable it here. The emulator will then "only" calculate with a 64 bit - mantissa and round slightly incorrect, what is more then enough + mantissa and round slightly incorrect, what is more than enough for normal usage. 
config M68KFPU_EMU_ONLY diff --git a/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c b/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c index 97862f45496d..caf5e9a0acc7 100644 --- a/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c +++ b/arch/mips/pmc-sierra/yosemite/atmel_read_eeprom.c @@ -148,7 +148,7 @@ int read_eeprom(char *buffer, int eeprom_size, int size) send_byte(W_HEADER); recv_ack(); - /* EEPROM with size of more then 2K need two byte addressing */ + /* EEPROM with size of more than 2K need two byte addressing */ if (eeprom_size > 2048) { send_byte(0x00); recv_ack(); diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index de79915452c8..b29005a5a8f5 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -316,7 +316,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, /* * It is possible to have multiple instances associated with a given * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more then one return + * have a return probe installed on them, and/or more than one return * return probe was registered for a target function. * * We can handle this because: diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c index dd499c3e9da7..83faa958b9d4 100644 --- a/arch/powerpc/oprofile/cell/spu_profiler.c +++ b/arch/powerpc/oprofile/cell/spu_profiler.c @@ -49,7 +49,7 @@ void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_rese * of precision. This is close enough for the purpose at hand. * * The value of the timeout should be small enough that the hw - * trace buffer will not get more then about 1/3 full for the + * trace buffer will not get more than about 1/3 full for the * maximum user specified (the LFSR value) hw sampling frequency. * This is to ensure the trace buffer will never fill even if the * kernel thread scheduling varies under a heavy system load. diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 19577aeffd7b..a94a3c3ae932 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -299,7 +299,7 @@ config WARN_STACK This option enables the compiler options -mwarn-framesize and -mwarn-dynamicstack. If the compiler supports these options it will generate warnings for function which either use alloca or - create a stack frame bigger then CONFIG_WARN_STACK_SIZE. + create a stack frame bigger than CONFIG_WARN_STACK_SIZE. Say N if you are unsure. diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 569079ec4ff0..267f6698680a 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -381,7 +381,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, /* * It is possible to have multiple instances associated with a given * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more then one return + * have a return probe installed on them, and/or more than one return * return probe was registered for a target function. 
* * We can handle this because: diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index 201a6e547e4a..3bc6527c95af 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -517,7 +517,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) /* * It is possible to have multiple instances associated with a given * task either because an multiple functions in the call path - * have a return probe installed on them, and/or more then one return + * have a return probe installed on them, and/or more than one return * return probe was registered for a target function. * * We can handle this because: diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 6c27679ec6aa..a116e6d5726c 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -694,7 +694,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) /* * It is possible to have multiple instances associated with a given * task either because multiple functions in the call path have - * return probes installed on them, and/or more then one + * return probes installed on them, and/or more than one * return probe was registered for a target function. * * We can handle this because: diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index c12314c9e86f..8815f3c7fec7 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c @@ -252,7 +252,7 @@ EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer); /* * The MFPGT timers on the CS5536 provide us with suitable timers to use * as clock event sources - not as good as a HPET or APIC, but certainly - * better then the PIT. This isn't a general purpose MFGPT driver, but + * better than the PIT. This isn't a general purpose MFGPT driver, but * a simplified one designed specifically to act as a clock event source. * For full details about the MFGPT, please consult the CS5536 data sheet. */ diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index 967170368933..8b2d756595d9 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -75,7 +75,7 @@ static const u8 FSCHMD_REG_VOLT[3] = { 0x45, 0x42, 0x48 }; /* minimum pwm at which the fan is driven (pwm can by increased depending on the temp. Notice that for the scy some fans share there minimum speed. - Also notice that with the scy the sensor order is different then with the + Also notice that with the scy the sensor order is different than with the other chips, this order was in the 2.4 driver and kept for consistency. 
*/ static const u8 FSCHMD_REG_FAN_MIN[5][6] = { { 0x55, 0x65 }, /* pos */ diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index a3c5af1d7ec0..de5263beab4a 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -367,7 +367,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) if (err) goto out; } else { - /* Can't be smaller then the number of outstanding CQEs */ + /* Can't be smaller than the number of outstanding CQEs */ outst_cqe = mlx4_ib_get_outstanding_cqes(cq); if (entries < outst_cqe + 1) { err = 0; diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c index 1bcdbbb9e7d3..3d45817e6dcd 100644 --- a/drivers/message/i2o/i2o_scsi.c +++ b/drivers/message/i2o/i2o_scsi.c @@ -390,7 +390,7 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m, * @i2o_dev: the I2O device which was added * * If a I2O device is added we catch the notification, because I2O classes - * other then SCSI peripheral will not be received through + * other than SCSI peripheral will not be received through * i2o_scsi_probe(). */ static void i2o_scsi_notify_device_add(struct i2o_device *i2o_dev) diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c index d38bca64bb15..d2fd550f7e09 100644 --- a/drivers/mtd/devices/pmc551.c +++ b/drivers/mtd/devices/pmc551.c @@ -34,7 +34,7 @@ * aperture size, not the dram size, and the V370PDC supplies no * other method for memory size discovery. This problem is * mostly only relevant when compiled as a module, as the - * unloading of the module with an aperture size smaller then + * unloading of the module with an aperture size smaller than * the ram will cause the driver to detect the onboard memory * size to be equal to the aperture size when the module is * reloaded. Soooo, to help, the module supports an msize diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 048a606cebde..25def348e5ba 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -717,7 +717,7 @@ write_error: * to the real data size, although the @buf buffer has to contain the * alignment. In all other cases, @len has to be aligned. * - * It is prohibited to write more then once to logical eraseblocks of static + * It is prohibited to write more than once to logical eraseblocks of static * volumes. This function returns zero in case of success and a negative error * code in case of failure. */ diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index a74118c05745..fe81039f2a7c 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -465,7 +465,7 @@ out: * This function synchronously erases physical eraseblock @pnum. If @torture * flag is not zero, the physical eraseblock is checked by means of writing * different patterns to it and reading them back. If the torturing is enabled, - * the physical eraseblock is erased more then once. + * the physical eraseblock is erased more than once. * * This function returns the number of erasures made in case of success, %-EIO * if the erasure failed or the torturing test failed, and other negative error diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 41d47e1cf15c..ecde202a5a12 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -478,7 +478,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, return 0; } else { /* - * This logical eraseblock is older then the one found + * This logical eraseblock is older than the one found * previously. 
*/ if (cmp_res & 4) diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h index 2ad940409053..8419fdccc79c 100644 --- a/drivers/mtd/ubi/ubi-media.h +++ b/drivers/mtd/ubi/ubi-media.h @@ -135,7 +135,7 @@ enum { * The erase counter header takes 64 bytes and has a plenty of unused space for * future usage. The unused fields are zeroed. The @version field is used to * indicate the version of UBI implementation which is supposed to be able to - * work with this UBI image. If @version is greater then the current UBI + * work with this UBI image. If @version is greater than the current UBI * version, the image is rejected. This may be useful in future if something * is changed radically. This field is duplicated in the volume identifier * header. @@ -187,7 +187,7 @@ struct ubi_ec_hdr { * (sequence number) is used to distinguish between older and newer versions of * logical eraseblocks. * - * There are 2 situations when there may be more then one physical eraseblock + * There are 2 situations when there may be more than one physical eraseblock * corresponding to the same logical eraseblock, i.e., having the same @vol_id * and @lnum values in the volume identifier header. Suppose we have a logical * eraseblock L and it is mapped to the physical eraseblock P. diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 333c8941552f..1afc61e7455d 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -577,7 +577,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { /* Auto re-size flag may be set only for one volume */ if (ubi->autoresize_vol_id != -1) { - ubi_err("more then one auto-resize volume (%d " + ubi_err("more than one auto-resize volume (%d " "and %d)", ubi->autoresize_vol_id, i); kfree(vol); return -EINVAL; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 14901cb82c18..891534f8210d 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -128,7 +128,7 @@ * situation when the picked physical eraseblock is constantly erased after the * data is written to it. So, we have a constant which limits the highest erase * counter of the free physical eraseblock to pick. Namely, the WL sub-system - * does not pick eraseblocks with erase counter greater then the lowest erase + * does not pick eraseblocks with erase counter greater than the lowest erase * counter plus %WL_FREE_MAX_DIFF. */ #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) @@ -917,7 +917,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi) /* * We schedule wear-leveling only if the difference between the * lowest erase counter of used physical eraseblocks and a high - * erase counter of free physical eraseblocks is greater then + * erase counter of free physical eraseblocks is greater than * %UBI_WL_THRESHOLD. 
*/ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c index 67de94f1f30e..fefa6ab13064 100644 --- a/drivers/net/bnx2x_link.c +++ b/drivers/net/bnx2x_link.c @@ -3359,7 +3359,7 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len) u8 shift = 8*4; u8 digit; if (len < 10) { - /* Need more then 10chars for this format */ + /* Need more than 10chars for this format */ *str_ptr = '\0'; return -EINVAL; } diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index d04eef53571e..e1a3fc1303ee 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -6758,7 +6758,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, * returns: - E1000_ERR_XXX * E1000_SUCCESS * - * For phy's older then IGP, this function simply reads the polarity bit in the + * For phy's older than IGP, this function simply reads the polarity bit in the * Phy Status register. For IGP phy's, this bit is valid only if link speed is * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will * return 0. If the link speed is 1000 Mbps the polarity status is in the @@ -6834,7 +6834,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw, * returns: - E1000_ERR_XXX * E1000_SUCCESS * - * For phy's older then IGP, this function reads the Downshift bit in the Phy + * For phy's older than IGP, this function reads the Downshift bit in the Phy * Specific Status register. For IGP phy's, it reads the Downgrade bit in the * Link Health register. In IGP this bit is latched high, so the driver must * read it immediately after link is established. diff --git a/drivers/net/slip.h b/drivers/net/slip.h index 853e0f6ec710..9ea5c11287d2 100644 --- a/drivers/net/slip.h +++ b/drivers/net/slip.h @@ -75,7 +75,7 @@ struct slip { unsigned long tx_errors; /* Planned stuff */ unsigned long rx_dropped; /* No memory for skb */ unsigned long tx_dropped; /* When MTU change */ - unsigned long rx_over_errors; /* Frame bigger then SLIP buf. */ + unsigned long rx_over_errors; /* Frame bigger than SLIP buf. */ #ifdef SL_INCLUDE_CSLIP unsigned long tx_compressed; unsigned long rx_compressed; diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c index a10a83a11d9f..a7a4dc4d6313 100644 --- a/drivers/net/tehuti.c +++ b/drivers/net/tehuti.c @@ -1004,7 +1004,7 @@ static inline void bdx_rxdb_free_elem(struct rxdb *db, int n) * skb for rx. It assumes that Rx is desabled in HW * funcs are grouped for better cache usage * - * RxD fifo is smaller then RxF fifo by design. Upon high load, RxD will be + * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be * filled and packets will be dropped by nic without getting into host or * cousing interrupt. Anyway, in that condition, host has no chance to proccess * all packets, but dropping in nic is cheaper, since it takes 0 cpu cycles @@ -1826,7 +1826,7 @@ static void bdx_tx_free(struct bdx_priv *priv) * * Pushes desc to TxD fifo and overlaps it if needed. * NOTE: this func does not check for available space. this is responsibility - * of the caller. Neither does it check that data size is smaller then + * of the caller. Neither does it check that data size is smaller than * fifo size. 
*/ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size) diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index a011666342ff..50eb29ce3c87 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -3064,7 +3064,7 @@ static int smctr_load_node_addr(struct net_device *dev) * will consequently cause a timeout. * * NOTE 1: If the monitor_state is MS_BEACON_TEST_STATE, all transmit - * queues other then the one used for the lobe_media_test should be + * queues other than the one used for the lobe_media_test should be * disabled.!? * * NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 1667065b86a7..753de1a9c4b3 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -1332,7 +1332,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv) IPW_AUX_HOST_RESET_REG_STOP_MASTER); /* Step 2. Wait for stop Master Assert - * (not more then 50us, otherwise ret error */ + * (not more than 50us, otherwise ret error */ i = 5; do { udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY); diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c index 37ad0d2fb64c..aee9cba13eb3 100644 --- a/drivers/net/wireless/rt2x00/rt2x00crypto.c +++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c @@ -184,8 +184,8 @@ void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align, * Make room for new data, note that we increase both * headsize and tailsize when required. The tailsize is * only needed when ICV data needs to be inserted and - * the padding is smaller then the ICV data. - * When alignment requirements is greater then the + * the padding is smaller than the ICV data. + * When alignment requirements is greater than the * ICV data we must trim the skb to the correct size * because we need to remove the extra bytes. */ diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index dd0de3a9ed4e..7015f2480550 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c @@ -236,7 +236,7 @@ struct strip { unsigned long tx_errors; /* Planned stuff */ unsigned long rx_dropped; /* No memory for skb */ unsigned long tx_dropped; /* When MTU change */ - unsigned long rx_over_errors; /* Frame bigger then STRIP buf. */ + unsigned long rx_over_errors; /* Frame bigger than STRIP buf. 
*/ unsigned long pps_timer; /* Timer to determine pps */ unsigned long rx_pps_count; /* Counter to determine pps */ diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 892e2878d61b..f8e05ce98621 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -535,8 +535,8 @@ static int dasd_eer_open(struct inode *inp, struct file *filp) eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { kfree(eerb); MESSAGE(KERN_WARNING, "can't open device since module " - "parameter eer_pages is smaller then 1 or" - " bigger then %d", (int)(INT_MAX / PAGE_SIZE)); + "parameter eer_pages is smaller than 1 or" + " bigger than %d", (int)(INT_MAX / PAGE_SIZE)); unlock_kernel(); return -EINVAL; } diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index aabbeb909cc6..d8a2289fcb69 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -427,7 +427,7 @@ static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) buffer = priv->buffer + sizeof(int); } /* - * If the record is bigger then our buffer, we receive only + * If the record is bigger than our buffer, we receive only * a part of it. We can get the rest later. */ if (iucv_data_count > NET_BUFFER_SIZE) @@ -437,7 +437,7 @@ static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) 0, buffer, iucv_data_count, &priv->residual_length); spin_unlock_bh(&priv->priv_lock); - /* An rc of 5 indicates that the record was bigger then + /* An rc of 5 indicates that the record was bigger than * the buffer, which is OK for us. A 9 indicates that the * record was purged befor we could receive it. */ diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 8c64494444bf..311ed6dea726 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1964,10 +1964,10 @@ lpfc_set_disctmo(struct lpfc_vport *vport) uint32_t tmo; if (vport->port_state == LPFC_LOCAL_CFG_LINK) { - /* For FAN, timeout should be greater then edtov */ + /* For FAN, timeout should be greater than edtov */ tmo = (((phba->fc_edtov + 999) / 1000) + 1); } else { - /* Normal discovery timeout should be > then ELS/CT timeout + /* Normal discovery timeout should be > than ELS/CT timeout * FC spec states we need 3 * ratov for CT requests */ tmo = ((phba->fc_ratov * 3) + 3); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 01dfdc8696f8..a36a120561e2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -420,7 +420,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) if (unlikely(pring->local_getidx >= max_cmd_idx)) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0315 Ring %d issue: portCmdGet %d " - "is bigger then cmd ring %d\n", + "is bigger than cmd ring %d\n", pring->ringno, pring->local_getidx, max_cmd_idx); @@ -1628,12 +1628,12 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) { struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; /* - * Ring handler: portRspPut is bigger then + * Ring handler: portRspPut is bigger than * rsp ring */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0312 Ring %d handler: portRspPut %d " - "is bigger then rsp ring %d\n", + "is bigger than rsp ring %d\n", pring->ringno, le32_to_cpu(pgp->rspPutInx), pring->numRiocb); @@ -2083,12 +2083,12 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, portRspPut = le32_to_cpu(pgp->rspPutInx); if (portRspPut >= portRspMax) { /* - * Ring handler: portRspPut is bigger then + * Ring 
handler: portRspPut is bigger than * rsp ring */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0303 Ring %d handler: portRspPut %d " - "is bigger then rsp ring %d\n", + "is bigger than rsp ring %d\n", pring->ringno, portRspPut, portRspMax); phba->link_state = LPFC_HBA_ERROR; diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c index 8b2c619a09f2..e642c22c80e2 100644 --- a/drivers/serial/crisv10.c +++ b/drivers/serial/crisv10.c @@ -1203,7 +1203,7 @@ static void e100_disable_txdma_channel(struct e100_serial *info) unsigned long flags; /* Disable output DMA channel for the serial port in question - * ( set to something other then serialX) + * ( set to something other than serialX) */ local_irq_save(flags); DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line)); @@ -1266,7 +1266,7 @@ static void e100_disable_rxdma_channel(struct e100_serial *info) unsigned long flags; /* Disable input DMA channel for the serial port in question - * ( set to something other then serialX) + * ( set to something other than serialX) */ local_irq_save(flags); if (info->line == 0) { diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index e6210725b9ab..d012edda6d11 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1332,7 +1332,7 @@ static void vgacon_save_screen(struct vc_data *c) c->vc_y = screen_info.orig_y; } - /* We can't copy in more then the size of the video buffer, + /* We can't copy in more than the size of the video buffer, * or we'll be copying in VGA BIOS */ if (!vga_is_gfx) diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 6ebaa58e2c03..04697ba7f73e 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -854,7 +854,7 @@ static int o2hb_thread(void *data) while (!kthread_should_stop() && !reg->hr_unclean_stop) { /* We track the time spent inside - * o2hb_do_disk_heartbeat so that we avoid more then + * o2hb_do_disk_heartbeat so that we avoid more than * hr_timeout_ms between disk writes. On busy systems * this should result in a heartbeat which is less * likely to time itself out. */ diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 219bd79ea894..d4a8be32b902 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -9,7 +9,7 @@ /* * Logic: we've got two memory sums for each process, "shared", and - * "non-shared". Shared memory may get counted more then once, for + * "non-shared". Shared memory may get counted more than once, for * each process that owns it. Non-shared memory is counted * accurately. */ diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig index 91ceeda7e5bf..e35b54d5059d 100644 --- a/fs/ubifs/Kconfig +++ b/fs/ubifs/Kconfig @@ -40,7 +40,7 @@ config UBIFS_FS_ZLIB depends on UBIFS_FS default y help - Zlib copresses better then LZO but it is slower. Say 'Y' if unsure. + Zlib compresses better than LZO but it is slower. Say 'Y' if unsure. # Debugging-related stuff config UBIFS_FS_DEBUG diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 0e5e54d82924..175f9c590b77 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -142,7 +142,7 @@ static long long get_liability(struct ubifs_info *c) * * This function is called when an operation cannot be budgeted because there * is supposedly no free space. 
But in most cases there is some free space: - * o budgeting is pessimistic, so it always budgets more then it is actually + * o budgeting is pessimistic, so it always budgets more than it is actually * needed, so shrinking the liability is one way to make free space - the * cached data will take less space then it was budgeted for; * o GC may turn some dark space into free space (budgeting treats dark space @@ -606,7 +606,7 @@ void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req) * @c: UBIFS file-system description object * * This function converts budget which was allocated for a new page of data to - * the budget of changing an existing page of data. The latter is smaller then + * the budget of changing an existing page of data. The latter is smaller than * the former, so this function only does simple re-calculation and does not * involve any write-back. */ diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index 0bef6501d58a..9832f9abe28e 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c @@ -45,7 +45,7 @@ #define SMALL_NODE_WM UBIFS_MAX_DENT_NODE_SZ /* - * GC may need to move more then one LEB to make progress. The below constants + * GC may need to move more than one LEB to make progress. The below constants * define "soft" and "hard" limits on the number of LEBs the garbage collector * may move. */ diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 10ae25b7d1db..9b7c54e0cd2a 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -191,7 +191,7 @@ again: if (wbuf->lnum != -1 && avail >= len) { /* * Someone else has switched the journal head and we have - * enough space now. This happens when more then one process is + * enough space now. This happens when more than one process is * trying to write to the same journal head at the same time. */ dbg_jnl("return LEB %d back, already have LEB %d:%d", diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c index f248533841a2..e7bab52a1410 100644 --- a/fs/ubifs/shrinker.c +++ b/fs/ubifs/shrinker.c @@ -151,7 +151,7 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention) * @contention: if any contention, this is set to %1 * * This function walks the list of mounted UBIFS file-systems and frees clean - * znodes which are older then @age, until at least @nr znodes are freed. + * znodes which are older than @age, until at least @nr znodes are freed. * Returns the number of freed znodes. */ static int shrink_tnc_trees(int nr, int age, int *contention) diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 36f6cc703ef2..be846d606ae8 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1348,7 +1348,7 @@ xfs_finish_flags( { int ronly = (mp->m_flags & XFS_MOUNT_RDONLY); - /* Fail a mount where the logbuf is smaller then the log stripe */ + /* Fail a mount where the logbuf is smaller than the log stripe */ if (xfs_sb_version_haslogv2(&mp->m_sb)) { if (mp->m_logbsize <= 0 && mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index eae26bb6430a..64433eb411d7 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -83,7 +83,7 @@ typedef enum { * @datbuf: data buffer - if NULL only oob data are read/written * @oobbuf: oob data buffer * - * Note, it is allowed to read more then one OOB area at one go, but not write. + * Note, it is allowed to read more than one OOB area at one go, but not write. * The interface assumes that the OOB write requests program only one page's * OOB area. 
*/ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 82229317753d..68bb1c501d0d 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -327,9 +327,9 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum); * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped * @len: size of rx and tx buffers (in bytes) - * @speed_hz: Select a speed other then the device default for this + * @speed_hz: Select a speed other than the device default for this * transfer. If 0 the default (from @spi_device) is used. - * @bits_per_word: select a bits_per_word other then the device default + * @bits_per_word: select a bits_per_word other than the device default * for this transfer. If 0 the default (from @spi_device) is used. * @cs_change: affects chipselect after this transfer completes * @delay_usecs: microseconds to delay after this transfer before diff --git a/include/mtd/ubi-user.h b/include/mtd/ubi-user.h index ccdc562e444e..2dc2eb2b8e22 100644 --- a/include/mtd/ubi-user.h +++ b/include/mtd/ubi-user.h @@ -253,7 +253,7 @@ struct ubi_mkvol_req { * * Re-sizing is possible for both dynamic and static volumes. But while dynamic * volumes may be re-sized arbitrarily, static volumes cannot be made to be - * smaller then the number of bytes they bear. To arbitrarily shrink a static + * smaller than the number of bytes they bear. To arbitrarily shrink a static * volume, it must be wiped out first (by means of volume update operation with * zero number of bytes). */ diff --git a/kernel/pid.c b/kernel/pid.c index 064e76afa507..af9224cdd6c0 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -475,7 +475,7 @@ pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) EXPORT_SYMBOL(task_session_nr_ns); /* - * Used by proc to find the first pid that is greater then or equal to nr. + * Used by proc to find the first pid that is greater than or equal to nr. * * If there is a pid at nr this function is exactly the same as find_pid_ns. */ diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 1ca99557e929..06f197560f3b 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -45,7 +45,7 @@ * * The value 8 is somewhat carefully chosen, as anything * larger can result in overflows. NSEC_PER_JIFFY grows as - * HZ shrinks, so values greater then 8 overflow 32bits when + * HZ shrinks, so values greater than 8 overflow 32bits when * HZ=100. */ #define JIFFIES_SHIFT 8 diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 52db5f60daa0..20c576f530fa 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -141,8 +141,8 @@ void sctp_auth_destroy_keys(struct list_head *keys) /* Compare two byte vectors as numbers. Return values * are: * 0 - vectors are equal - * < 0 - vector 1 is smaller then vector2 - * > 0 - vector 1 is greater then vector2 + * < 0 - vector 1 is smaller than vector2 + * > 0 - vector 1 is greater than vector2 * * Algorithm is: * This is performed by selecting the numerically smaller key vector... diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 1c4e5d6c29c0..3a0cd075914f 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -4268,9 +4268,9 @@ nomem: /* * Handle a protocol violation when the chunk length is invalid. - * "Invalid" length is identified as smaller then the minimal length a + * "Invalid" length is identified as smaller than the minimal length a * given chunk can be. 
For example, a SACK chunk has invalid length - * if it's length is set to be smaller then the size of sctp_sack_chunk_t. + * if its length is set to be smaller than the size of sctp_sack_chunk_t. * * We inform the other end by sending an ABORT with a Protocol Violation * error code. @@ -4300,7 +4300,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen( /* * Handle a protocol violation when the parameter length is invalid. - * "Invalid" length is identified as smaller then the minimal length a + * "Invalid" length is identified as smaller than the minimal length a * given parameter can be. */ static sctp_disposition_t sctp_sf_violation_paramlen( diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b14a8f33e42d..ff0a8f88de04 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2717,7 +2717,7 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int o paths++; } - /* Only validate asocmaxrxt if we have more then + /* Only validate asocmaxrxt if we have more than * one path/transport. We do this because path * retransmissions are only counted when we have more * then one path. diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c index 35c73e82553a..9bd64565021a 100644 --- a/net/sctp/tsnmap.c +++ b/net/sctp/tsnmap.c @@ -227,7 +227,7 @@ void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn) */ bitmap_zero(map->tsn_map, map->len); } else { - /* If the gap is smaller then the map size, + /* If the gap is smaller than the map size, * shift the map by 'gap' bits and update further. */ bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len); diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c index ca26c532e77e..11639bd72a51 100644 --- a/sound/usb/usx2y/usbusx2y.c +++ b/sound/usb/usx2y/usbusx2y.c @@ -238,7 +238,7 @@ static void i_usX2Y_In04Int(struct urb *urb) send = 0; for (j = 0; j < URBS_AsyncSeq && !err; ++j) if (0 == usX2Y->AS04.urb[j]->status) { - struct us428_p4out *p4out = us428ctls->p4out + send; // FIXME if more then 1 p4out is new, 1 gets lost. + struct us428_p4out *p4out = us428ctls->p4out + send; // FIXME if more than 1 p4out is new, 1 gets lost. usb_fill_bulk_urb(usX2Y->AS04.urb[j], usX2Y->chip.dev, usb_sndbulkpipe(usX2Y->chip.dev, 0x04), &p4out->val.vol, p4out->type == eLT_Light ? sizeof(struct us428_lights) : 5, -- cgit v1.2.3 From 40bcc69b399ddbcd2d5c52b277a1b27398339b27 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 5 Jan 2009 18:39:01 -0800 Subject: x86: k8 numa register active regions later Impact: cleanup don't register early, so we don't need to clear actived regions if it fail to get node hash shift or wild set in nb config. 
also remove nodeids array that is not needed Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/mm/k8topology_64.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c index 41f1b5c00a1d..268f8255280f 100644 --- a/arch/x86/mm/k8topology_64.c +++ b/arch/x86/mm/k8topology_64.c @@ -81,7 +81,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end) unsigned numnodes, cores, bits, apicid_base; unsigned long prevbase; struct bootnode nodes[8]; - unsigned char nodeids[8]; int i, j, nb, found = 0; u32 nodeid, reg; @@ -110,7 +109,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end) limit = read_pci_config(0, nb, 1, 0x44 + i*8); nodeid = limit & 7; - nodeids[i] = nodeid; if ((base & 3) == 0) { if (i < numnodes) printk("Skipping disabled node %d\n", i); @@ -179,9 +177,6 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end) nodes[nodeid].start = base; nodes[nodeid].end = limit; - e820_register_active_regions(nodeid, - nodes[nodeid].start >> PAGE_SHIFT, - nodes[nodeid].end >> PAGE_SHIFT); prevbase = base; @@ -211,12 +206,15 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end) } for (i = 0; i < 8; i++) { - if (nodes[i].start != nodes[i].end) { - nodeid = nodeids[i]; - for (j = apicid_base; j < cores + apicid_base; j++) - apicid_to_node[(nodeid << bits) + j] = i; - setup_node_bootmem(i, nodes[i].start, nodes[i].end); - } + if (nodes[i].start == nodes[i].end) + continue; + + e820_register_active_regions(i, + nodes[i].start >> PAGE_SHIFT, + nodes[i].end >> PAGE_SHIFT); + for (j = apicid_base; j < cores + apicid_base; j++) + apicid_to_node[(i << bits) + j] = i; + setup_node_bootmem(i, nodes[i].start, nodes[i].end); } numa_init_array(); -- cgit v1.2.3 From 4d9f94319c485f5af6717dfd7a333949636aed16 Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Mon, 5 Jan 2009 17:09:41 -0800 Subject: x86: fix x86_32 builds for summit and es7000 arch's Fix the following build errors reported by Yinghai Lu: | In file included from arch/x86/mach-generic/summit.c:16: | tip/linux-2.6/arch/x86/include/asm/summit/apic.h: | In function 'cpu_mask_to_apicid_and': | tip/linux-2.6/arch/x86/include/asm/summit/apic.h:179: | error: 'GFP_ATOMIC' undeclared (first use in this function) Reported-by: Yinghai Lu Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/include/asm/es7000/apic.h | 2 ++ arch/x86/include/asm/summit/apic.h | 1 + 2 files changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index bc53d5ef1386..c58b9cc74465 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h @@ -1,6 +1,8 @@ #ifndef __ASM_ES7000_APIC_H #define __ASM_ES7000_APIC_H +#include + #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) #define esr_disable (1) diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 4bb5fb34f030..93d2c8667cfe 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h @@ -2,6 +2,7 @@ #define __ASM_SUMMIT_APIC_H #include +#include #define esr_disable (1) #define NO_BALANCE_IRQ (0) -- cgit v1.2.3 From 9e9197370dafa7ebc7191d835f0403b13855ca35 Mon Sep 17 00:00:00 2001 From: Huang Weiyi Date: Tue, 6 Jan 2009 06:58:39 +0800 Subject: x86: remove duplicated #include's Removed duplicated #include's in: arch/x86/kernel/mpparse.c 
arch/x86/kernel/nmi.c Signed-off-by: Huang Weiyi Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse.c | 1 - arch/x86/kernel/nmi.c | 1 - 2 files changed, 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index e6a9724fefc1..fc971973f037 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 45a09ccdc214..7228979f1e7f 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3 From 1c0fe6e3bda0464728c23c8d84aa47567e8b716c Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:38:59 -0800 Subject: mm: invoke oom-killer from page fault Rather than have the pagefault handler kill a process directly if it gets a VM_FAULT_OOM, have it call into the OOM killer. With increasingly sophisticated oom behaviour (cpusets, memory cgroups, oom killing throttling, oom priority adjustment or selective disabling, panic on oom, etc), it's silly to unconditionally kill the faulting process at page fault time. Create a hook for pagefault oom path to call into instead. Only converted x86 and uml so far. [akpm@linux-foundation.org: make __out_of_memory() static] [akpm@linux-foundation.org: fix comment] Signed-off-by: Nick Piggin Cc: Jeff Dike Acked-by: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/um/kernel/trap.c | 24 +++++-------- arch/x86/mm/fault.c | 24 ++++--------- include/linux/mm.h | 5 +++ mm/oom_kill.c | 94 +++++++++++++++++++++++++++++++++++---------------- 4 files changed, 84 insertions(+), 63 deletions(-) (limited to 'arch/x86') diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 44e490419495..7384d8accfe7 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -64,11 +64,10 @@ good_area: do { int fault; -survive: + fault = handle_mm_fault(mm, vma, address, is_write); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) { - err = -ENOMEM; goto out_of_memory; } else if (fault & VM_FAULT_SIGBUS) { err = -EACCES; @@ -104,18 +103,14 @@ out: out_nosemaphore: return err; -/* - * We ran out of memory, or some other thing happened to us that made - * us unable to handle the page fault gracefully. - */ out_of_memory: - if (is_global_init(current)) { - up_read(&mm->mmap_sem); - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - goto out; + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). 
+ */ + up_read(&mm->mmap_sem); + pagefault_out_of_memory(); + return 0; } static void bad_segv(struct faultinfo fi, unsigned long ip) @@ -214,9 +209,6 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, si.si_addr = (void __user *)address; current->thread.arch.faultinfo = fi; force_sig_info(SIGBUS, &si, current); - } else if (err == -ENOMEM) { - printk(KERN_INFO "VM: killing process %s\n", current->comm); - do_exit(SIGKILL); } else { BUG_ON(err != -EFAULT); si.si_signo = SIGSEGV; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 57ec8c86a877..9e268b6b204e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -667,7 +667,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(in_atomic() || !mm)) goto bad_area_nosemaphore; -again: /* * When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the @@ -859,25 +858,14 @@ no_context: oops_end(flags, regs, sig); #endif -/* - * We ran out of memory, or some other thing happened to us that made - * us unable to handle the page fault gracefully. - */ out_of_memory: + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). + */ up_read(&mm->mmap_sem); - if (is_global_init(tsk)) { - yield(); - /* - * Re-lookup the vma - in theory the vma tree might - * have changed: - */ - goto again; - } - - printk("VM: killing process %s\n", tsk->comm); - if (error_code & PF_USER) - do_group_exit(SIGKILL); - goto no_context; + pagefault_out_of_memory(); + return; do_sigbus: up_read(&mm->mmap_sem); diff --git a/include/linux/mm.h b/include/linux/mm.h index aaa8b843be28..4a3d28c86443 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -717,6 +717,11 @@ static inline int page_mapped(struct page *page) #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS) +/* + * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. + */ +extern void pagefault_out_of_memory(void); + #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) extern void show_free_areas(void); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 558f9afe6e4e..c592965dab2f 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -509,6 +509,69 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) spin_unlock(&zone_scan_mutex); } +/* + * Must be called with tasklist_lock held for read. + */ +static void __out_of_memory(gfp_t gfp_mask, int order) +{ + if (sysctl_oom_kill_allocating_task) { + oom_kill_process(current, gfp_mask, order, 0, NULL, + "Out of memory (oom_kill_allocating_task)"); + + } else { + unsigned long points; + struct task_struct *p; + +retry: + /* + * Rambo mode: Shoot down a process and hope it solves whatever + * issues we may have. + */ + p = select_bad_process(&points, NULL); + + if (PTR_ERR(p) == -1UL) + return; + + /* Found nothing?!?! Either we hang forever, or we panic. */ + if (!p) { + read_unlock(&tasklist_lock); + panic("Out of memory and no killable processes...\n"); + } + + if (oom_kill_process(p, gfp_mask, order, points, NULL, + "Out of memory")) + goto retry; + } +} + +/* + * pagefault handler calls into here because it is out of memory but + * doesn't know exactly how or why. + */ +void pagefault_out_of_memory(void) +{ + unsigned long freed = 0; + + blocking_notifier_call_chain(&oom_notify_list, 0, &freed); + if (freed > 0) + /* Got some memory back in the last second. 
*/ + return; + + if (sysctl_panic_on_oom) + panic("out of memory from page fault. panic_on_oom is selected.\n"); + + read_lock(&tasklist_lock); + __out_of_memory(0, 0); /* unknown gfp_mask and order */ + read_unlock(&tasklist_lock); + + /* + * Give "p" a good chance of killing itself before we + * retry to allocate memory. + */ + if (!test_thread_flag(TIF_MEMDIE)) + schedule_timeout_uninterruptible(1); +} + /** * out_of_memory - kill the "best" process when we run out of memory * @zonelist: zonelist pointer @@ -522,8 +585,6 @@ void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) */ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) { - struct task_struct *p; - unsigned long points = 0; unsigned long freed = 0; enum oom_constraint constraint; @@ -544,7 +605,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) switch (constraint) { case CONSTRAINT_MEMORY_POLICY: - oom_kill_process(current, gfp_mask, order, points, NULL, + oom_kill_process(current, gfp_mask, order, 0, NULL, "No available memory (MPOL_BIND)"); break; @@ -553,35 +614,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) panic("out of memory. panic_on_oom is selected\n"); /* Fall-through */ case CONSTRAINT_CPUSET: - if (sysctl_oom_kill_allocating_task) { - oom_kill_process(current, gfp_mask, order, points, NULL, - "Out of memory (oom_kill_allocating_task)"); - break; - } -retry: - /* - * Rambo mode: Shoot down a process and hope it solves whatever - * issues we may have. - */ - p = select_bad_process(&points, NULL); - - if (PTR_ERR(p) == -1UL) - goto out; - - /* Found nothing?!?! Either we hang forever, or we panic. */ - if (!p) { - read_unlock(&tasklist_lock); - panic("Out of memory and no killable processes...\n"); - } - - if (oom_kill_process(p, gfp_mask, order, points, NULL, - "Out of memory")) - goto retry; - + __out_of_memory(gfp_mask, order); break; } -out: read_unlock(&tasklist_lock); /* -- cgit v1.2.3 From c04fc586c1a480ba198f03ae7b6cbd7b57380b91 Mon Sep 17 00:00:00 2001 From: Gary Hade Date: Tue, 6 Jan 2009 14:39:14 -0800 Subject: mm: show node to memory section relationship with symlinks in sysfs Show node to memory section relationship with symlinks in sysfs Add /sys/devices/system/node/nodeX/memoryY symlinks for all the memory sections located on nodeX. For example: /sys/devices/system/node/node1/memory135 -> ../../memory/memory135 indicates that memory section 135 resides on node1. Also revises documentation to cover this change as well as updating Documentation/ABI/testing/sysfs-devices-memory to include descriptions of memory hotremove files 'phys_device', 'phys_index', and 'state' that were previously not described there. In addition to it always being a good policy to provide users with the maximum possible amount of physical location information for resources that can be hot-added and/or hot-removed, the following are some (but likely not all) of the user benefits provided by this change. Immediate: - Provides information needed to determine the specific node on which a defective DIMM is located. This will reduce system downtime when the node or defective DIMM is swapped out. - Prevents unintended onlining of a memory section that was previously offlined due to a defective DIMM. This could happen during node hot-add when the user or node hot-add assist script onlines _all_ offlined sections due to user or script inability to identify the specific memory sections located on the hot-added node. 
The consequences of reintroducing the defective memory could be ugly. - Provides information needed to vary the amount and distribution of memory on specific nodes for testing or debugging purposes. Future: - Will provide information needed to identify the memory sections that need to be offlined prior to physical removal of a specific node. Symlink creation during boot was tested on 2-node x86_64, 2-node ppc64, and 2-node ia64 systems. Symlink creation during physical memory hot-add tested on a 2-node x86_64 system. Signed-off-by: Gary Hade Signed-off-by: Badari Pulavarty Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ABI/testing/sysfs-devices-memory | 51 +++++++++++- Documentation/memory-hotplug.txt | 16 +++- arch/ia64/mm/init.c | 2 +- arch/powerpc/mm/mem.c | 2 +- arch/s390/mm/init.c | 2 +- arch/sh/mm/init.c | 3 +- arch/x86/mm/init_32.c | 2 +- arch/x86/mm/init_64.c | 2 +- drivers/base/memory.c | 19 +++-- drivers/base/node.c | 103 +++++++++++++++++++++++++ include/linux/memory.h | 6 +- include/linux/memory_hotplug.h | 2 +- include/linux/node.h | 13 ++++ mm/memory_hotplug.c | 11 +-- 14 files changed, 209 insertions(+), 25 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory index 7a16fe1e2270..9fe91c02ee40 100644 --- a/Documentation/ABI/testing/sysfs-devices-memory +++ b/Documentation/ABI/testing/sysfs-devices-memory @@ -6,7 +6,6 @@ Description: internal state of the kernel memory blocks. Files could be added or removed dynamically to represent hot-add/remove operations. - Users: hotplug memory add/remove tools https://w3.opensource.ibm.com/projects/powerpc-utils/ @@ -19,6 +18,56 @@ Description: This is useful for a user-level agent to determine identify removable sections of the memory before attempting potentially expensive hot-remove memory operation +Users: hotplug memory remove tools + https://w3.opensource.ibm.com/projects/powerpc-utils/ + +What: /sys/devices/system/memory/memoryX/phys_device +Date: September 2008 +Contact: Badari Pulavarty +Description: + The file /sys/devices/system/memory/memoryX/phys_device + is read-only and is designed to show the name of physical + memory device. Implementation is currently incomplete. +What: /sys/devices/system/memory/memoryX/phys_index +Date: September 2008 +Contact: Badari Pulavarty +Description: + The file /sys/devices/system/memory/memoryX/phys_index + is read-only and contains the section ID in hexadecimal + which is equivalent to decimal X contained in the + memory section directory name. + +What: /sys/devices/system/memory/memoryX/state +Date: September 2008 +Contact: Badari Pulavarty +Description: + The file /sys/devices/system/memory/memoryX/state + is read-write. When read, it's contents show the + online/offline state of the memory section. When written, + root can toggle the the online/offline state of a removable + memory section (see removable file description above) + using the following commands. + # echo online > /sys/devices/system/memory/memoryX/state + # echo offline > /sys/devices/system/memory/memoryX/state + + For example, if /sys/devices/system/memory/memory22/removable + contains a value of 1 and + /sys/devices/system/memory/memory22/state contains the + string "online" the following command can be executed by + by root to offline that section. 
+ # echo offline > /sys/devices/system/memory/memory22/state Users: hotplug memory remove tools https://w3.opensource.ibm.com/projects/powerpc-utils/ + +What: /sys/devices/system/node/nodeX/memoryY +Date: September 2008 +Contact: Gary Hade +Description: + When CONFIG_NUMA is enabled + /sys/devices/system/node/nodeX/memoryY is a symbolic link that + points to the corresponding /sys/devices/system/memory/memoryY + memory section directory. For example, the following symbolic + link is created for memory section 9 on node0. + /sys/devices/system/node/node0/memory9 -> ../../memory/memory9 + diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt index 168117bd6ee8..4c2ecf537a4a 100644 --- a/Documentation/memory-hotplug.txt +++ b/Documentation/memory-hotplug.txt @@ -124,7 +124,7 @@ config options. This option can be kernel module too. -------------------------------- -3 sysfs files for memory hotplug +4 sysfs files for memory hotplug -------------------------------- All sections have their device information under /sys/devices/system/memory as @@ -138,11 +138,12 @@ For example, assume 1GiB section size. A device for a memory starting at (0x100000000 / 1Gib = 4) This device covers address range [0x100000000 ... 0x140000000) -Under each section, you can see 3 files. +Under each section, you can see 4 files. /sys/devices/system/memory/memoryXXX/phys_index /sys/devices/system/memory/memoryXXX/phys_device /sys/devices/system/memory/memoryXXX/state +/sys/devices/system/memory/memoryXXX/removable 'phys_index' : read-only and contains section id, same as XXX. 'state' : read-write @@ -150,10 +151,20 @@ Under each section, you can see 3 files. at write: user can specify "online", "offline" command 'phys_device': read-only: designed to show the name of physical memory device. This is not well implemented now. +'removable' : read-only: contains an integer value indicating + whether the memory section is removable or not + removable. A value of 1 indicates that the memory + section is removable and a value of 0 indicates that + it is not removable. NOTE: These directories/files appear after physical memory hotplug phase. +If CONFIG_NUMA is enabled the +/sys/devices/system/memory/memoryXXX memory section +directories can also be accessed via symbolic links located in +the /sys/devices/system/node/node* directories. For example: +/sys/devices/system/node/node0/memory9 -> ../../memory/memory9 -------------------------------- 4. Physical memory hot-add phase @@ -365,7 +376,6 @@ node if necessary. - allowing memory hot-add to ZONE_MOVABLE. maybe we need some switch like sysctl or new control file. - showing memory section and physical device relationship. - - showing memory section and node relationship (maybe good for NUMA) - showing memory section is under ZONE_MOVABLE or not - test and make it better memory offlining. - support HugeTLB page migration and offlining. 
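As a rough illustration of how the new links can be consumed from userspace (a hypothetical test program, not part of this series), resolving one of the node symlinks shows the memory section directory it points at:

/* Hypothetical userspace example: resolve a node -> memory section link. */
#include <stdio.h>
#include <unistd.h>
#include <limits.h>

int main(void)
{
	const char *link = "/sys/devices/system/node/node1/memory135";
	char target[PATH_MAX];
	ssize_t n;

	n = readlink(link, target, sizeof(target) - 1);
	if (n < 0) {
		perror("readlink");
		return 1;
	}
	target[n] = '\0';
	/* Expected output: .../node1/memory135 -> ../../memory/memory135 */
	printf("%s -> %s\n", link, target);
	return 0;
}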
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 054bcd9439aa..56e12903973c 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -692,7 +692,7 @@ int arch_add_memory(int nid, u64 start, u64 size) pgdat = NODE_DATA(nid); zone = pgdat->node_zones + ZONE_NORMAL; - ret = __add_pages(zone, start_pfn, nr_pages); + ret = __add_pages(nid, zone, start_pfn, nr_pages); if (ret) printk("%s: Problem encountered in __add_pages() as ret=%d\n", diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 53b06ebb3f2f..f00f09a77f12 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -132,7 +132,7 @@ int arch_add_memory(int nid, u64 start, u64 size) /* this should work for most non-highmem platforms */ zone = pgdata->node_zones; - return __add_pages(zone, start_pfn, nr_pages); + return __add_pages(nid, zone, start_pfn, nr_pages); } #endif /* CONFIG_MEMORY_HOTPLUG */ diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 158b0d6d7046..f0258ca3b17e 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -183,7 +183,7 @@ int arch_add_memory(int nid, u64 start, u64 size) rc = vmem_add_mapping(start, size); if (rc) return rc; - rc = __add_pages(zone, PFN_DOWN(start), PFN_DOWN(size)); + rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size)); if (rc) vmem_remove_mapping(start, size); return rc; diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 6cbef8caeb56..3edf297c829b 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -311,7 +311,8 @@ int arch_add_memory(int nid, u64 start, u64 size) pgdat = NODE_DATA(nid); /* We only have ZONE_NORMAL, so this is easy.. */ - ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages); + ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL, + start_pfn, nr_pages); if (unlikely(ret)) printk("%s: Failed, __add_pages() == %d\n", __func__, ret); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index f99a6c6c432e..544d724caeee 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -1079,7 +1079,7 @@ int arch_add_memory(int nid, u64 start, u64 size) unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; - return __add_pages(zone, start_pfn, nr_pages); + return __add_pages(nid, zone, start_pfn, nr_pages); } #endif diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 9f7a0d24d42a..54c437e96541 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -857,7 +857,7 @@ int arch_add_memory(int nid, u64 start, u64 size) if (last_mapped_pfn > max_pfn_mapped) max_pfn_mapped = last_mapped_pfn; - ret = __add_pages(zone, start_pfn, nr_pages); + ret = __add_pages(nid, zone, start_pfn, nr_pages); WARN_ON_ONCE(ret); return ret; diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 5260e9e0df48..989429cfed88 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -347,8 +347,9 @@ static inline int memory_probe_init(void) * section belongs to... 
*/ -static int add_memory_block(unsigned long node_id, struct mem_section *section, - unsigned long state, int phys_device) +static int add_memory_block(int nid, struct mem_section *section, + unsigned long state, int phys_device, + enum mem_add_context context) { struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL); int ret = 0; @@ -370,6 +371,10 @@ static int add_memory_block(unsigned long node_id, struct mem_section *section, ret = mem_create_simple_file(mem, phys_device); if (!ret) ret = mem_create_simple_file(mem, removable); + if (!ret) { + if (context == HOTPLUG) + ret = register_mem_sect_under_node(mem, nid); + } return ret; } @@ -382,7 +387,7 @@ static int add_memory_block(unsigned long node_id, struct mem_section *section, * * This could be made generic for all sysdev classes. */ -static struct memory_block *find_memory_block(struct mem_section *section) +struct memory_block *find_memory_block(struct mem_section *section) { struct kobject *kobj; struct sys_device *sysdev; @@ -411,6 +416,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, struct memory_block *mem; mem = find_memory_block(section); + unregister_mem_sect_under_nodes(mem); mem_remove_simple_file(mem, phys_index); mem_remove_simple_file(mem, state); mem_remove_simple_file(mem, phys_device); @@ -424,9 +430,9 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, * need an interface for the VM to add new memory regions, * but without onlining it. */ -int register_new_memory(struct mem_section *section) +int register_new_memory(int nid, struct mem_section *section) { - return add_memory_block(0, section, MEM_OFFLINE, 0); + return add_memory_block(nid, section, MEM_OFFLINE, 0, HOTPLUG); } int unregister_memory_section(struct mem_section *section) @@ -458,7 +464,8 @@ int __init memory_dev_init(void) for (i = 0; i < NR_MEM_SECTIONS; i++) { if (!present_section_nr(i)) continue; - err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 0); + err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, + 0, BOOT); if (!ret) ret = err; } diff --git a/drivers/base/node.c b/drivers/base/node.c index 91636cd8b6c9..43fa90b837ee 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -248,6 +249,105 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) return 0; } +#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE +#define page_initialized(page) (page->lru.next) + +static int get_nid_for_pfn(unsigned long pfn) +{ + struct page *page; + + if (!pfn_valid_within(pfn)) + return -1; + page = pfn_to_page(pfn); + if (!page_initialized(page)) + return -1; + return pfn_to_nid(pfn); +} + +/* register memory section under specified node if it spans that node */ +int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) +{ + unsigned long pfn, sect_start_pfn, sect_end_pfn; + + if (!mem_blk) + return -EFAULT; + if (!node_online(nid)) + return 0; + sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); + sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; + for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { + int page_nid; + + page_nid = get_nid_for_pfn(pfn); + if (page_nid < 0) + continue; + if (page_nid != nid) + continue; + return sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj, + &mem_blk->sysdev.kobj, + kobject_name(&mem_blk->sysdev.kobj)); + } + /* mem section does not span the specified node */ + return 0; +} + +/* unregister memory section under all nodes that 
it spans */ +int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) +{ + nodemask_t unlinked_nodes; + unsigned long pfn, sect_start_pfn, sect_end_pfn; + + if (!mem_blk) + return -EFAULT; + nodes_clear(unlinked_nodes); + sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); + sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; + for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { + unsigned int nid; + + nid = get_nid_for_pfn(pfn); + if (nid < 0) + continue; + if (!node_online(nid)) + continue; + if (node_test_and_set(nid, unlinked_nodes)) + continue; + sysfs_remove_link(&node_devices[nid].sysdev.kobj, + kobject_name(&mem_blk->sysdev.kobj)); + } + return 0; +} + +static int link_mem_sections(int nid) +{ + unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn; + unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages; + unsigned long pfn; + int err = 0; + + for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { + unsigned long section_nr = pfn_to_section_nr(pfn); + struct mem_section *mem_sect; + struct memory_block *mem_blk; + int ret; + + if (!present_section_nr(section_nr)) + continue; + mem_sect = __nr_to_section(section_nr); + mem_blk = find_memory_block(mem_sect); + ret = register_mem_sect_under_node(mem_blk, nid); + if (!err) + err = ret; + + /* discard ref obtained in find_memory_block() */ + kobject_put(&mem_blk->sysdev.kobj); + } + return err; +} +#else +static int link_mem_sections(int nid) { return 0; } +#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ + int register_one_node(int nid) { int error = 0; @@ -267,6 +367,9 @@ int register_one_node(int nid) if (cpu_to_node(cpu) == nid) register_cpu_under_node(cpu, nid); } + + /* link memory sections under this node */ + error = link_mem_sections(nid); } return error; diff --git a/include/linux/memory.h b/include/linux/memory.h index 36c82c9e6ea7..3fdc10806d31 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -79,14 +79,14 @@ static inline int memory_notify(unsigned long val, void *v) #else extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); -extern int register_new_memory(struct mem_section *); +extern int register_new_memory(int, struct mem_section *); extern int unregister_memory_section(struct mem_section *); extern int memory_dev_init(void); extern int remove_memory_block(unsigned long, struct mem_section *, int); extern int memory_notify(unsigned long val, void *v); +extern struct memory_block *find_memory_block(struct mem_section *); #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION< Date: Tue, 6 Jan 2009 14:40:39 -0800 Subject: atomic_t: unify all arch definitions The atomic_t type cannot currently be used in some header files because it would create an include loop with asm/atomic.h. Move the type definition to linux/types.h to break the loop. 
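One way to picture the benefit (hypothetical header, assumed names): a header that only embeds an atomic_t in a structure can now include <linux/types.h> for the type and avoid the loop that pulling in <asm/atomic.h> could create, while the atomic_*() operations themselves still come from <asm/atomic.h>:

/* Hypothetical header, for illustration only. */
#ifndef _LINUX_FOO_STATS_H
#define _LINUX_FOO_STATS_H

#include <linux/types.h>	/* provides atomic_t (and atomic64_t on 64-bit) */

struct foo_stats {
	atomic_t	users;	/* only the type is needed here */
	unsigned long	bytes;
};

#endif /* _LINUX_FOO_STATS_H */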
Signed-off-by: Matthew Wilcox Cc: Huang Ying Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/atomic.h | 9 +-------- arch/arm/include/asm/atomic.h | 3 +-- arch/avr32/include/asm/atomic.h | 2 +- arch/blackfin/include/asm/atomic.h | 4 +--- arch/cris/include/asm/atomic.h | 4 +--- arch/h8300/include/asm/atomic.h | 3 ++- arch/ia64/include/asm/atomic.h | 6 ------ arch/m68knommu/include/asm/atomic.h | 2 +- arch/mips/include/asm/atomic.h | 5 +---- arch/parisc/include/asm/atomic.h | 11 +++-------- arch/powerpc/include/asm/atomic.h | 4 +--- arch/s390/include/asm/atomic.h | 7 +------ arch/sh/include/asm/atomic.h | 7 +++---- arch/sparc/include/asm/atomic_32.h | 2 -- arch/sparc/include/asm/atomic_64.h | 3 --- arch/x86/include/asm/atomic_32.h | 10 +--------- arch/x86/include/asm/atomic_64.h | 18 ++---------------- include/asm-frv/atomic.h | 4 ---- include/asm-m32r/atomic.h | 8 +------- include/asm-m68k/atomic.h | 3 +-- include/asm-mn10300/atomic.h | 9 --------- include/asm-xtensa/atomic.h | 3 +-- include/linux/types.h | 10 ++++++++++ 23 files changed, 33 insertions(+), 104 deletions(-) (limited to 'arch/x86') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index ca88e54dec93..62b363584b2b 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -1,6 +1,7 @@ #ifndef _ALPHA_ATOMIC_H #define _ALPHA_ATOMIC_H +#include #include #include @@ -13,14 +14,6 @@ */ -/* - * Counter is volatile to make sure gcc doesn't try to be clever - * and move things around on us. We need to use _exactly_ the address - * the user gave us, not some alias that contains the same information. - */ -typedef struct { volatile int counter; } atomic_t; -typedef struct { volatile long counter; } atomic64_t; - #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) #define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 325f881ccb50..ee99723b3a6c 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -12,10 +12,9 @@ #define __ASM_ARM_ATOMIC_H #include +#include #include -typedef struct { volatile int counter; } atomic_t; - #define ATOMIC_INIT(i) { (i) } #ifdef __KERNEL__ diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index 7ef3862a73d0..318815107748 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -14,9 +14,9 @@ #ifndef __ASM_AVR32_ATOMIC_H #define __ASM_AVR32_ATOMIC_H +#include #include -typedef struct { volatile int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index 7cf508718605..25776c19064b 100644 --- a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h @@ -1,6 +1,7 @@ #ifndef __ARCH_BLACKFIN_ATOMIC__ #define __ARCH_BLACKFIN_ATOMIC__ +#include #include /* local_irq_XXX() */ /* @@ -13,9 +14,6 @@ * Tony Kou (tonyko@lineo.ca) Lineo Inc. 2001 */ -typedef struct { - int counter; -} atomic_t; #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h index f71ea686a2ea..5718dd8902a1 100644 --- a/arch/cris/include/asm/atomic.h +++ b/arch/cris/include/asm/atomic.h @@ -4,7 +4,7 @@ #define __ASM_CRIS_ATOMIC__ #include - +#include #include #include @@ -13,8 +13,6 @@ * resource counting etc.. 
*/ -typedef struct { volatile int counter; } atomic_t; - #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index b4cf0ea97ede..833186c8dc3b 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -1,12 +1,13 @@ #ifndef __ARCH_H8300_ATOMIC__ #define __ARCH_H8300_ATOMIC__ +#include + /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. */ -typedef struct { int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 50c2b83fd5a0..d37292bd9875 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -17,12 +17,6 @@ #include #include -/* - * On IA-64, counter must always be volatile to ensure that that the - * memory accesses are ordered. - */ -typedef struct { volatile __s32 counter; } atomic_t; -typedef struct { volatile __s64 counter; } atomic64_t; #define ATOMIC_INIT(i) ((atomic_t) { (i) }) #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) diff --git a/arch/m68knommu/include/asm/atomic.h b/arch/m68knommu/include/asm/atomic.h index d5632a305dae..6bb674855a3f 100644 --- a/arch/m68knommu/include/asm/atomic.h +++ b/arch/m68knommu/include/asm/atomic.h @@ -1,6 +1,7 @@ #ifndef __ARCH_M68KNOMMU_ATOMIC__ #define __ARCH_M68KNOMMU_ATOMIC__ +#include #include /* @@ -12,7 +13,6 @@ * We do not have SMP m68k systems, so we don't have to deal with that. */ -typedef struct { int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 1232be3885b0..c996c3b4d074 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -15,13 +15,12 @@ #define _ASM_ATOMIC_H #include +#include #include #include #include #include -typedef struct { volatile int counter; } atomic_t; - #define ATOMIC_INIT(i) { (i) } /* @@ -404,8 +403,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #ifdef CONFIG_64BIT -typedef struct { volatile long counter; } atomic64_t; - #define ATOMIC64_INIT(i) { (i) } /* diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 57fcc4a5ebb4..edbfe25c5fc1 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -155,14 +155,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #endif -/* Note that we need not lock read accesses - aligned word writes/reads - * are atomic, so a reader never sees unconsistent values. - * - * Cache-line alignment would conflict with, for example, linux/module.h +/* + * Note that we need not lock read accesses - aligned word writes/reads + * are atomic, so a reader never sees inconsistent values. */ -typedef struct { volatile int counter; } atomic_t; - /* It's possible to reduce all atomic operations to either * __atomic_add_return, atomic_set and atomic_read (the latter * is there only for consistency). 
@@ -260,8 +257,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #ifdef CONFIG_64BIT -typedef struct { volatile s64 counter; } atomic64_t; - #define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) static __inline__ int diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 499be5bdd6fa..b401950f5259 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -5,7 +5,7 @@ * PowerPC atomic operations */ -typedef struct { int counter; } atomic_t; +#include #ifdef __KERNEL__ #include @@ -251,8 +251,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) #ifdef __powerpc64__ -typedef struct { long counter; } atomic64_t; - #define ATOMIC64_INIT(i) { (i) } static __inline__ long atomic64_read(const atomic64_t *v) diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 2d184655bc5d..de432f2de2d2 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -2,6 +2,7 @@ #define __ARCH_S390_ATOMIC__ #include +#include /* * include/asm-s390/atomic.h @@ -23,9 +24,6 @@ * S390 uses 'Compare And Swap' for atomicity in SMP enviroment */ -typedef struct { - int counter; -} __attribute__ ((aligned (4))) atomic_t; #define ATOMIC_INIT(i) { (i) } #ifdef __KERNEL__ @@ -149,9 +147,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #undef __CS_LOOP #ifdef __s390x__ -typedef struct { - long long counter; -} __attribute__ ((aligned (8))) atomic64_t; #define ATOMIC64_INIT(i) { (i) } #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index c043ef003028..6327ffbb1992 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -7,16 +7,15 @@ * */ -typedef struct { volatile int counter; } atomic_t; +#include +#include +#include #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) #define atomic_read(v) ((v)->counter) #define atomic_set(v,i) ((v)->counter = (i)) -#include -#include - #if defined(CONFIG_GUSA_RB) #include #elif defined(CONFIG_CPU_SH4A) diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 5c944b5a8040..ce465975a6a5 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -13,8 +13,6 @@ #include -typedef struct { volatile int counter; } atomic_t; - #ifdef __KERNEL__ #define ATOMIC_INIT(i) { (i) } diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index 5982c5ae7f07..a0a706492696 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -10,9 +10,6 @@ #include #include -typedef struct { volatile int counter; } atomic_t; -typedef struct { volatile __s64 counter; } atomic64_t; - #define ATOMIC_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) } diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h index ad5b9f6ecddf..85b46fba4229 100644 --- a/arch/x86/include/asm/atomic_32.h +++ b/arch/x86/include/asm/atomic_32.h @@ -2,6 +2,7 @@ #define _ASM_X86_ATOMIC_32_H #include +#include #include #include @@ -10,15 +11,6 @@ * resource counting etc.. */ -/* - * Make sure gcc doesn't try to be clever and move things around - * on us. We need to use _exactly_ the address the user gave us, - * not some alias that contains the same information. 
- */ -typedef struct { - int counter; -} atomic_t; - #define ATOMIC_INIT(i) { (i) } /** diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h index 279d2a731f3f..8c21731984da 100644 --- a/arch/x86/include/asm/atomic_64.h +++ b/arch/x86/include/asm/atomic_64.h @@ -1,25 +1,15 @@ #ifndef _ASM_X86_ATOMIC_64_H #define _ASM_X86_ATOMIC_64_H +#include #include #include -/* atomic_t should be 32 bit signed type */ - /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc.. */ -/* - * Make sure gcc doesn't try to be clever and move things around - * on us. We need to use _exactly_ the address the user gave us, - * not some alias that contains the same information. - */ -typedef struct { - int counter; -} atomic_t; - #define ATOMIC_INIT(i) { (i) } /** @@ -191,11 +181,7 @@ static inline int atomic_sub_return(int i, atomic_t *v) #define atomic_inc_return(v) (atomic_add_return(1, v)) #define atomic_dec_return(v) (atomic_sub_return(1, v)) -/* An 64bit atomic type */ - -typedef struct { - long counter; -} atomic64_t; +/* The 64-bit atomic type */ #define ATOMIC64_INIT(i) { (i) } diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h index 46d696b331e7..296c35cfb207 100644 --- a/include/asm-frv/atomic.h +++ b/include/asm-frv/atomic.h @@ -35,10 +35,6 @@ #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -typedef struct { - int counter; -} atomic_t; - #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) #define atomic_set(v, i) (((v)->counter) = (i)) diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h index 3a38ffe4a4f4..2eed30f84080 100644 --- a/include/asm-m32r/atomic.h +++ b/include/asm-m32r/atomic.h @@ -9,6 +9,7 @@ * Copyright (C) 2004 Hirokazu Takata */ +#include #include #include @@ -17,13 +18,6 @@ * resource counting etc.. */ -/* - * Make sure gcc doesn't try to be clever and move things around - * on us. We need to use _exactly_ the address the user gave us, - * not some alias that contains the same information. - */ -typedef struct { volatile int counter; } atomic_t; - #define ATOMIC_INIT(i) { (i) } /** diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h index 4915294fea63..eb0ab9d4ee77 100644 --- a/include/asm-m68k/atomic.h +++ b/include/asm-m68k/atomic.h @@ -1,7 +1,7 @@ #ifndef __ARCH_M68K_ATOMIC__ #define __ARCH_M68K_ATOMIC__ - +#include #include /* @@ -13,7 +13,6 @@ * We do not have SMP m68k systems, so we don't have to deal with that. */ -typedef struct { int counter; } atomic_t; #define ATOMIC_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) diff --git a/include/asm-mn10300/atomic.h b/include/asm-mn10300/atomic.h index 27c9690b9574..bc064825f9b1 100644 --- a/include/asm-mn10300/atomic.h +++ b/include/asm-mn10300/atomic.h @@ -20,15 +20,6 @@ * resource counting etc.. */ -/* - * Make sure gcc doesn't try to be clever and move things around - * on us. We need to use _exactly_ the address the user gave us, - * not some alias that contains the same information. 
- */ -typedef struct { - int counter; -} atomic_t; - #define ATOMIC_INIT(i) { (i) } #ifdef __KERNEL__ diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h index b3b23540f14d..67ad67bed8c1 100644 --- a/include/asm-xtensa/atomic.h +++ b/include/asm-xtensa/atomic.h @@ -14,8 +14,7 @@ #define _XTENSA_ATOMIC_H #include - -typedef struct { volatile int counter; } atomic_t; +#include #ifdef __KERNEL__ #include diff --git a/include/linux/types.h b/include/linux/types.h index 121f349cb7ec..3b864f2d9560 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -195,6 +195,16 @@ typedef u32 phys_addr_t; typedef phys_addr_t resource_size_t; +typedef struct { + volatile int counter; +} atomic_t; + +#ifdef CONFIG_64BIT +typedef struct { + volatile long counter; +} atomic64_t; +#endif + struct ustat { __kernel_daddr_t f_tfree; __kernel_ino_t f_tinode; -- cgit v1.2.3 From f1883f86dea84fe47a71a39fc1afccc005915ed8 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 6 Jan 2009 14:40:45 -0800 Subject: Remove remaining unwinder code Signed-off-by: Alexey Dobriyan Cc: Gabor Gombas Cc: Jan Beulich Cc: Andi Kleen Cc: Ingo Molnar , Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/unwind.h | 13 --------- arch/x86/kernel/traps.c | 2 -- include/linux/module.h | 3 -- include/linux/unwind.h | 68 ------------------------------------------- init/main.c | 3 -- kernel/module.c | 15 ---------- lib/fault-inject.c | 1 - 7 files changed, 105 deletions(-) delete mode 100644 arch/x86/include/asm/unwind.h delete mode 100644 include/linux/unwind.h (limited to 'arch/x86') diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h deleted file mode 100644 index 8b064bd9c553..000000000000 --- a/arch/x86/include/asm/unwind.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _ASM_X86_UNWIND_H -#define _ASM_X86_UNWIND_H - -#define UNW_PC(frame) ((void)(frame), 0UL) -#define UNW_SP(frame) ((void)(frame), 0UL) -#define UNW_FP(frame) ((void)(frame), 0UL) - -static inline int arch_unw_user_mode(const void *info) -{ - return 0; -} - -#endif /* _ASM_X86_UNWIND_H */ diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ce6650eb64e9..c9a666cdd3db 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -51,7 +50,6 @@ #include #include #include -#include #include #include #include diff --git a/include/linux/module.h b/include/linux/module.h index 3bfed013350b..03cb93d1865a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -294,9 +294,6 @@ struct module /* The size of the executable code in each section. */ unsigned int init_text_size, core_text_size; - /* The handle returned from unwind_add_table. */ - void *unwind_info; - /* Arch-specific module values */ struct mod_arch_specific arch; diff --git a/include/linux/unwind.h b/include/linux/unwind.h deleted file mode 100644 index 7760860fa170..000000000000 --- a/include/linux/unwind.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef _LINUX_UNWIND_H -#define _LINUX_UNWIND_H - -/* - * Copyright (C) 2002-2006 Novell, Inc. - * Jan Beulich - * This code is released under version 2 of the GNU GPL. - * - * A simple API for unwinding kernel stacks. This is used for - * debugging and error reporting purposes. The kernel doesn't need - * full-blown stack unwinding with all the bells and whistles, so there - * is not much point in implementing the full Dwarf2 unwind API. 
- */ - -struct module; - -struct unwind_frame_info {}; - -static inline void unwind_init(void) {} -static inline void unwind_setup(void) {} - -#ifdef CONFIG_MODULES - -static inline void *unwind_add_table(struct module *mod, - const void *table_start, - unsigned long table_size) -{ - return NULL; -} - -static inline void unwind_remove_table(void *handle, int init_only) -{ -} - -#endif - -static inline int unwind_init_frame_info(struct unwind_frame_info *info, - struct task_struct *tsk, - const struct pt_regs *regs) -{ - return -ENOSYS; -} - -static inline int unwind_init_blocked(struct unwind_frame_info *info, - struct task_struct *tsk) -{ - return -ENOSYS; -} - -static inline int unwind_init_running(struct unwind_frame_info *info, - asmlinkage int (*cb)(struct unwind_frame_info *, - void *arg), - void *arg) -{ - return -ENOSYS; -} - -static inline int unwind(struct unwind_frame_info *info) -{ - return -ENOSYS; -} - -static inline int unwind_to_user(struct unwind_frame_info *info) -{ - return -ENOSYS; -} - -#endif /* _LINUX_UNWIND_H */ diff --git a/init/main.c b/init/main.c index 90926dadc20d..e119dd28dd7d 100644 --- a/init/main.c +++ b/init/main.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include @@ -537,7 +536,6 @@ asmlinkage void __init start_kernel(void) * Need to run as early as possible, to initialize the * lockdep hash: */ - unwind_init(); lockdep_init(); debug_objects_early_init(); cgroup_init_early(); @@ -559,7 +557,6 @@ asmlinkage void __init start_kernel(void) setup_arch(&command_line); mm_init_owner(&init_mm, &init_task); setup_command_line(command_line); - unwind_setup(); setup_per_cpu_areas(); setup_nr_cpu_ids(); smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ diff --git a/kernel/module.c b/kernel/module.c index f47cce910f25..34b56cf06615 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include @@ -1449,8 +1448,6 @@ static void free_module(struct module *mod) remove_sect_attrs(mod); mod_kobject_remove(mod); - unwind_remove_table(mod->unwind_info, 0); - /* Arch-specific cleanup. */ module_arch_cleanup(mod); @@ -1867,7 +1864,6 @@ static noinline struct module *load_module(void __user *umod, unsigned int symindex = 0; unsigned int strindex = 0; unsigned int modindex, versindex, infoindex, pcpuindex; - unsigned int unwindex = 0; unsigned int num_kp, num_mcount; struct kernel_param *kp; struct module *mod; @@ -1957,9 +1953,6 @@ static noinline struct module *load_module(void __user *umod, versindex = find_sec(hdr, sechdrs, secstrings, "__versions"); infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo"); pcpuindex = find_pcpusec(hdr, sechdrs, secstrings); -#ifdef ARCH_UNWIND_SECTION_NAME - unwindex = find_sec(hdr, sechdrs, secstrings, ARCH_UNWIND_SECTION_NAME); -#endif /* Don't keep modinfo and version sections. */ sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC; @@ -1969,8 +1962,6 @@ static noinline struct module *load_module(void __user *umod, sechdrs[symindex].sh_flags |= SHF_ALLOC; sechdrs[strindex].sh_flags |= SHF_ALLOC; #endif - if (unwindex) - sechdrs[unwindex].sh_flags |= SHF_ALLOC; /* Check module struct version now, before we try to use module. 
*/ if (!check_modstruct_version(sechdrs, versindex, mod)) { @@ -2267,11 +2258,6 @@ static noinline struct module *load_module(void __user *umod, add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs); add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs); - /* Size of section 0 is 0, so this works well if no unwind info. */ - mod->unwind_info = unwind_add_table(mod, - (void *)sechdrs[unwindex].sh_addr, - sechdrs[unwindex].sh_size); - /* Get rid of temporary copy */ vfree(hdr); @@ -2370,7 +2356,6 @@ sys_init_module(void __user *umod, mutex_lock(&module_mutex); /* Drop initial reference. */ module_put(mod); - unwind_remove_table(mod->unwind_info, 1); module_free(mod, mod->module_init); mod->module_init = NULL; mod->init_size = 0; diff --git a/lib/fault-inject.c b/lib/fault-inject.c index a50a311554cc..f97af55bdd96 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 129415607845d4daea11ddcba706005c69dcb942 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 6 Jan 2009 14:41:50 -0800 Subject: kprobes: add kprobe_insn_mutex and cleanup arch_remove_kprobe() Add kprobe_insn_mutex for protecting kprobe_insn_pages hlist, and remove kprobe_mutex from architecture dependent code. This allows us to call arch_remove_kprobe() (and free_insn_slot) while holding kprobe_mutex. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Russell King Cc: "Luck, Tony" Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/kernel/kprobes.c | 2 -- arch/ia64/kernel/kprobes.c | 8 +++++--- arch/powerpc/kernel/kprobes.c | 7 ++++--- arch/s390/kernel/kprobes.c | 7 ++++--- arch/x86/kernel/kprobes.c | 7 ++++--- include/linux/kprobes.h | 1 - kernel/kprobes.c | 25 +++++++++++++++++++++---- 7 files changed, 38 insertions(+), 19 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 3f9abe0e9aff..f692efddd449 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -92,9 +92,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { - mutex_lock(&kprobe_mutex); free_insn_slot(p->ainsn.insn, 0); - mutex_unlock(&kprobe_mutex); p->ainsn.insn = NULL; } } diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index f07688da947c..097b84d54e73 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -670,9 +670,11 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); - mutex_unlock(&kprobe_mutex); + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, + p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); + p->ainsn.insn = NULL; + } } /* * We are resuming execution after a single step fault, so the pt_regs diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index de79915452c8..989edcdf0297 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -96,9 +96,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, 0); - mutex_unlock(&kprobe_mutex); + if 
(p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } } static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 569079ec4ff0..9b92856632cf 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -218,9 +218,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, 0); - mutex_unlock(&kprobe_mutex); + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } } static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 6c27679ec6aa..eead6f8f9218 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -376,9 +376,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - mutex_lock(&kprobe_mutex); - free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); - mutex_unlock(&kprobe_mutex); + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); + p->ainsn.insn = NULL; + } } static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 497b1d1f7a05..b93e44ce2284 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -201,7 +201,6 @@ static inline int init_test_probes(void) } #endif /* CONFIG_KPROBES_SANITY_TEST */ -extern struct mutex kprobe_mutex; extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 3afd354c46f1..29e87921437d 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -69,7 +69,7 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; /* NOTE: change this value only with kprobe_mutex held */ static bool kprobe_enabled; -DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ +static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; static struct { spinlock_t lock ____cacheline_aligned_in_smp; @@ -115,6 +115,7 @@ enum kprobe_slot_state { SLOT_USED = 2, }; +static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ static struct hlist_head kprobe_insn_pages; static int kprobe_garbage_slots; static int collect_garbage_slots(void); @@ -144,10 +145,10 @@ loop_end: } /** - * get_insn_slot() - Find a slot on an executable page for an instruction. + * __get_insn_slot() - Find a slot on an executable page for an instruction. * We allocate an executable page if there's no room on existing ones. */ -kprobe_opcode_t __kprobes *get_insn_slot(void) +static kprobe_opcode_t __kprobes *__get_insn_slot(void) { struct kprobe_insn_page *kip; struct hlist_node *pos; @@ -196,6 +197,15 @@ kprobe_opcode_t __kprobes *get_insn_slot(void) return kip->insns; } +kprobe_opcode_t __kprobes *get_insn_slot(void) +{ + kprobe_opcode_t *ret; + mutex_lock(&kprobe_insn_mutex); + ret = __get_insn_slot(); + mutex_unlock(&kprobe_insn_mutex); + return ret; +} + /* Return 1 if all garbages are collected, otherwise 0. 
*/ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) { @@ -226,9 +236,13 @@ static int __kprobes collect_garbage_slots(void) { struct kprobe_insn_page *kip; struct hlist_node *pos, *next; + int safety; /* Ensure no-one is preepmted on the garbages */ - if (check_safety() != 0) + mutex_unlock(&kprobe_insn_mutex); + safety = check_safety(); + mutex_lock(&kprobe_insn_mutex); + if (safety != 0) return -EAGAIN; hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) { @@ -251,6 +265,7 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) struct kprobe_insn_page *kip; struct hlist_node *pos; + mutex_lock(&kprobe_insn_mutex); hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { if (kip->insns <= slot && slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { @@ -267,6 +282,8 @@ void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE) collect_garbage_slots(); + + mutex_unlock(&kprobe_insn_mutex); } #endif -- cgit v1.2.3 From 5d30a683888c60b8f93bef3ddc139d1a91ca58f4 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Tue, 6 Jan 2009 14:56:28 -0800 Subject: x86: introduce asm/swab.h Signed-off-by: Harvey Harrison Signed-off-by: Linus Torvalds --- arch/x86/include/asm/Kbuild | 1 + arch/x86/include/asm/byteorder.h | 62 ++-------------------------------------- arch/x86/include/asm/swab.h | 61 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 60 deletions(-) create mode 100644 arch/x86/include/asm/swab.h (limited to 'arch/x86') diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild index 4a8e80cdcfa5..a9f8a814a1f7 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -22,3 +22,4 @@ unifdef-y += unistd_32.h unifdef-y += unistd_64.h unifdef-y += vm86.h unifdef-y += vsyscall.h +unifdef-y += swab.h diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h index f110ad417df3..7c49917e3d9d 100644 --- a/arch/x86/include/asm/byteorder.h +++ b/arch/x86/include/asm/byteorder.h @@ -1,65 +1,7 @@ #ifndef _ASM_X86_BYTEORDER_H #define _ASM_X86_BYTEORDER_H -#include -#include - -#define __LITTLE_ENDIAN - -static inline __attribute_const__ __u32 __arch_swab32(__u32 val) -{ -#ifdef __i386__ -# ifdef CONFIG_X86_BSWAP - asm("bswap %0" : "=r" (val) : "0" (val)); -# else - asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ - "rorl $16,%0\n\t" /* swap words */ - "xchgb %b0,%h0" /* swap higher bytes */ - : "=q" (val) - : "0" (val)); -# endif - -#else /* __i386__ */ - asm("bswapl %0" - : "=r" (val) - : "0" (val)); -#endif - return val; -} -#define __arch_swab32 __arch_swab32 - -static inline __attribute_const__ __u64 __arch_swab64(__u64 val) -{ -#ifdef __i386__ - union { - struct { - __u32 a; - __u32 b; - } s; - __u64 u; - } v; - v.u = val; -# ifdef CONFIG_X86_BSWAP - asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" - : "=r" (v.s.a), "=r" (v.s.b) - : "0" (v.s.a), "1" (v.s.b)); -# else - v.s.a = __arch_swab32(v.s.a); - v.s.b = __arch_swab32(v.s.b); - asm("xchgl %0,%1" - : "=r" (v.s.a), "=r" (v.s.b) - : "0" (v.s.a), "1" (v.s.b)); -# endif - return v.u; -#else /* __i386__ */ - asm("bswapq %0" - : "=r" (val) - : "0" (val)); - return val; -#endif -} -#define __arch_swab64 __arch_swab64 - -#include +#include +#include #endif /* _ASM_X86_BYTEORDER_H */ diff --git a/arch/x86/include/asm/swab.h b/arch/x86/include/asm/swab.h new file mode 100644 index 000000000000..306d4178ffc9 --- /dev/null +++ b/arch/x86/include/asm/swab.h @@ -0,0 +1,61 
@@ +#ifndef _ASM_X86_SWAB_H +#define _ASM_X86_SWAB_H + +#include +#include + +static inline __attribute_const__ __u32 __arch_swab32(__u32 val) +{ +#ifdef __i386__ +# ifdef CONFIG_X86_BSWAP + asm("bswap %0" : "=r" (val) : "0" (val)); +# else + asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ + "rorl $16,%0\n\t" /* swap words */ + "xchgb %b0,%h0" /* swap higher bytes */ + : "=q" (val) + : "0" (val)); +# endif + +#else /* __i386__ */ + asm("bswapl %0" + : "=r" (val) + : "0" (val)); +#endif + return val; +} +#define __arch_swab32 __arch_swab32 + +static inline __attribute_const__ __u64 __arch_swab64(__u64 val) +{ +#ifdef __i386__ + union { + struct { + __u32 a; + __u32 b; + } s; + __u64 u; + } v; + v.u = val; +# ifdef CONFIG_X86_BSWAP + asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" + : "=r" (v.s.a), "=r" (v.s.b) + : "0" (v.s.a), "1" (v.s.b)); +# else + v.s.a = __arch_swab32(v.s.a); + v.s.b = __arch_swab32(v.s.b); + asm("xchgl %0,%1" + : "=r" (v.s.a), "=r" (v.s.b) + : "0" (v.s.a), "1" (v.s.b)); +# endif + return v.u; +#else /* __i386__ */ + asm("bswapq %0" + : "=r" (val) + : "0" (val)); + return val; +#endif +} +#define __arch_swab64 __arch_swab64 + +#endif /* _ASM_X86_SWAB_H */ -- cgit v1.2.3 From da4276b8299a6544dc41ac2485d3ffca5811b3fb Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 7 Jan 2009 11:05:10 +0100 Subject: x86: offer frame pointers in all build modes CONFIG_FRAME_POINTERS=y results in much better debug info for the kernel (clear and precise backtraces), with the only drawback being a ~1% increase in kernel size. So offer it unconditionally and enable it by default. Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + lib/Kconfig.debug | 16 ++++++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 862adb9bf0d4..73f7fe8fd4d1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -27,6 +27,7 @@ config X86 select HAVE_IOREMAP_PROT select HAVE_KPROBES select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_WANT_FRAME_POINTERS select HAVE_KRETPROBES select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2e75478e9c69..2d0f14490174 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -566,14 +566,14 @@ config DEBUG_NOTIFIERS config FRAME_POINTER bool "Compile the kernel with frame pointers" depends on DEBUG_KERNEL && \ - (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \ - AVR32 || SUPERH || BLACKFIN || MN10300) - default y if DEBUG_INFO && UML - help - If you say Y here the resulting kernel image will be slightly larger - and slower, but it might give very useful debugging information on - some architectures or if you use external debuggers. - If you don't debug the kernel, you can say N. + (CRIS || M68K || M68KNOMMU || FRV || UML || S390 || \ + AVR32 || SUPERH || BLACKFIN || MN10300) || \ + ARCH_WANT_FRAME_POINTERS + default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS + help + If you say Y here the resulting kernel image will be slightly + larger and slower, but it gives very useful debugging information + in case of kernel bugs. 
(precise oopses/stacktraces/warnings) config BOOT_PRINTK_DELAY bool "Delay each boot printk message by N milliseconds" -- cgit v1.2.3 From 51d7a1398d1851e892504e97ca20521610dfcece Mon Sep 17 00:00:00 2001 From: Leonardo Potenza Date: Tue, 6 Jan 2009 23:58:29 +0100 Subject: x86: fix section mismatch warnings in mcheck/mce_amd_64.c Mark the function local_allocate_threshold_blocks() with __cpuinit, in order to remove the following section mismatch messages: WARNING: arch/x86/kernel/cpu/mcheck/built-in.o(.text+0x1363): Section mismatch in reference from the function local_allocate_threshold_blocks() to the function .cpuinit.text:allocate_threshold_blocks() The function local_allocate_threshold_blocks() references the function __cpuinit allocate_threshold_blocks(). This is often because local_allocate_threshold_blocks lacks a __cpuinit annotation or the annotation of allocate_threshold_blocks is wrong. WARNING: arch/x86/kernel/cpu/built-in.o(.text+0x1def): Section mismatch in reference from the function local_allocate_threshold_blocks() to the function .cpuinit.text:allocate_threshold_blocks() The function local_allocate_threshold_blocks() references the function __cpuinit allocate_threshold_blocks(). This is often because local_allocate_threshold_blocks lacks a __cpuinit annotation or the annotation of allocate_threshold_blocks is wrong. WARNING: arch/x86/kernel/built-in.o(.text+0xef2b): Section mismatch in reference from the function local_allocate_threshold_blocks() to the function .cpuinit.text:allocate_threshold_blocks() The function local_allocate_threshold_blocks() references the function __cpuinit allocate_threshold_blocks(). This is often because local_allocate_threshold_blocks lacks a __cpuinit annotation or the annotation of allocate_threshold_blocks is wrong. All the callsites of this function are __cpuinit already, and all the functions it calls are __cpuinit as well. Signed-off-by: Leonardo Potenza Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index a5a5e0530370..8ae8c4ff094d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c @@ -462,7 +462,7 @@ out_free: return err; } -static long local_allocate_threshold_blocks(void *_bank) +static __cpuinit long local_allocate_threshold_blocks(void *_bank) { unsigned int *bank = _bank; -- cgit v1.2.3 From 1a9271331ab663f3c7cda78d86b884f2ea86d4d7 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Thu, 30 Oct 2008 02:17:49 +0100 Subject: PCI: struct device - replace bus_id with dev_name(), dev_set_name() This patch is part of a larger patch series which will remove the "char bus_id[20]" name string from struct device. The device name is managed in the kobject anyway, and without any size limitation, and just needlessly copied into "struct device". To set and read the device name dev_name(dev) and dev_set_name(dev) must be used. If your code uses static kobjects, which it shouldn't do, "const char *init_name" can be used to statically provide the name the registered device should have. At registration time, the init_name field is cleared, to enforce the use of dev_name(dev) to access the device name at a later time. We need to get rid of all occurrences of bus_id in the entire tree to be able to enable the new interface. 
Please apply this patch, and possibly convert any remaining occurrences of bus_id. Acked-by: Greg Kroah-Hartman Signed-Off-By: Kay Sievers Signed-off-by: Jesse Barnes --- arch/x86/kernel/pci-dma.c | 2 +- drivers/pci/hotplug/acpiphp_ibm.c | 2 +- drivers/pci/irq.c | 2 +- drivers/pci/pci-acpi.c | 2 +- drivers/pci/pcie/portdrv_core.c | 2 +- drivers/pci/probe.c | 6 +++--- 6 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 19a1044a0cd9..b25428533141 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -38,7 +38,7 @@ EXPORT_SYMBOL(bad_dma_address); be probably a smaller DMA mask, but this is bug-to-bug compatible to older i386. */ struct device x86_dma_fallback_dev = { - .bus_id = "fallback device", + .init_name = "fallback device", .coherent_dma_mask = DMA_32BIT_MASK, .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask, }; diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 881fdd2b7313..5befa7e379b7 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -271,7 +271,7 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context) dbg("%s: generationg bus event\n", __func__); acpi_bus_generate_proc_event(note->device, note->event, detail); acpi_bus_generate_netlink_event(note->device->pnp.device_class, - note->device->dev.bus_id, + dev_name(&note->device->dev), note->event, detail); } else note->event = event; diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index 6441dfa969a3..de01174aff06 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -15,7 +15,7 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason) dev_printk(KERN_ERR, &pdev->dev, "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n", - parent->dev.bus_id, parent->vendor, parent->device); + dev_name(&parent->dev), parent->vendor, parent->device); dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason); dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n"); WARN_ON(1); diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index ae5ec76dca77..2ed3f10d0860 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -375,7 +375,7 @@ static int acpi_pci_find_root_bridge(struct device *dev, acpi_handle *handle) * The string should be the same as root bridge's name * Please look at 'pci_scan_bus_parented' */ - num = sscanf(dev->bus_id, "pci%04x:%02x", &seg, &bus); + num = sscanf(dev_name(dev), "pci%04x:%02x", &seg, &bus); if (num != 2) return -ENODEV; *handle = acpi_get_pci_rootbridge_handle(seg, bus); diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 2e091e014829..75f501ab6468 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -224,7 +224,7 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, device->driver = NULL; device->driver_data = NULL; device->release = release_pcie_device; /* callback to free pcie dev */ - snprintf(device->bus_id, sizeof(device->bus_id), "%s:pcie%02x", + dev_set_name(device, "%s:pcie%02x", pci_name(parent), get_descriptor_id(port_type, service_type)); device->parent = &parent->dev; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 5b3f5937ecf5..eb2b985beb48 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -412,7 +412,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, * registered later in
pci_bus_add_devices() */ child->dev.class = &pcibus_class; - sprintf(child->dev.bus_id, "%04x:%02x", pci_domain_nr(child), busnr); + dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr); /* * Set up the primary, secondary and subordinate @@ -1130,7 +1130,7 @@ struct pci_bus * pci_create_bus(struct device *parent, memset(dev, 0, sizeof(*dev)); dev->parent = parent; dev->release = pci_release_bus_bridge_dev; - sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus); + dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus); error = device_register(dev); if (error) goto dev_reg_err; @@ -1141,7 +1141,7 @@ struct pci_bus * pci_create_bus(struct device *parent, b->dev.class = &pcibus_class; b->dev.parent = b->bridge; - sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus); + dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus); error = device_register(&b->dev); if (error) goto class_dev_reg_err; -- cgit v1.2.3 From 0ef5f8f6159e44b4faa997be08d1a3bcbf44ad08 Mon Sep 17 00:00:00 2001 From: Andrew Patterson Date: Mon, 10 Nov 2008 15:30:50 -0700 Subject: ACPI/PCI: PCI extended config _OSC support called when root bridge added The _OSC capability OSC_EXT_PCI_CONFIG_SUPPORT is set when the root bridge is added with pci_acpi_osc_support() if we can access PCI extended config space. This adds the function pci_ext_cfg_avail which returns true if we can access PCI extended config space (offset greater than 0xff). It currently only returns false if arch=x86 and raw_pci_ext_ops is not set (which might happen if pci=nommcfg is set on the kernel command-line). Signed-off-by: Andrew Patterson Signed-off-by: Jesse Barnes --- arch/x86/pci/common.c | 8 ++++++++ drivers/acpi/pci_root.c | 10 ++++++++-- drivers/pci/pci.c | 13 +++++++++++++ include/linux/pci.h | 2 ++ 4 files changed, 31 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 62ddb73e09ed..9ab8509f7b15 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -562,6 +562,14 @@ void pcibios_disable_device (struct pci_dev *dev) pcibios_disable_irq(dev); } +int pci_ext_cfg_avail(struct pci_dev *dev) +{ + if (raw_pci_ext_ops) + return 1; + else + return 0; +} + struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node) { struct pci_bus *bus = NULL; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index de4d57114fe4..96e68e841539 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -194,7 +194,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) unsigned long long value = 0; acpi_handle handle = NULL; struct acpi_device *child; - u32 flags; + u32 flags, base_flags; if (!device) @@ -216,7 +216,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) * All supported architectures that use ACPI have support for * PCI domains, so we indicate this in _OSC support capabilities. */ - flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; + flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; pci_acpi_osc_support(device->handle, flags); /* @@ -344,6 +344,12 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) list_for_each_entry(child, &device->children, node) acpi_pci_bridge_scan(child); + /* Indicate support for various _OSC capabilities. 
*/ + if (pci_ext_cfg_avail(root->bus->self)) + flags |= OSC_EXT_PCI_CONFIG_SUPPORT; + if (flags != base_flags) + pci_acpi_osc_support(device->handle, flags); + end: if (result) { if (!list_empty(&root->node)) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 3c2fa2fdc9cd..48fa860276d4 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2084,6 +2084,19 @@ static void __devinit pci_no_domains(void) #endif } +/** + * pci_ext_cfg_enabled - can we access extended PCI config space? + * @dev: The PCI device of the root bridge. + * + * Returns 1 if we can access PCI extended config space (offsets + * greater than 0xff). This is the default implementation. Architecture + * implementations can override this. + */ +int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) +{ + return 1; +} + static int __devinit pci_init(void) { struct pci_dev *dev = NULL; diff --git a/include/linux/pci.h b/include/linux/pci.h index 4bb156ba854a..6fd47654ca4e 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1140,6 +1140,8 @@ static inline void pci_mmcfg_early_init(void) { } static inline void pci_mmcfg_late_init(void) { } #endif +int pci_ext_cfg_avail(struct pci_dev *dev); + #ifdef CONFIG_HAS_IOMEM static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) { -- cgit v1.2.3 From e8de1481fd7126ee9e93d6889da6f00c05e1e019 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Wed, 22 Oct 2008 19:55:31 -0700 Subject: resource: allow MMIO exclusivity for device drivers Device drivers that use pci_request_regions() (and similar APIs) have a reasonable expectation that they are the only ones accessing their device. As part of the e1000e hunt, we were afraid that some userland (X or some bootsplash stuff) was mapping the MMIO region that the driver thought it had exclusively via /dev/mem or via various sysfs resource mappings. This patch adds the option for device drivers to cause their reserved regions to the "banned from /dev/mem use" list, so now both kernel memory and device-exclusive MMIO regions are banned. NOTE: This is only active when CONFIG_STRICT_DEVMEM is set. In addition to the config option, a kernel parameter iomem=relaxed is provided for the cases where developers want to diagnose, in the field, drivers issues from userspace. Reviewed-by: Matthew Wilcox Signed-off-by: Arjan van de Ven Signed-off-by: Jesse Barnes --- Documentation/kernel-parameters.txt | 4 ++ arch/x86/mm/init_32.c | 2 + arch/x86/mm/init_64.c | 2 + drivers/net/e1000e/netdev.c | 2 +- drivers/pci/pci-sysfs.c | 3 + drivers/pci/pci.c | 107 ++++++++++++++++++++++++++++++++---- include/linux/ioport.h | 11 +++- include/linux/pci.h | 3 + kernel/resource.c | 61 +++++++++++++++++++- 9 files changed, 176 insertions(+), 19 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 0b3f6711d2f1..0072fabb1dd1 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -918,6 +918,10 @@ and is between 256 and 4096 characters. It is defined in the file inttest= [IA64] + iomem= Disable strict checking of access to MMIO memory + strict regions from userspace. 
+ relaxed + iommu= [x86] off force diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 544d724caeee..88f1b10de3be 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -328,6 +328,8 @@ int devmem_is_allowed(unsigned long pagenr) { if (pagenr <= 256) return 1; + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) + return 0; if (!page_is_ram(pagenr)) return 1; return 0; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 54c437e96541..23f68e77ad1f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -888,6 +888,8 @@ int devmem_is_allowed(unsigned long pagenr) { if (pagenr <= 256) return 1; + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) + return 0; if (!page_is_ram(pagenr)) return 1; return 0; diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index d4639facd1bd..91817d0afcaf 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -4807,7 +4807,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, } } - err = pci_request_selected_regions(pdev, + err = pci_request_selected_regions_exclusive(pdev, pci_select_bars(pdev, IORESOURCE_MEM), e1000e_driver_name); if (err) diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 388440e0d222..d5cdccf27a69 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -620,6 +620,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, vma->vm_pgoff += start >> PAGE_SHIFT; mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; + if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start)) + return -EINVAL; + return pci_mmap_page_range(pdev, vma, mmap_type, write_combine); } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 2cfa41e367a7..47663dc0daf7 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1395,7 +1395,8 @@ void pci_release_region(struct pci_dev *pdev, int bar) * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. */ -int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) +static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, + int exclusive) { struct pci_devres *dr; @@ -1408,8 +1409,9 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) goto err_out; } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { - if (!request_mem_region(pci_resource_start(pdev, bar), - pci_resource_len(pdev, bar), res_name)) + if (!__request_mem_region(pci_resource_start(pdev, bar), + pci_resource_len(pdev, bar), res_name, + exclusive)) goto err_out; } @@ -1427,6 +1429,47 @@ err_out: return -EBUSY; } +/** + * pci_request_region - Reserved PCI I/O and memory resource + * @pdev: PCI device whose resources are to be reserved + * @bar: BAR to be reserved + * @res_name: Name to be associated with resource. + * + * Mark the PCI region associated with PCI device @pdev BR @bar as + * being reserved by owner @res_name. Do not access any + * address inside the PCI regions unless this call returns + * successfully. + * + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. + */ +int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) +{ + return __pci_request_region(pdev, bar, res_name, 0); +} + +/** + * pci_request_region_exclusive - Reserved PCI I/O and memory resource + * @pdev: PCI device whose resources are to be reserved + * @bar: BAR to be reserved + * @res_name: Name to be associated with resource. 
+ * + * Mark the PCI region associated with PCI device @pdev BR @bar as + * being reserved by owner @res_name. Do not access any + * address inside the PCI regions unless this call returns + * successfully. + * + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. + * + * The key difference that _exclusive makes it that userspace is + * explicitly not allowed to map the resource via /dev/mem or + * sysfs. + */ +int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) +{ + return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); +} /** * pci_release_selected_regions - Release selected PCI I/O and memory resources * @pdev: PCI device whose resources were previously reserved @@ -1444,20 +1487,14 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars) pci_release_region(pdev, i); } -/** - * pci_request_selected_regions - Reserve selected PCI I/O and memory resources - * @pdev: PCI device whose resources are to be reserved - * @bars: Bitmask of BARs to be requested - * @res_name: Name to be associated with resource - */ -int pci_request_selected_regions(struct pci_dev *pdev, int bars, - const char *res_name) +int __pci_request_selected_regions(struct pci_dev *pdev, int bars, + const char *res_name, int excl) { int i; for (i = 0; i < 6; i++) if (bars & (1 << i)) - if(pci_request_region(pdev, i, res_name)) + if (__pci_request_region(pdev, i, res_name, excl)) goto err_out; return 0; @@ -1469,6 +1506,26 @@ err_out: return -EBUSY; } + +/** + * pci_request_selected_regions - Reserve selected PCI I/O and memory resources + * @pdev: PCI device whose resources are to be reserved + * @bars: Bitmask of BARs to be requested + * @res_name: Name to be associated with resource + */ +int pci_request_selected_regions(struct pci_dev *pdev, int bars, + const char *res_name) +{ + return __pci_request_selected_regions(pdev, bars, res_name, 0); +} + +int pci_request_selected_regions_exclusive(struct pci_dev *pdev, + int bars, const char *res_name) +{ + return __pci_request_selected_regions(pdev, bars, res_name, + IORESOURCE_EXCLUSIVE); +} + /** * pci_release_regions - Release reserved PCI I/O and memory resources * @pdev: PCI device whose resources were previously reserved by pci_request_regions @@ -1501,6 +1558,29 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name) return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name); } +/** + * pci_request_regions_exclusive - Reserved PCI I/O and memory resources + * @pdev: PCI device whose resources are to be reserved + * @res_name: Name to be associated with resource. + * + * Mark all PCI regions associated with PCI device @pdev as + * being reserved by owner @res_name. Do not access any + * address inside the PCI regions unless this call returns + * successfully. + * + * pci_request_regions_exclusive() will mark the region so that + * /dev/mem and the sysfs MMIO access will not be allowed. + * + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. 
+ */ +int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) +{ + return pci_request_selected_regions_exclusive(pdev, + ((1 << 6) - 1), res_name); +} + + /** * pci_set_master - enables bus-mastering for device dev * @dev: the PCI device to enable @@ -2149,10 +2229,13 @@ EXPORT_SYMBOL(pci_find_capability); EXPORT_SYMBOL(pci_bus_find_capability); EXPORT_SYMBOL(pci_release_regions); EXPORT_SYMBOL(pci_request_regions); +EXPORT_SYMBOL(pci_request_regions_exclusive); EXPORT_SYMBOL(pci_release_region); EXPORT_SYMBOL(pci_request_region); +EXPORT_SYMBOL(pci_request_region_exclusive); EXPORT_SYMBOL(pci_release_selected_regions); EXPORT_SYMBOL(pci_request_selected_regions); +EXPORT_SYMBOL(pci_request_selected_regions_exclusive); EXPORT_SYMBOL(pci_set_master); EXPORT_SYMBOL(pci_set_mwi); EXPORT_SYMBOL(pci_try_set_mwi); diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 041e95aac2bf..f6bb2ca8e3ba 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -49,6 +49,7 @@ struct resource_list { #define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */ #define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */ +#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ #define IORESOURCE_DISABLED 0x10000000 #define IORESOURCE_UNSET 0x20000000 #define IORESOURCE_AUTO 0x40000000 @@ -133,13 +134,16 @@ static inline unsigned long resource_type(struct resource *res) } /* Convenience shorthand with allocation */ -#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) -#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) +#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) +#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) +#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) +#define request_mem_region_exclusive(start,n,name) \ + __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) #define rename_region(region, newname) do { (region)->name = (newname); } while (0) extern struct resource * __request_region(struct resource *, resource_size_t start, - resource_size_t n, const char *name); + resource_size_t n, const char *name, int relaxed); /* Compatibility cruft */ #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) @@ -175,6 +179,7 @@ extern struct resource * __devm_request_region(struct device *dev, extern void __devm_release_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n); extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); +extern int iomem_is_exclusive(u64 addr); #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 59a3dc2059d3..bfcb39ca8879 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -686,10 +686,13 @@ void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), int (*)(struct pci_dev *, u8, u8)); #define HAVE_PCI_REQ_REGIONS 2 int __must_check pci_request_regions(struct pci_dev *, const char *); +int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); void pci_release_regions(struct pci_dev *); int __must_check pci_request_region(struct pci_dev *, int, const char *); +int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *); void 
pci_release_region(struct pci_dev *, int); int pci_request_selected_regions(struct pci_dev *, int, const char *); +int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); void pci_release_selected_regions(struct pci_dev *, int); /* drivers/pci/bus.c */ diff --git a/kernel/resource.c b/kernel/resource.c index e633106b12f6..ca6a1536b205 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -623,7 +623,7 @@ resource_size_t resource_alignment(struct resource *res) */ struct resource * __request_region(struct resource *parent, resource_size_t start, resource_size_t n, - const char *name) + const char *name, int flags) { struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); @@ -634,6 +634,7 @@ struct resource * __request_region(struct resource *parent, res->start = start; res->end = start + n - 1; res->flags = IORESOURCE_BUSY; + res->flags |= flags; write_lock(&resource_lock); @@ -679,7 +680,7 @@ int __check_region(struct resource *parent, resource_size_t start, { struct resource * res; - res = __request_region(parent, start, n, "check-region"); + res = __request_region(parent, start, n, "check-region", 0); if (!res) return -EBUSY; @@ -776,7 +777,7 @@ struct resource * __devm_request_region(struct device *dev, dr->start = start; dr->n = n; - res = __request_region(parent, start, n, name); + res = __request_region(parent, start, n, name, 0); if (res) devres_add(dev, dr); else @@ -876,3 +877,57 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size) return err; } + +#ifdef CONFIG_STRICT_DEVMEM +static int strict_iomem_checks = 1; +#else +static int strict_iomem_checks; +#endif + +/* + * check if an address is reserved in the iomem resource tree + * returns 1 if reserved, 0 if not reserved. + */ +int iomem_is_exclusive(u64 addr) +{ + struct resource *p = &iomem_resource; + int err = 0; + loff_t l; + int size = PAGE_SIZE; + + if (!strict_iomem_checks) + return 0; + + addr = addr & PAGE_MASK; + + read_lock(&resource_lock); + for (p = p->child; p ; p = r_next(NULL, p, &l)) { + /* + * We can probably skip the resources without + * IORESOURCE_IO attribute? + */ + if (p->start >= addr + size) + break; + if (p->end < addr) + continue; + if (p->flags & IORESOURCE_BUSY && + p->flags & IORESOURCE_EXCLUSIVE) { + err = 1; + break; + } + } + read_unlock(&resource_lock); + + return err; +} + +static int __init strict_iomem(char *str) +{ + if (strstr(str, "relaxed")) + strict_iomem_checks = 0; + if (strstr(str, "strict")) + strict_iomem_checks = 1; + return 1; +} + +__setup("iomem=", strict_iomem); -- cgit v1.2.3 From 104bafcfab7ce3031399e60069949f10acecc022 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 12 Dec 2008 06:49:40 +0100 Subject: PCI: Don't carp about BAR allocation failures in quiet boot These are easy to trigger (more or less harmlessly) with multiple video cards, since the ROM BAR will typically not be given any space by the BIOS bridge setup. No reason to punish quiet boot for this. 
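The point of the severity change is console visibility: booting with "quiet" lowers the console loglevel so that only KERN_ERR and more severe messages are still printed, while the same text at KERN_INFO stays in the log buffer but off the console. A rough sketch, mirroring the hunks that follow (for illustration only):

	/* visible even on a quiet boot */
	dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
	/* hidden on a quiet console, still readable via dmesg */
	dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);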
Signed-off-by: Adam Jackson Signed-off-by: Jesse Barnes --- arch/x86/pci/i386.c | 4 ++-- drivers/pci/setup-res.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index e51bf2cda4b0..f884740da318 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -129,7 +129,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) pr = pci_find_parent_resource(dev, r); if (!r->start || !pr || request_resource(pr, r) < 0) { - dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx); + dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); /* * Something is wrong with the region. * Invalidate the resource to prevent @@ -170,7 +170,7 @@ static void __init pcibios_allocate_resources(int pass) r->flags, disabled, pass); pr = pci_find_parent_resource(dev, r); if (!pr || request_resource(pr, r) < 0) { - dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx); + dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx); /* We'll assign a new address later */ r->end -= r->start; r->start = 0; diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 2dbd96cce2d8..4e375632499a 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -134,7 +134,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno) align = resource_alignment(res); if (!align) { - dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus " + dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " "alignment) %pR flags %#lx\n", resno, res, res->flags); return -EINVAL; @@ -157,7 +157,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno) } if (ret) { - dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", + dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); } else { res->flags &= ~IORESOURCE_STARTALIGN; -- cgit v1.2.3 From 0663a36284586ac9a9781be8aa7e8ca9fff16d06 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 10 Dec 2008 13:12:00 -0700 Subject: x86/PCI: make PCI bus locality messages more meaningful Change PCI bus locality messages so they have a bit more context and look like the rest of PCI, e.g., - bus 01 -> node 0 - bus 04 -> node 0 + pci 0000:01: bus on NUMA node 0 + pci 0000:04: bus on NUMA node 0 Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/acpi.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 9e5752fe4d15..6c7683709c44 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -210,11 +210,12 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do if (bus && node != -1) { #ifdef CONFIG_ACPI_NUMA if (pxm >= 0) - printk(KERN_DEBUG "bus %02x -> pxm %d -> node %d\n", - busnum, pxm, node); + printk(KERN_DEBUG + "pci %04x:%02x: bus on NUMA node %d (pxm %d)\n", + domain, busnum, node, pxm); #else - printk(KERN_DEBUG "bus %02x -> node %d\n", - busnum, node); + printk(KERN_DEBUG "pci %04x:%02x: bus on NUMA node %d\n", + domain, busnum, node); #endif } -- cgit v1.2.3 From 23a36002742bca87510201770a7d931c4aa63e97 Mon Sep 17 00:00:00 2001 From: Jacob Pan Date: Mon, 8 Dec 2008 09:44:16 -0800 Subject: PCI: avoid early PCI mmconfig init if pci=noearly is given in cmdline Early type 1 accesses can cause problems on some platforms, and pci=noearly is supposed to prevent them from occurring. 
However, early mcfg probing code uses type 1 and isn't protected by a check for noearly. This patch fixes that problem. Signed-off-by: Jacob Pan Signed-off-by: Jesse Barnes --- arch/x86/pci/init.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c index bec3b048e72b..25a1f8efed4a 100644 --- a/arch/x86/pci/init.c +++ b/arch/x86/pci/init.c @@ -12,7 +12,8 @@ static __init int pci_arch_init(void) type = pci_direct_probe(); #endif - pci_mmcfg_early_init(); + if (!(pci_probe & PCI_PROBE_NOEARLY)) + pci_mmcfg_early_init(); #ifdef CONFIG_PCI_OLPC if (!pci_olpc_init()) -- cgit v1.2.3 From 878f2e50fd1cfea575cdca5bf019c2175dc64131 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 9 Dec 2008 16:11:46 -0700 Subject: PCI: use config space encoding in pci_get_interrupt_pin() This patch makes pci_get_interrupt_pin() return values encoded the same way as the "Interrupt Pin" value in PCI config space, i.e., 1=INTA, ..., 4=INTD. pirq_bios_set() is the only in-tree caller of pci_get_interrupt_pin() and pci_get_interrupt_pin() is not exported. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: hpa@zytor.com Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/irq.c | 2 +- drivers/pci/pci.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 373b9afe6d44..399a172f047d 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -533,7 +533,7 @@ static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, { struct pci_dev *bridge; int pin = pci_get_interrupt_pin(dev, &bridge); - return pcibios_set_irq_routing(bridge, pin, irq); + return pcibios_set_irq_routing(bridge, pin - 1, irq); } #endif diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 3222f9022707..bd52ca4c2893 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1374,9 +1374,9 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) pin = dev->pin; if (!pin) return -1; - pin--; + while (dev->bus->self) { - pin = (pin + PCI_SLOT(dev->devfn)) % 4; + pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; dev = dev->bus->self; } *bridge = dev; -- cgit v1.2.3 From f672c392b9c61bcdfb1247561d404b2c3ed4b0b3 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 9 Dec 2008 16:11:51 -0700 Subject: x86/PCI: use config space encoding for interrupt pins Keep "pin" encoded as it is in the "Interrupt Pin" value in PCI config space, i.e., 0=device doesn't use interrupts, 1=INTA, ..., 4=INTD. This makes the bridge INTx swizzle match other architectures. 
Cc: Thomas Gleixner Cc: Ingo Molnar Cc: hpa@zytor.com Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/irq.c | 44 ++++++++++++++++++++------------------------ 1 file changed, 20 insertions(+), 24 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 399a172f047d..cc9d5e254060 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -887,7 +887,6 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) dev_dbg(&dev->dev, "no interrupt pin\n"); return 0; } - pin = pin - 1; /* Find IRQ routing entry */ @@ -897,17 +896,17 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) info = pirq_get_info(dev); if (!info) { dev_dbg(&dev->dev, "PCI INT %c not found in routing table\n", - 'A' + pin); + 'A' + pin - 1); return 0; } - pirq = info->irq[pin].link; - mask = info->irq[pin].bitmap; + pirq = info->irq[pin - 1].link; + mask = info->irq[pin - 1].bitmap; if (!pirq) { - dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin); + dev_dbg(&dev->dev, "PCI INT %c not routed\n", 'A' + pin - 1); return 0; } dev_dbg(&dev->dev, "PCI INT %c -> PIRQ %02x, mask %04x, excl %04x", - 'A' + pin, pirq, mask, pirq_table->exclusive_irqs); + 'A' + pin - 1, pirq, mask, pirq_table->exclusive_irqs); mask &= pcibios_irq_mask; /* Work around broken HP Pavilion Notebooks which assign USB to @@ -949,7 +948,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) newirq = i; } } - dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin, newirq); + dev_dbg(&dev->dev, "PCI INT %c -> newirq %d", 'A' + pin - 1, newirq); /* Check if it is hardcoded */ if ((pirq & 0xf0) == 0xf0) { @@ -977,18 +976,18 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign) return 0; } } - dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin, irq); + dev_info(&dev->dev, "%s PCI INT %c -> IRQ %d\n", msg, 'A' + pin - 1, irq); /* Update IRQ for all devices with the same pirq value */ while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin); if (!pin) continue; - pin--; + info = pirq_get_info(dev2); if (!info) continue; - if (info->irq[pin].link == pirq) { + if (info->irq[pin - 1].link == pirq) { /* * We refuse to override the dev->irq * information. Give a warning! @@ -1055,9 +1054,8 @@ static void __init pcibios_fixup_irqs(void) /* * interrupt pins are numbered starting from 1 */ - pin--; irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, - PCI_SLOT(dev->devfn), pin); + PCI_SLOT(dev->devfn), pin - 1); /* * Busses behind bridges are typically not listed in the * MP-table. 
In this case we have to look up the IRQ @@ -1070,22 +1068,22 @@ static void __init pcibios_fixup_irqs(void) struct pci_dev *bridge = dev->bus->self; int bus; - pin = (pin + PCI_SLOT(dev->devfn)) % 4; + pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; bus = bridge->bus->number; irq = IO_APIC_get_PCI_irq_vector(bus, - PCI_SLOT(bridge->devfn), pin); + PCI_SLOT(bridge->devfn), pin - 1); if (irq >= 0) dev_warn(&dev->dev, "using bridge %s INT %c to " "get IRQ %d\n", pci_name(bridge), - 'A' + pin, irq); + 'A' + pin - 1, irq); } if (irq >= 0) { dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c " "-> IRQ %d\n", - 'A' + pin, irq); + 'A' + pin - 1, irq); dev->irq = irq; } } @@ -1220,12 +1218,10 @@ static int pirq_enable_irq(struct pci_dev *dev) if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { char *msg = ""; - pin--; /* interrupt pins are numbered starting from 1 */ - if (io_apic_assign_pci_irqs) { int irq; - irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin); + irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1); /* * Busses behind bridges are typically not listed in the MP-table. * In this case we have to look up the IRQ based on the parent bus, @@ -1236,20 +1232,20 @@ static int pirq_enable_irq(struct pci_dev *dev) while (irq < 0 && dev->bus->parent) { /* go back to the bridge */ struct pci_dev *bridge = dev->bus->self; - pin = (pin + PCI_SLOT(dev->devfn)) % 4; + pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, - PCI_SLOT(bridge->devfn), pin); + PCI_SLOT(bridge->devfn), pin - 1); if (irq >= 0) dev_warn(&dev->dev, "using bridge %s " "INT %c to get IRQ %d\n", - pci_name(bridge), 'A' + pin, + pci_name(bridge), 'A' + pin - 1, irq); dev = bridge; } dev = temp_dev; if (irq >= 0) { dev_info(&dev->dev, "PCI->APIC IRQ transform: " - "INT %c -> IRQ %d\n", 'A' + pin, irq); + "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); dev->irq = irq; return 0; } else @@ -1268,7 +1264,7 @@ static int pirq_enable_irq(struct pci_dev *dev) return 0; dev_warn(&dev->dev, "can't find IRQ for PCI INT %c%s\n", - 'A' + pin, msg); + 'A' + pin - 1, msg); } return 0; } -- cgit v1.2.3 From 12b955ff63db0b75cfc2d4939696c57b31891ec6 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 9 Dec 2008 16:11:57 -0700 Subject: x86/PCI: minor logic simplications Test "pin" immediately to simplify the subsequent code. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: hpa@zytor.com Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/irq.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index cc9d5e254060..e1d605bfeb47 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1041,6 +1041,9 @@ static void __init pcibios_fixup_irqs(void) dev = NULL; while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + if (!pin) + continue; + #ifdef CONFIG_X86_IO_APIC /* * Recalculate IRQ numbers if we use the I/O APIC. @@ -1048,9 +1051,6 @@ static void __init pcibios_fixup_irqs(void) if (io_apic_assign_pci_irqs) { int irq; - if (!pin) - continue; - /* * interrupt pins are numbered starting from 1 */ @@ -1091,7 +1091,7 @@ static void __init pcibios_fixup_irqs(void) /* * Still no IRQ? Try to lookup one... 
*/ - if (pin && !dev->irq) + if (!dev->irq) pcibios_lookup_irq(dev, 0); } } -- cgit v1.2.3 From b1c86792a0f3cf24a12c1ac7d452d665d90284b1 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 9 Dec 2008 16:12:37 -0700 Subject: PCI: x86: use generic pci_swizzle_interrupt_pin() Use the generic pci_swizzle_interrupt_pin() instead of arch-specific code. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: hpa@zytor.com Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/irq.c | 4 ++-- arch/x86/pci/visws.c | 7 +------ 2 files changed, 3 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index e1d605bfeb47..4064345cf144 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1068,7 +1068,7 @@ static void __init pcibios_fixup_irqs(void) struct pci_dev *bridge = dev->bus->self; int bus; - pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; + pin = pci_swizzle_interrupt_pin(dev, pin); bus = bridge->bus->number; irq = IO_APIC_get_PCI_irq_vector(bus, PCI_SLOT(bridge->devfn), pin - 1); @@ -1232,7 +1232,7 @@ static int pirq_enable_irq(struct pci_dev *dev) while (irq < 0 && dev->bus->parent) { /* go back to the bridge */ struct pci_dev *bridge = dev->bus->self; - pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1; + pin = pci_swizzle_interrupt_pin(dev, pin); irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, PCI_SLOT(bridge->devfn), pin - 1); if (irq >= 0) diff --git a/arch/x86/pci/visws.c b/arch/x86/pci/visws.c index 16d0c0eb0d19..2c54e7e03f53 100644 --- a/arch/x86/pci/visws.c +++ b/arch/x86/pci/visws.c @@ -24,17 +24,12 @@ static void pci_visws_disable_irq(struct pci_dev *dev) { } unsigned int pci_bus0, pci_bus1; -static inline u8 bridge_swizzle(u8 pin, u8 slot) -{ - return (((pin - 1) + slot) % 4) + 1; -} - static u8 __init visws_swizzle(struct pci_dev *dev, u8 *pinp) { u8 pin = *pinp; while (dev->bus->self) { /* Move up the chain of bridges. */ - pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); + pin = pci_swizzle_interrupt_pin(dev, pin); dev = dev->bus->self; } *pinp = pin; -- cgit v1.2.3 From 904d6a303361a85bfa4c8181ef62a24edb8da0a8 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 16 Dec 2008 21:37:20 -0700 Subject: PCI: x86/visws: use generic INTx swizzle from PCI core Use the generic pci_common_swizzle() instead of arch-specific code. Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/visws.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/visws.c b/arch/x86/pci/visws.c index 2c54e7e03f53..bcead7a46871 100644 --- a/arch/x86/pci/visws.c +++ b/arch/x86/pci/visws.c @@ -24,19 +24,6 @@ static void pci_visws_disable_irq(struct pci_dev *dev) { } unsigned int pci_bus0, pci_bus1; -static u8 __init visws_swizzle(struct pci_dev *dev, u8 *pinp) -{ - u8 pin = *pinp; - - while (dev->bus->self) { /* Move up the chain of bridges. 
*/ - pin = pci_swizzle_interrupt_pin(dev, pin); - dev = dev->bus->self; - } - *pinp = pin; - - return PCI_SLOT(dev->devfn); -} - static int __init visws_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { int irq, bus = dev->bus->number; @@ -101,7 +88,7 @@ int __init pci_visws_init(void) raw_pci_ops = &pci_direct_conf1; pci_scan_bus_with_sysdata(pci_bus0); pci_scan_bus_with_sysdata(pci_bus1); - pci_fixup_irqs(visws_swizzle, visws_map_irq); + pci_fixup_irqs(pci_common_swizzle, visws_map_irq); pcibios_resource_survey(); return 0; } -- cgit v1.2.3 From 2b8c2efe44ed897fc958131d70addc89876d806b Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Thu, 18 Dec 2008 16:34:51 -0700 Subject: x86/PCI: use dev_printk for PCI bus locality messages Since pci_bus has a struct device, use dev_printk directly instead of faking it by hand. Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/x86/pci/acpi.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 6c7683709c44..c0ecf250fe51 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -210,12 +210,10 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do if (bus && node != -1) { #ifdef CONFIG_ACPI_NUMA if (pxm >= 0) - printk(KERN_DEBUG - "pci %04x:%02x: bus on NUMA node %d (pxm %d)\n", - domain, busnum, node, pxm); + dev_printk(KERN_DEBUG, &bus->dev, + "on NUMA node %d (pxm %d)\n", node, pxm); #else - printk(KERN_DEBUG "pci %04x:%02x: bus on NUMA node %d\n", - domain, busnum, node); + dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node); #endif } -- cgit v1.2.3 From 16cf0ebc35dd63f72628ba1246132a6fd17bced2 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 5 Jan 2009 14:50:27 +0100 Subject: x86/PCI: Do not use interrupt links for devices using MSI-X pcibios_enable_device() and pcibios_disable_device() don't handle IRQs for devices that have MSI enabled and it should treat the devices with MSI-X enabled in the same way. Signed-off-by: Rafael J. Wysocki Acked-by: Ingo Molnar Signed-off-by: Jesse Barnes --- arch/x86/pci/common.c | 4 ++-- include/linux/pci.h | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 9ab8509f7b15..82d22fc601ae 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -551,14 +551,14 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) if ((err = pci_enable_resources(dev, mask)) < 0) return err; - if (!dev->msi_enabled) + if (!pci_dev_msi_enabled(dev)) return pcibios_enable_irq(dev); return 0; } void pcibios_disable_device (struct pci_dev *dev) { - if (!dev->msi_enabled && pcibios_disable_irq) + if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq) pcibios_disable_irq(dev); } diff --git a/include/linux/pci.h b/include/linux/pci.h index 0f6d2bb1df9c..80f8b8b65fde 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -336,6 +336,15 @@ struct pci_bus { #define pci_bus_b(n) list_entry(n, struct pci_bus, node) #define to_pci_bus(n) container_of(n, struct pci_bus, dev) +#ifdef CONFIG_PCI_MSI +static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) +{ + return pci_dev->msi_enabled || pci_dev->msix_enabled; +} +#else +static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } +#endif + /* * Error values that may be returned by PCI functions. 
*/ -- cgit v1.2.3 From fc81be8ca29e28bfb89aa23359036a8ad4118d0f Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 18 Dec 2008 00:28:27 +0100 Subject: oprofile: rename variable ibs_allowed to has_ibs in op_model_amd.c This patch renames ibs_allowed to has_ibs. Varible name fits better now. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index c5b5c7fb3ced..423a95438cbc 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -106,7 +106,7 @@ struct ibs_op_sample { unsigned int ibs_dc_phys_high; }; -static int ibs_allowed; /* AMD Family10h and later */ +static int has_ibs; /* AMD Family10h and later */ struct op_ibs_config { unsigned long op_enabled; @@ -201,7 +201,7 @@ op_amd_handle_ibs(struct pt_regs * const regs, struct ibs_fetch_sample ibs_fetch; struct ibs_op_sample ibs_op; - if (!ibs_allowed) + if (!has_ibs) return 1; if (ibs_config.fetch_enabled) { @@ -305,14 +305,14 @@ static void op_amd_start(struct op_msrs const * const msrs) } #ifdef CONFIG_OPROFILE_IBS - if (ibs_allowed && ibs_config.fetch_enabled) { + if (has_ibs && ibs_config.fetch_enabled) { low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ + IBS_FETCH_HIGH_ENABLE; wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); } - if (ibs_allowed && ibs_config.op_enabled) { + if (has_ibs && ibs_config.op_enabled) { low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */ + IBS_OP_LOW_ENABLE; @@ -341,14 +341,14 @@ static void op_amd_stop(struct op_msrs const * const msrs) } #ifdef CONFIG_OPROFILE_IBS - if (ibs_allowed && ibs_config.fetch_enabled) { + if (has_ibs && ibs_config.fetch_enabled) { /* clear max count and enable */ low = 0; high = 0; wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); } - if (ibs_allowed && ibs_config.op_enabled) { + if (has_ibs && ibs_config.op_enabled) { /* clear max count and enable */ low = 0; high = 0; @@ -437,20 +437,20 @@ static int init_ibs_nmi(void) /* uninitialize the APIC for the IBS interrupts if needed */ static void clear_ibs_nmi(void) { - if (ibs_allowed) + if (has_ibs) on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1); } /* initialize the APIC for the IBS interrupts if available */ static void ibs_init(void) { - ibs_allowed = boot_cpu_has(X86_FEATURE_IBS); + has_ibs = boot_cpu_has(X86_FEATURE_IBS); - if (!ibs_allowed) + if (!has_ibs) return; if (init_ibs_nmi()) { - ibs_allowed = 0; + has_ibs = 0; return; } @@ -459,7 +459,7 @@ static void ibs_init(void) static void ibs_exit(void) { - if (!ibs_allowed) + if (!has_ibs) return; clear_ibs_nmi(); @@ -479,7 +479,7 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root) if (ret) return ret; - if (!ibs_allowed) + if (!has_ibs) return ret; /* model specific files */ -- cgit v1.2.3 From ae735e9964b4584923f2997d98a8d80ae9c1a75c Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 25 Dec 2008 17:26:07 +0100 Subject: oprofile: rework implementation of cpu buffer events Special events such as task or context switches are marked with an escape code in the cpu buffer followed by an event code or a task identifier. There is one escape code per event. To make escape sequences also available for data samples the internal cpu buffer format must be changed. 
The current implementation does not allow the extension of event codes since this would lead to collisions with the task identifiers. To avoid this, this patch introduces an event mask that allows the storage of multiple events with one escape code. Now, task identifiers are stored in the data section of the sample. The implementation also allows the usage of custom data in a sample. As a side effect the new code is much more readable and easier to understand. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 8 +-- drivers/oprofile/buffer_sync.c | 42 ++++++------ drivers/oprofile/cpu_buffer.c | 139 +++++++++++++++++++++------------------ drivers/oprofile/cpu_buffer.h | 12 ++-- 4 files changed, 106 insertions(+), 95 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 423a95438cbc..f101724db80a 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -2,7 +2,7 @@ * @file op_model_amd.c * athlon / K7 / K8 / Family 10h model-specific MSR operations * - * @remark Copyright 2002-2008 OProfile authors + * @remark Copyright 2002-2009 OProfile authors * @remark Read the file COPYING * * @author John Levon @@ -10,7 +10,7 @@ * @author Graydon Hoare * @author Robert Richter * @author Barry Kasindorf -*/ + */ #include #include @@ -62,8 +62,8 @@ static unsigned long reset_value[NUM_COUNTERS]; /* Codes used in cpu_buffer.c */ /* This produces duplicate code, need to be fixed */ -#define IBS_FETCH_BEGIN 3 -#define IBS_OP_BEGIN 4 +#define IBS_FETCH_BEGIN (1UL << 4) +#define IBS_OP_BEGIN (1UL << 5) /* * The function interface needs to be fixed, something like add diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 908202afbae9..d969bb13a252 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -1,11 +1,12 @@ /** * @file buffer_sync.c * - * @remark Copyright 2002 OProfile authors + * @remark Copyright 2002-2009 OProfile authors * @remark Read the file COPYING * * @author John Levon * @author Barry Kasindorf + * @author Robert Richter * * This is the core of the buffer management. 
Each * CPU buffer is processed and entered into the @@ -529,6 +530,7 @@ void sync_buffer(int cpu) sync_buffer_state state = sb_buffer_start; unsigned int i; unsigned long available; + unsigned long flags; struct op_entry entry; struct op_sample *sample; @@ -545,38 +547,34 @@ void sync_buffer(int cpu) break; if (is_code(sample->eip)) { - switch (sample->event) { - case 0: - case CPU_IS_KERNEL: + flags = sample->event; + if (flags & TRACE_BEGIN) { + state = sb_bt_start; + add_trace_begin(); + } + if (flags & KERNEL_CTX_SWITCH) { /* kernel/userspace switch */ - in_kernel = sample->event; + in_kernel = flags & IS_KERNEL; if (state == sb_buffer_start) state = sb_sample_start; - add_kernel_ctx_switch(sample->event); - break; - case CPU_TRACE_BEGIN: - state = sb_bt_start; - add_trace_begin(); - break; -#ifdef CONFIG_OPROFILE_IBS - case IBS_FETCH_BEGIN: - add_ibs_begin(cpu, IBS_FETCH_CODE, mm); - break; - case IBS_OP_BEGIN: - add_ibs_begin(cpu, IBS_OP_CODE, mm); - break; -#endif - default: + add_kernel_ctx_switch(flags & IS_KERNEL); + } + if (flags & USER_CTX_SWITCH) { /* userspace context switch */ oldmm = mm; - new = (struct task_struct *)sample->event; + new = (struct task_struct *)sample->data[0]; release_mm(oldmm); mm = take_tasks_mm(new); if (mm != oldmm) cookie = get_exec_dcookie(mm); add_user_ctx_switch(new, cookie); - break; } +#ifdef CONFIG_OPROFILE_IBS + if (flags & IBS_FETCH_BEGIN) + add_ibs_begin(cpu, IBS_FETCH_CODE, mm); + if (flags & IBS_OP_BEGIN) + add_ibs_begin(cpu, IBS_OP_CODE, mm); +#endif continue; } diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 400f7fcffdbe..e859d23cfc57 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -212,6 +212,59 @@ unsigned long op_cpu_buffer_entries(int cpu) + ring_buffer_entries_cpu(op_ring_buffer_write, cpu); } +static int +op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace, + int is_kernel, struct task_struct *task) +{ + struct op_entry entry; + struct op_sample *sample; + unsigned long flags; + int size; + + flags = 0; + + if (backtrace) + flags |= TRACE_BEGIN; + + /* notice a switch from user->kernel or vice versa */ + is_kernel = !!is_kernel; + if (cpu_buf->last_is_kernel != is_kernel) { + cpu_buf->last_is_kernel = is_kernel; + flags |= KERNEL_CTX_SWITCH; + if (is_kernel) + flags |= IS_KERNEL; + } + + /* notice a task switch */ + if (cpu_buf->last_task != task) { + cpu_buf->last_task = task; + flags |= USER_CTX_SWITCH; + } + + if (!flags) + /* nothing to do */ + return 0; + + if (flags & USER_CTX_SWITCH) + size = 1; + else + size = 0; + + sample = op_cpu_buffer_write_reserve(&entry, size); + if (!sample) + return -ENOMEM; + + sample->eip = ESCAPE_CODE; + sample->event = flags; + + if (size) + sample->data[0] = (unsigned long)task; + + op_cpu_buffer_write_commit(&entry); + + return 0; +} + static inline int op_add_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, unsigned long event) @@ -229,26 +282,18 @@ op_add_sample(struct oprofile_cpu_buffer *cpu_buf, return op_cpu_buffer_write_commit(&entry); } -static inline int -add_code(struct oprofile_cpu_buffer *buffer, unsigned long value) -{ - return op_add_sample(buffer, ESCAPE_CODE, value); -} - -/* This must be safe from any context. It's safe writing here - * because of the head/tail separation of the writer and reader - * of the CPU buffer. +/* + * This must be safe from any context. 
* * is_kernel is needed because on some architectures you cannot * tell if you are in kernel or user space simply by looking at * pc. We tag this in the buffer by generating kernel enter/exit * events whenever is_kernel changes */ -static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, - int is_kernel, unsigned long event) +static int +log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, + unsigned long backtrace, int is_kernel, unsigned long event) { - struct task_struct *task; - cpu_buf->sample_received++; if (pc == ESCAPE_CODE) { @@ -256,23 +301,8 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, return 0; } - is_kernel = !!is_kernel; - - task = current; - - /* notice a switch from user->kernel or vice versa */ - if (cpu_buf->last_is_kernel != is_kernel) { - cpu_buf->last_is_kernel = is_kernel; - if (add_code(cpu_buf, is_kernel)) - goto fail; - } - - /* notice a task switch */ - if (cpu_buf->last_task != task) { - cpu_buf->last_task = task; - if (add_code(cpu_buf, (unsigned long)task)) - goto fail; - } + if (op_add_code(cpu_buf, backtrace, is_kernel, current)) + goto fail; if (op_add_sample(cpu_buf, pc, event)) goto fail; @@ -286,7 +316,6 @@ fail: static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) { - add_code(cpu_buf, CPU_TRACE_BEGIN); cpu_buf->tracing = 1; } @@ -300,21 +329,21 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel) { struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); - - if (!oprofile_backtrace_depth) { - log_sample(cpu_buf, pc, is_kernel, event); - return; - } - - oprofile_begin_trace(cpu_buf); + unsigned long backtrace = oprofile_backtrace_depth; /* * if log_sample() fail we can't backtrace since we lost the * source of this event */ - if (log_sample(cpu_buf, pc, is_kernel, event)) - oprofile_ops.backtrace(regs, oprofile_backtrace_depth); + if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event)) + /* failed */ + return; + if (!backtrace) + return; + + oprofile_begin_trace(cpu_buf); + oprofile_ops.backtrace(regs, backtrace); oprofile_end_trace(cpu_buf); } @@ -339,29 +368,14 @@ void oprofile_add_ibs_sample(struct pt_regs * const regs, { int is_kernel = !user_mode(regs); struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); - struct task_struct *task; int fail = 0; cpu_buf->sample_received++; - /* notice a switch from user->kernel or vice versa */ - if (cpu_buf->last_is_kernel != is_kernel) { - if (add_code(cpu_buf, is_kernel)) - goto fail; - cpu_buf->last_is_kernel = is_kernel; - } + /* backtraces disabled for ibs */ + fail = fail || op_add_code(cpu_buf, 0, is_kernel, current); - /* notice a task switch */ - if (!is_kernel) { - task = current; - if (cpu_buf->last_task != task) { - if (add_code(cpu_buf, (unsigned long)task)) - goto fail; - cpu_buf->last_task = task; - } - } - - fail = fail || add_code(cpu_buf, ibs_code); + fail = fail || op_add_sample(cpu_buf, ESCAPE_CODE, ibs_code); fail = fail || op_add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]); fail = fail || op_add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]); fail = fail || op_add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]); @@ -372,11 +386,8 @@ void oprofile_add_ibs_sample(struct pt_regs * const regs, fail = fail || op_add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]); } - if (!fail) - return; - -fail: - cpu_buf->sample_lost_overflow++; + if (fail) + cpu_buf->sample_lost_overflow++; } #endif @@ -384,7 +395,7 @@ fail: void 
oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) { struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); - log_sample(cpu_buf, pc, is_kernel, event); + log_sample(cpu_buf, pc, 0, is_kernel, event); } void oprofile_add_trace(unsigned long pc) diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index d7c0545ef8b2..e634dcf2f26f 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h @@ -78,10 +78,12 @@ int op_cpu_buffer_write_commit(struct op_entry *entry); struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu); unsigned long op_cpu_buffer_entries(int cpu); -/* transient events for the CPU buffer -> event buffer */ -#define CPU_IS_KERNEL 1 -#define CPU_TRACE_BEGIN 2 -#define IBS_FETCH_BEGIN 3 -#define IBS_OP_BEGIN 4 +/* extra data flags */ +#define KERNEL_CTX_SWITCH (1UL << 0) +#define IS_KERNEL (1UL << 1) +#define TRACE_BEGIN (1UL << 2) +#define USER_CTX_SWITCH (1UL << 3) +#define IBS_FETCH_BEGIN (1UL << 4) +#define IBS_OP_BEGIN (1UL << 5) #endif /* OPROFILE_CPU_BUFFER_H */ -- cgit v1.2.3 From 1acda878e20ea0cd3708ba66dca67d52eaafdd2b Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 5 Jan 2009 10:35:31 +0100 Subject: oprofile: use new data sample format for ibs The new ring buffer implementation allows the storage of samples with different size. This patch implements the usage of the new sample format to store ibs samples in the cpu buffer. Until now, writing to the cpu buffer could lead to incomplete sampling sequences since IBS samples were transfered in multiple samples. Due to a full buffer, data could be lost at any time. This can't happen any more since the complete data is reserved in advance and then stored in a single sample. Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 119 ++++++++++++++------------------------- drivers/oprofile/buffer_sync.c | 53 ++++------------- drivers/oprofile/cpu_buffer.c | 39 +++++++------ drivers/oprofile/cpu_buffer.h | 2 - 4 files changed, 76 insertions(+), 137 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index f101724db80a..cf310aeb462c 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -22,6 +22,7 @@ #include "op_x86_model.h" #include "op_counter.h" +#include "../../../drivers/oprofile/cpu_buffer.h" #define NUM_COUNTERS 4 #define NUM_CONTROLS 4 @@ -60,51 +61,16 @@ static unsigned long reset_value[NUM_COUNTERS]; #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ -/* Codes used in cpu_buffer.c */ -/* This produces duplicate code, need to be fixed */ -#define IBS_FETCH_BEGIN (1UL << 4) -#define IBS_OP_BEGIN (1UL << 5) - /* * The function interface needs to be fixed, something like add * data. Should then be added to linux/oprofile.h. 
*/ -extern void -oprofile_add_ibs_sample(struct pt_regs * const regs, - unsigned int * const ibs_sample, int ibs_code); - -struct ibs_fetch_sample { - /* MSRC001_1031 IBS Fetch Linear Address Register */ - unsigned int ibs_fetch_lin_addr_low; - unsigned int ibs_fetch_lin_addr_high; - /* MSRC001_1030 IBS Fetch Control Register */ - unsigned int ibs_fetch_ctl_low; - unsigned int ibs_fetch_ctl_high; - /* MSRC001_1032 IBS Fetch Physical Address Register */ - unsigned int ibs_fetch_phys_addr_low; - unsigned int ibs_fetch_phys_addr_high; -}; +extern +void oprofile_add_data(struct op_entry *entry, struct pt_regs * const regs, + unsigned long pc, int code, int size); -struct ibs_op_sample { - /* MSRC001_1034 IBS Op Logical Address Register (IbsRIP) */ - unsigned int ibs_op_rip_low; - unsigned int ibs_op_rip_high; - /* MSRC001_1035 IBS Op Data Register */ - unsigned int ibs_op_data1_low; - unsigned int ibs_op_data1_high; - /* MSRC001_1036 IBS Op Data 2 Register */ - unsigned int ibs_op_data2_low; - unsigned int ibs_op_data2_high; - /* MSRC001_1037 IBS Op Data 3 Register */ - unsigned int ibs_op_data3_low; - unsigned int ibs_op_data3_high; - /* MSRC001_1038 IBS DC Linear Address Register (IbsDcLinAd) */ - unsigned int ibs_dc_linear_low; - unsigned int ibs_dc_linear_high; - /* MSRC001_1039 IBS DC Physical Address Register (IbsDcPhysAd) */ - unsigned int ibs_dc_phys_low; - unsigned int ibs_dc_phys_high; -}; +#define IBS_FETCH_SIZE 6 +#define IBS_OP_SIZE 12 static int has_ibs; /* AMD Family10h and later */ @@ -197,9 +163,9 @@ static inline int op_amd_handle_ibs(struct pt_regs * const regs, struct op_msrs const * const msrs) { - unsigned int low, high; - struct ibs_fetch_sample ibs_fetch; - struct ibs_op_sample ibs_op; + u32 low, high; + u64 msr; + struct op_entry entry; if (!has_ibs) return 1; @@ -207,21 +173,19 @@ op_amd_handle_ibs(struct pt_regs * const regs, if (ibs_config.fetch_enabled) { rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); if (high & IBS_FETCH_HIGH_VALID_BIT) { - ibs_fetch.ibs_fetch_ctl_high = high; - ibs_fetch.ibs_fetch_ctl_low = low; - rdmsr(MSR_AMD64_IBSFETCHLINAD, low, high); - ibs_fetch.ibs_fetch_lin_addr_high = high; - ibs_fetch.ibs_fetch_lin_addr_low = low; - rdmsr(MSR_AMD64_IBSFETCHPHYSAD, low, high); - ibs_fetch.ibs_fetch_phys_addr_high = high; - ibs_fetch.ibs_fetch_phys_addr_low = low; - - oprofile_add_ibs_sample(regs, - (unsigned int *)&ibs_fetch, - IBS_FETCH_BEGIN); + rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr); + oprofile_add_data(&entry, regs, msr, IBS_FETCH_CODE, + IBS_FETCH_SIZE); + op_cpu_buffer_add_data(&entry, (u32)msr); + op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + op_cpu_buffer_add_data(&entry, low); + op_cpu_buffer_add_data(&entry, high); + rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr); + op_cpu_buffer_add_data(&entry, (u32)msr); + op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + op_cpu_buffer_write_commit(&entry); /* reenable the IRQ */ - rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); high &= ~IBS_FETCH_HIGH_VALID_BIT; high |= IBS_FETCH_HIGH_ENABLE; low &= IBS_FETCH_LOW_MAX_CNT_MASK; @@ -232,30 +196,29 @@ op_amd_handle_ibs(struct pt_regs * const regs, if (ibs_config.op_enabled) { rdmsr(MSR_AMD64_IBSOPCTL, low, high); if (low & IBS_OP_LOW_VALID_BIT) { - rdmsr(MSR_AMD64_IBSOPRIP, low, high); - ibs_op.ibs_op_rip_low = low; - ibs_op.ibs_op_rip_high = high; - rdmsr(MSR_AMD64_IBSOPDATA, low, high); - ibs_op.ibs_op_data1_low = low; - ibs_op.ibs_op_data1_high = high; - rdmsr(MSR_AMD64_IBSOPDATA2, low, high); - ibs_op.ibs_op_data2_low = low; - ibs_op.ibs_op_data2_high = high; - 
rdmsr(MSR_AMD64_IBSOPDATA3, low, high); - ibs_op.ibs_op_data3_low = low; - ibs_op.ibs_op_data3_high = high; - rdmsr(MSR_AMD64_IBSDCLINAD, low, high); - ibs_op.ibs_dc_linear_low = low; - ibs_op.ibs_dc_linear_high = high; - rdmsr(MSR_AMD64_IBSDCPHYSAD, low, high); - ibs_op.ibs_dc_phys_low = low; - ibs_op.ibs_dc_phys_high = high; + rdmsrl(MSR_AMD64_IBSOPRIP, msr); + oprofile_add_data(&entry, regs, msr, IBS_OP_CODE, + IBS_OP_SIZE); + op_cpu_buffer_add_data(&entry, (u32)msr); + op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + rdmsrl(MSR_AMD64_IBSOPDATA, msr); + op_cpu_buffer_add_data(&entry, (u32)msr); + op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + rdmsrl(MSR_AMD64_IBSOPDATA2, msr); + op_cpu_buffer_add_data(&entry, (u32)msr); + op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + rdmsrl(MSR_AMD64_IBSOPDATA3, msr); + op_cpu_buffer_add_data(&entry, (u32)msr); + op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + rdmsrl(MSR_AMD64_IBSDCLINAD, msr); + op_cpu_buffer_add_data(&entry, (u32)msr); + op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr); + op_cpu_buffer_add_data(&entry, (u32)msr); + op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + op_cpu_buffer_write_commit(&entry); /* reenable the IRQ */ - oprofile_add_ibs_sample(regs, - (unsigned int *)&ibs_op, - IBS_OP_BEGIN); - rdmsr(MSR_AMD64_IBSOPCTL, low, high); high = 0; low &= ~IBS_OP_LOW_VALID_BIT; low |= IBS_OP_LOW_ENABLE; diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index f9031d31eeb7..d692fdc1a211 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -318,29 +318,18 @@ static void add_trace_begin(void) #ifdef CONFIG_OPROFILE_IBS -#define IBS_FETCH_CODE_SIZE 2 -#define IBS_OP_CODE_SIZE 5 - -/* - * Add IBS fetch and op entries to event buffer - */ -static void add_ibs_begin(int cpu, int code, struct mm_struct *mm) +static void add_data(struct op_entry *entry, struct mm_struct *mm) { - unsigned long pc; - int i, count; - unsigned long cookie = 0; + unsigned long code, pc, val; + unsigned long cookie; off_t offset; - struct op_entry entry; - struct op_sample *sample; - sample = op_cpu_buffer_read_entry(&entry, cpu); - if (!sample) + if (!op_cpu_buffer_get_data(entry, &code)) + return; + if (!op_cpu_buffer_get_data(entry, &pc)) + return; + if (!op_cpu_buffer_get_size(entry)) return; - pc = sample->eip; - -#ifdef __LP64__ - pc += sample->event << 32; -#endif if (mm) { cookie = lookup_dcookie(mm, pc, &offset); @@ -362,24 +351,8 @@ static void add_ibs_begin(int cpu, int code, struct mm_struct *mm) add_event_entry(code); add_event_entry(offset); /* Offset from Dcookie */ - /* we send the Dcookie offset, but send the raw Linear Add also*/ - add_event_entry(sample->eip); - add_event_entry(sample->event); - - if (code == IBS_FETCH_CODE) - count = IBS_FETCH_CODE_SIZE; /*IBS FETCH is 2 int64s*/ - else - count = IBS_OP_CODE_SIZE; /*IBS OP is 5 int64s*/ - - for (i = 0; i < count; i++) { - sample = op_cpu_buffer_read_entry(&entry, cpu); - if (!sample) - return; - add_event_entry(sample->eip); - add_event_entry(sample->event); - } - - return; + while (op_cpu_buffer_get_data(entry, &val)) + add_event_entry(val); } #endif @@ -572,10 +545,8 @@ void sync_buffer(int cpu) add_user_ctx_switch(new, cookie); } #ifdef CONFIG_OPROFILE_IBS - if (flags & IBS_FETCH_BEGIN) - add_ibs_begin(cpu, IBS_FETCH_CODE, mm); - if (flags & IBS_OP_BEGIN) - add_ibs_begin(cpu, IBS_OP_CODE, mm); + if (op_cpu_buffer_get_size(&entry)) + add_data(&entry, mm); #endif continue; } diff --git 
a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 1b6590746be4..ddba9d01f09b 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -363,31 +363,38 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) #ifdef CONFIG_OPROFILE_IBS -void oprofile_add_ibs_sample(struct pt_regs * const regs, - unsigned int * const ibs_sample, int ibs_code) +/* + * Add samples with data to the ring buffer. + * + * Use op_cpu_buffer_add_data(&entry, val) to add data and + * op_cpu_buffer_write_commit(&entry) to commit the sample. + */ +void oprofile_add_data(struct op_entry *entry, struct pt_regs * const regs, + unsigned long pc, int code, int size) { + struct op_sample *sample; int is_kernel = !user_mode(regs); struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); - int fail = 0; cpu_buf->sample_received++; - /* backtraces disabled for ibs */ - fail = fail || op_add_code(cpu_buf, 0, is_kernel, current); + /* no backtraces for samples with data */ + if (op_add_code(cpu_buf, 0, is_kernel, current)) + goto fail; - fail = fail || op_add_sample(cpu_buf, ESCAPE_CODE, ibs_code); - fail = fail || op_add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]); - fail = fail || op_add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]); - fail = fail || op_add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]); + sample = op_cpu_buffer_write_reserve(entry, size + 2); + if (!sample) + goto fail; + sample->eip = ESCAPE_CODE; + sample->event = 0; /* no flags */ - if (ibs_code == IBS_OP_BEGIN) { - fail = fail || op_add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]); - fail = fail || op_add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]); - fail = fail || op_add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]); - } + op_cpu_buffer_add_data(entry, code); + op_cpu_buffer_add_data(entry, pc); + + return; - if (fail) - cpu_buf->sample_lost_overflow++; +fail: + cpu_buf->sample_lost_overflow++; } #endif diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index f34376046573..525cc4d13d8d 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h @@ -115,7 +115,5 @@ int op_cpu_buffer_get_data(struct op_entry *entry, unsigned long *val) #define IS_KERNEL (1UL << 1) #define TRACE_BEGIN (1UL << 2) #define USER_CTX_SWITCH (1UL << 3) -#define IBS_FETCH_BEGIN (1UL << 4) -#define IBS_OP_BEGIN (1UL << 5) #endif /* OPROFILE_CPU_BUFFER_H */ -- cgit v1.2.3 From 14f0ca8eaea42a5b5a69cfcb699665dd2618db5f Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Wed, 7 Jan 2009 21:50:22 +0100 Subject: oprofile: make new cpu buffer functions part of the api This patch creates the new functions oprofile_write_reserve() oprofile_add_data() oprofile_write_commit() and makes them part of the oprofile api. 
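For illustration, a minimal, hypothetical sketch of the intended call
sequence; only the three oprofile_*() calls and struct op_entry are part
of the new api, everything else -- the helper, its arguments, the two
data words -- is a placeholder, and the real conversion of the IBS
handlers follows in the op_model_amd.c hunks below:

	#include <linux/types.h>
	#include <linux/oprofile.h>

	/*
	 * Hypothetical helper, for illustration only: 'code' and the
	 * two payload words stand in for whatever a driver records.
	 */
	static void example_log_sample(struct pt_regs * const regs,
				       unsigned long pc, int code, u64 val)
	{
		struct op_entry entry;

		/* reserve one sample with room for two extra data words */
		oprofile_write_reserve(&entry, regs, pc, code, 2);

		/* append the payload word by word */
		oprofile_add_data(&entry, (u32)val);
		oprofile_add_data(&entry, (u32)(val >> 32));

		/* publish the completed sample to the per-cpu ring buffer */
		oprofile_write_commit(&entry);
	}

Reservation and commit bracket the data words, so a reader of the ring
buffer never sees a partially written sample.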
Signed-off-by: Robert Richter --- arch/x86/oprofile/op_model_amd.c | 57 +++++++++++++++++----------------------- drivers/oprofile/cpu_buffer.c | 17 +++++++++--- drivers/oprofile/cpu_buffer.h | 8 +----- include/linux/oprofile.h | 18 +++++++++++++ 4 files changed, 57 insertions(+), 43 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index cf310aeb462c..8fdf06e4edf9 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -22,7 +22,6 @@ #include "op_x86_model.h" #include "op_counter.h" -#include "../../../drivers/oprofile/cpu_buffer.h" #define NUM_COUNTERS 4 #define NUM_CONTROLS 4 @@ -61,14 +60,6 @@ static unsigned long reset_value[NUM_COUNTERS]; #define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ #define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ -/* - * The function interface needs to be fixed, something like add - * data. Should then be added to linux/oprofile.h. - */ -extern -void oprofile_add_data(struct op_entry *entry, struct pt_regs * const regs, - unsigned long pc, int code, int size); - #define IBS_FETCH_SIZE 6 #define IBS_OP_SIZE 12 @@ -174,16 +165,16 @@ op_amd_handle_ibs(struct pt_regs * const regs, rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); if (high & IBS_FETCH_HIGH_VALID_BIT) { rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr); - oprofile_add_data(&entry, regs, msr, IBS_FETCH_CODE, - IBS_FETCH_SIZE); - op_cpu_buffer_add_data(&entry, (u32)msr); - op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); - op_cpu_buffer_add_data(&entry, low); - op_cpu_buffer_add_data(&entry, high); + oprofile_write_reserve(&entry, regs, msr, + IBS_FETCH_CODE, IBS_FETCH_SIZE); + oprofile_add_data(&entry, (u32)msr); + oprofile_add_data(&entry, (u32)(msr >> 32)); + oprofile_add_data(&entry, low); + oprofile_add_data(&entry, high); rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr); - op_cpu_buffer_add_data(&entry, (u32)msr); - op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); - op_cpu_buffer_write_commit(&entry); + oprofile_add_data(&entry, (u32)msr); + oprofile_add_data(&entry, (u32)(msr >> 32)); + oprofile_write_commit(&entry); /* reenable the IRQ */ high &= ~IBS_FETCH_HIGH_VALID_BIT; @@ -197,26 +188,26 @@ op_amd_handle_ibs(struct pt_regs * const regs, rdmsr(MSR_AMD64_IBSOPCTL, low, high); if (low & IBS_OP_LOW_VALID_BIT) { rdmsrl(MSR_AMD64_IBSOPRIP, msr); - oprofile_add_data(&entry, regs, msr, IBS_OP_CODE, - IBS_OP_SIZE); - op_cpu_buffer_add_data(&entry, (u32)msr); - op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + oprofile_write_reserve(&entry, regs, msr, + IBS_OP_CODE, IBS_OP_SIZE); + oprofile_add_data(&entry, (u32)msr); + oprofile_add_data(&entry, (u32)(msr >> 32)); rdmsrl(MSR_AMD64_IBSOPDATA, msr); - op_cpu_buffer_add_data(&entry, (u32)msr); - op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + oprofile_add_data(&entry, (u32)msr); + oprofile_add_data(&entry, (u32)(msr >> 32)); rdmsrl(MSR_AMD64_IBSOPDATA2, msr); - op_cpu_buffer_add_data(&entry, (u32)msr); - op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + oprofile_add_data(&entry, (u32)msr); + oprofile_add_data(&entry, (u32)(msr >> 32)); rdmsrl(MSR_AMD64_IBSOPDATA3, msr); - op_cpu_buffer_add_data(&entry, (u32)msr); - op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + oprofile_add_data(&entry, (u32)msr); + oprofile_add_data(&entry, (u32)(msr >> 32)); rdmsrl(MSR_AMD64_IBSDCLINAD, msr); - op_cpu_buffer_add_data(&entry, (u32)msr); - op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); + oprofile_add_data(&entry, (u32)msr); + oprofile_add_data(&entry, (u32)(msr >> 32)); 
rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr); - op_cpu_buffer_add_data(&entry, (u32)msr); - op_cpu_buffer_add_data(&entry, (u32)(msr >> 32)); - op_cpu_buffer_write_commit(&entry); + oprofile_add_data(&entry, (u32)msr); + oprofile_add_data(&entry, (u32)(msr >> 32)); + oprofile_write_commit(&entry); /* reenable the IRQ */ high = 0; diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index b846af632c81..2e03b6d796d3 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -364,10 +364,11 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) /* * Add samples with data to the ring buffer. * - * Use op_cpu_buffer_add_data(&entry, val) to add data and - * op_cpu_buffer_write_commit(&entry) to commit the sample. + * Use oprofile_add_data(&entry, val) to add data and + * oprofile_write_commit(&entry) to commit the sample. */ -void oprofile_add_data(struct op_entry *entry, struct pt_regs * const regs, +void +oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, unsigned long pc, int code, int size) { struct op_sample *sample; @@ -395,6 +396,16 @@ fail: cpu_buf->sample_lost_overflow++; } +int oprofile_add_data(struct op_entry *entry, unsigned long val) +{ + return op_cpu_buffer_add_data(entry, val); +} + +int oprofile_write_commit(struct op_entry *entry) +{ + return op_cpu_buffer_write_commit(entry); +} + void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) { struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index 525cc4d13d8d..63f81c44846a 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h @@ -35,13 +35,7 @@ struct op_sample { unsigned long data[0]; }; -struct op_entry { - struct ring_buffer_event *event; - struct op_sample *sample; - unsigned long irq_flags; - unsigned long size; - unsigned long *data; -}; +struct op_entry; struct oprofile_cpu_buffer { unsigned long buffer_size; diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 1ce9fe572e51..1d9518bc4c58 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -164,4 +164,22 @@ void oprofile_put_buff(unsigned long *buf, unsigned int start, unsigned long oprofile_get_cpu_buffer_size(void); void oprofile_cpu_buffer_inc_smpl_lost(void); +/* cpu buffer functions */ + +struct op_sample; + +struct op_entry { + struct ring_buffer_event *event; + struct op_sample *sample; + unsigned long irq_flags; + unsigned long size; + unsigned long *data; +}; + +void oprofile_write_reserve(struct op_entry *entry, + struct pt_regs * const regs, + unsigned long pc, int code, int size); +int oprofile_add_data(struct op_entry *entry, unsigned long val); +int oprofile_write_commit(struct op_entry *entry); + #endif /* OPROFILE_H */ -- cgit v1.2.3 From 9b4778f680aa79d838ae2be6ab958938f744ce5f Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Wed, 7 Jan 2009 14:42:41 -0800 Subject: trivial: replace last usages of __FUNCTION__ in kernel __FUNCTION__ is gcc-specific, use __func__ Signed-off-by: Harvey Harrison Acked-by: Mauro Carvalho Chehab Signed-off-by: Linus Torvalds --- arch/x86/mm/numa_32.c | 4 ++-- drivers/gpu/drm/drm_crtc.c | 2 +- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- drivers/media/dvb/frontends/lgdt3304.c | 10 +++++----- drivers/media/dvb/frontends/s921_module.c | 2 +- drivers/media/dvb/ttpci/budget-ci.c | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git 
a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 8518c678d83f..d1f7439d173c 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -239,7 +239,7 @@ void resume_map_numa_kva(pgd_t *pgd_base) start_pfn = node_remap_start_pfn[node]; size = node_remap_size[node]; - printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node); + printk(KERN_DEBUG "%s: node %d\n", __func__, node); for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) { unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); @@ -251,7 +251,7 @@ void resume_map_numa_kva(pgd_t *pgd_base) PAGE_KERNEL_LARGE_EXEC)); printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n", - __FUNCTION__, vaddr, start_pfn + pfn); + __func__, vaddr, start_pfn + pfn); } } } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 53c87254be4c..5b2cbb778162 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -210,7 +210,7 @@ static int drm_mode_object_get(struct drm_device *dev, int ret; WARN(!mutex_is_locked(&dev->mode_config.mutex), - "%s called w/o mode_config lock\n", __FUNCTION__); + "%s called w/o mode_config lock\n", __func__); again: if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { DRM_ERROR("Ran out memory getting a mode number\n"); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 14afc23a0e24..1384d6686555 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1445,7 +1445,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || (obj_priv->gtt_offset & (obj->size - 1))) { - WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__); + WARN(1, "%s: object not 1M or size aligned\n", __func__); return; } @@ -1478,7 +1478,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || (obj_priv->gtt_offset & (obj->size - 1))) { - WARN(1, "%s: object not 1M or size aligned\n", __FUNCTION__); + WARN(1, "%s: object not 1M or size aligned\n", __func__); return; } diff --git a/drivers/media/dvb/frontends/lgdt3304.c b/drivers/media/dvb/frontends/lgdt3304.c index 469ace5692c6..3bb0c4394f8a 100644 --- a/drivers/media/dvb/frontends/lgdt3304.c +++ b/drivers/media/dvb/frontends/lgdt3304.c @@ -42,7 +42,7 @@ static int i2c_write_demod_bytes (struct dvb_frontend *fe, __u8 *buf, int len) for (i=0; ii2c, &i2cmsgs, 1))<0) { - printk("%s i2c_transfer error %d\n", __FUNCTION__, err); + printk("%s i2c_transfer error %d\n", __func__, err); if (err < 0) return err; else @@ -73,7 +73,7 @@ static int lgdt3304_i2c_read_reg(struct dvb_frontend *fe, unsigned int reg) i2cmsgs[1].buf = &buf; if((ret = i2c_transfer(state->i2c, i2cmsgs, 2))<0) { - printk("%s i2c_transfer error %d\n", __FUNCTION__, ret); + printk("%s i2c_transfer error %d\n", __func__, ret); return ret; } @@ -94,7 +94,7 @@ static int lgdt3304_i2c_write_reg(struct dvb_frontend *fe, int reg, int val) }; ret = i2c_transfer(state->i2c, &i2cmsgs, 1); if (ret != 1) { - printk("%s i2c_transfer error %d\n", __FUNCTION__, ret); + printk("%s i2c_transfer error %d\n", __func__, ret); return ret; } @@ -238,7 +238,7 @@ static int lgdt3304_set_parameters(struct dvb_frontend *fe, struct dvb_frontend_ } if (err) { - printk("%s error setting modulation\n", __FUNCTION__); + printk("%s error setting modulation\n", __func__); } else { state->current_modulation = param->u.vsb.modulation; } @@ -305,7 +305,7 @@ static int lgdt3304_read_status(struct dvb_frontend *fe, fe_status_t *status) } 
break; default: - printk("%s unhandled modulation\n", __FUNCTION__); + printk("%s unhandled modulation\n", __func__); } diff --git a/drivers/media/dvb/frontends/s921_module.c b/drivers/media/dvb/frontends/s921_module.c index 3cbb9cb2cf47..892af8c9ed57 100644 --- a/drivers/media/dvb/frontends/s921_module.c +++ b/drivers/media/dvb/frontends/s921_module.c @@ -136,7 +136,7 @@ static int s921_write(void *dev, u8 reg, u8 val) { }; if((err = i2c_transfer(state->i2c, &i2cmsgs, 1))<0) { - printk("%s i2c_transfer error %d\n", __FUNCTION__, err); + printk("%s i2c_transfer error %d\n", __func__, err); if (err < 0) return err; else diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c index 3507463fdac9..4d2a5cf27c31 100644 --- a/drivers/media/dvb/ttpci/budget-ci.c +++ b/drivers/media/dvb/ttpci/budget-ci.c @@ -1450,7 +1450,7 @@ static void frontend_init(struct budget_ci *budget_ci) if (budget_ci->budget.dvb_frontend) { if (dvb_attach(stb6100_attach, budget_ci->budget.dvb_frontend, &tt3200_stb6100_config, &budget_ci->budget.i2c_adap)) { if (!dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, 0, 0)) { - printk("%s: No LNBP21 found!\n", __FUNCTION__); + printk("%s: No LNBP21 found!\n", __func__); dvb_frontend_detach(budget_ci->budget.dvb_frontend); budget_ci->budget.dvb_frontend = NULL; } -- cgit v1.2.3 From 79f3b3cb7a2586b319a43a7f29924c6c555e4357 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Thu, 8 Jan 2009 10:04:30 -0500 Subject: x86, mtrr: fix types used in userspace exported header Commit 932d27a7913fc6b3c64c6e6082628b0a1561dec9 exported some mtrr structures without using the exportable __uX types, causing userspace build failures. Signed-off-by: Kyle McMartin Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mtrr.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h index cb988aab716d..14080d22edb3 100644 --- a/arch/x86/include/asm/mtrr.h +++ b/arch/x86/include/asm/mtrr.h @@ -58,15 +58,15 @@ struct mtrr_gentry { #endif /* !__i386__ */ struct mtrr_var_range { - u32 base_lo; - u32 base_hi; - u32 mask_lo; - u32 mask_hi; + __u32 base_lo; + __u32 base_hi; + __u32 mask_lo; + __u32 mask_hi; }; /* In the Intel processor's MTRR interface, the MTRR type is always held in an 8 bit field: */ -typedef u8 mtrr_type; +typedef __u8 mtrr_type; #define MTRR_NUM_FIXED_RANGES 88 #define MTRR_MAX_VAR_RANGES 256 -- cgit v1.2.3 From c19a28e1193a6c854738d609ae9b2fe2f6e6bea4 Mon Sep 17 00:00:00 2001 From: Fernando Carrijo Date: Wed, 7 Jan 2009 18:09:08 -0800 Subject: remove lots of double-semicolons Cc: Ingo Molnar Cc: Thomas Gleixner Acked-by: Theodore Ts'o Acked-by: Mark Fasheh Acked-by: David S. 
Miller Cc: James Morris Acked-by: Casey Schaufler Acked-by: Takashi Iwai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/cpu/cpufreq/longhaul.c | 2 +- fs/ocfs2/alloc.c | 2 +- fs/ocfs2/file.c | 2 +- net/ipv6/route.c | 2 +- net/ipv6/sysctl_net_ipv6.c | 2 +- net/sched/sch_sfq.c | 2 +- security/smack/smackfs.c | 2 +- sound/soc/au1x/dbdma2.c | 2 +- sound/soc/davinci/davinci-pcm.c | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c index b0461856acfb..a4cff5d6e380 100644 --- a/arch/x86/kernel/cpu/cpufreq/longhaul.c +++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c @@ -982,7 +982,7 @@ static int __init longhaul_init(void) case 10: printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n"); default: - ;; + ; } return -ENODEV; diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 54ff4c77aaa3..d861096c9d81 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -3868,7 +3868,7 @@ static void ocfs2_split_record(struct inode *inode, struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el; struct ocfs2_extent_rec *rec, *tmprec; - right_el = path_leaf_el(right_path);; + right_el = path_leaf_el(right_path); if (left_path) left_el = path_leaf_el(left_path); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index e8f795f978aa..a5887df2cd8a 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1605,7 +1605,7 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd, struct ocfs2_space_resv *sr) { struct inode *inode = file->f_path.dentry->d_inode; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) && !ocfs2_writes_unwritten_extents(osb)) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 76f06b94ab9f..c4a59824ac2c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2752,7 +2752,7 @@ int __init ip6_route_init(void) kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, SLAB_HWCACHE_ALIGN, NULL); if (!ip6_dst_ops_template.kmem_cachep) - goto out;; + goto out; ret = register_pernet_subsys(&ip6_route_net_ops); if (ret) diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 9048fe7e7ea7..a031034720b4 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -128,7 +128,7 @@ static struct ctl_table_header *ip6_header; int ipv6_sysctl_register(void) { - int err = -ENOMEM;; + int err = -ENOMEM; ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table); if (ip6_header == NULL) diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index f3965df00559..33133d27b539 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -435,7 +435,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) int i; q->perturb_timer.function = sfq_perturbation; - q->perturb_timer.data = (unsigned long)sch;; + q->perturb_timer.data = (unsigned long)sch; init_timer_deferrable(&q->perturb_timer); for (i = 0; i < SFQ_HASH_DIVISOR; i++) diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index bf107a389ac1..71e2b914363e 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -569,7 +569,7 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf, if (skp == NULL) goto out; - rule += SMK_LABELLEN;; + rule += SMK_LABELLEN; ret = sscanf(rule, "%d", &maplevel); if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) goto out; diff --git 
a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c index 74c823d60f91..bc8d654576c0 100644 --- a/sound/soc/au1x/dbdma2.c +++ b/sound/soc/au1x/dbdma2.c @@ -187,7 +187,7 @@ static int au1x_pcm_dbdma_realloc(struct au1xpsc_audio_dmadata *pcd, au1x_pcm_dmatx_cb, (void *)pcd); if (!pcd->ddma_chan) - return -ENOMEM;; + return -ENOMEM; au1xxx_dbdma_set_devwidth(pcd->ddma_chan, msbits); au1xxx_dbdma_ring_alloc(pcd->ddma_chan, 2); diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c index 74abc9b4f1cc..366049d8578c 100644 --- a/sound/soc/davinci/davinci-pcm.c +++ b/sound/soc/davinci/davinci-pcm.c @@ -212,7 +212,7 @@ davinci_pcm_pointer(struct snd_pcm_substream *substream) if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) count = src - runtime->dma_addr; else - count = dst - runtime->dma_addr;; + count = dst - runtime->dma_addr; spin_unlock(&prtd->lock); -- cgit v1.2.3 From 13b40a1a065824d2d4e55c8b48ea9f3f9d162929 Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Sun, 4 Jan 2009 12:04:21 +0800 Subject: ACPI: Avoid array address overflow when _CST MWAIT hint bits are set The Cx Register address obtained from the _CST object is used as the MWAIT hints if the register type is FFixedHW. And it is used to check whether the Cx type is supported or not. On some boxes the following Cx state package is obtained from _CST object: >{ ResourceTemplate () { Register (FFixedHW, 0x01, // Bit Width 0x02, // Bit Offset 0x0000000000889759, // Address 0x03, // Access Size ) }, 0x03, 0xF5, 0x015E } In such case we should use the bit[7:4] of Cx address to check whether the Cx type is supported or not. mask the MWAIT hint to avoid array address overflow Signed-off-by: Zhao Yakui Acked-by:Venki Pallipadi Signed-off-by: Len Brown --- arch/x86/kernel/acpi/cstate.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index c2502eb9aa83..a4805b3b4095 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -56,6 +56,7 @@ static struct cstate_entry *cpu_cstate_entry; /* per CPU ptr */ static short mwait_supported[ACPI_PROCESSOR_MAX_POWER]; #define MWAIT_SUBSTATE_MASK (0xf) +#define MWAIT_CSTATE_MASK (0xf) #define MWAIT_SUBSTATE_SIZE (4) #define CPUID_MWAIT_LEAF (5) @@ -98,7 +99,8 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); /* Check whether this particular cx_type (in CST) is supported or not */ - cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1; + cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) & + MWAIT_CSTATE_MASK) + 1; edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE); num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK; -- cgit v1.2.3 From 237889bf0a62f1399fb2ba0c2a259e6a96597131 Mon Sep 17 00:00:00 2001 From: Zhao Yakui Date: Wed, 17 Dec 2008 16:55:18 +0800 Subject: ACPI : Use RSDT instead of XSDT by adding boot option of "acpi=rsdt" On some boxes there exist both RSDT and XSDT table. But unfortunately sometimes there exists the following error when XSDT table is used: a. 32/64X address mismatch b. The 32/64X FACS address mismatch In such case the boot option of "acpi=rsdt" is provided so that RSDT is tried instead of XSDT table when the system can't work well. 
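In practice the workaround is simply appended to the kernel command line
in the boot loader, e.g.:

	acpi=rsdt

(the rest of the command line stays whatever the system already uses).
With the option set, acpi_rsdt_forced is non-zero and
acpi_tb_parse_root_table() sticks with the RSDT even when a revision > 1
RSDP advertises an XSDT.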
http://bugzilla.kernel.org/show_bug.cgi?id=8246 Signed-off-by: Zhao Yakui cc:Thomas Renninger Signed-off-by: Len Brown --- Documentation/kernel-parameters.txt | 1 + arch/ia64/kernel/acpi.c | 1 + arch/x86/kernel/acpi/boot.c | 6 +++++- drivers/acpi/tables/tbutils.c | 3 ++- include/acpi/acpixf.h | 1 + 5 files changed, 10 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index c9115c1b672c..136f02842de9 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -139,6 +139,7 @@ and is between 256 and 4096 characters. It is defined in the file ht -- run only enough ACPI to enable Hyper Threading strict -- Be less tolerant of platforms that are not strictly ACPI specification compliant. + rsdt -- prefer RSDT over (default) XSDT See also Documentation/power/pm.txt, pci=noacpi diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index bd7acc71e8a9..c19b686db9b8 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -65,6 +65,7 @@ EXPORT_SYMBOL(pm_idle); void (*pm_power_off) (void); EXPORT_SYMBOL(pm_power_off); +u32 acpi_rsdt_forced; unsigned int acpi_cpei_override; unsigned int acpi_cpei_phys_cpuid; diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 4c51a2f8fd31..db1a90a76b3e 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -47,7 +47,7 @@ #endif static int __initdata acpi_force = 0; - +u32 acpi_rsdt_forced; #ifdef CONFIG_ACPI int acpi_disabled = 0; #else @@ -1783,6 +1783,10 @@ static int __init parse_acpi(char *arg) disable_acpi(); acpi_ht = 1; } + /* acpi=rsdt use RSDT instead of XSDT */ + else if (strcmp(arg, "rsdt") == 0) { + acpi_rsdt_forced = 1; + } /* "acpi=noirq" disables ACPI interrupt routing */ else if (strcmp(arg, "noirq") == 0) { acpi_noirq_set(); diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c index 0cc92ef5236f..da9f240186e8 100644 --- a/drivers/acpi/tables/tbutils.c +++ b/drivers/acpi/tables/tbutils.c @@ -420,7 +420,8 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) /* Differentiate between RSDT and XSDT root tables */ - if (rsdp->revision > 1 && rsdp->xsdt_physical_address) { + if (rsdp->revision > 1 && rsdp->xsdt_physical_address + && !acpi_rsdt_forced) { /* * Root table is an XSDT (64-bit physical addresses). We must use the * XSDT if the revision is > 1 and the XSDT pointer is present, as per diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 33bc0e3b1954..05d2614e0078 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -48,6 +48,7 @@ #include "actypes.h" #include "actbl.h" +extern u32 acpi_rsdt_forced; /* * Global interfaces */ -- cgit v1.2.3 From 8659c406ade32f47da2c95889094801921d6330a Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 9 Jan 2009 12:17:39 -0800 Subject: x86: only scan the root bus in early PCI quirks We found a situation on Linus' machine that the Nvidia timer quirk hit on a Intel chipset system. The problem is that the system has a fancy Nvidia card with an own PCI bridge, and the early-quirks code looking for any NVidia bridge triggered on it incorrectly. This didn't lead a boot failure by luck, but the timer routing code selecting the wrong timer first and some ugly messages. It might lead to real problems on other systems. I checked all the devices which are currently checked for by early_quirks and it turns out they are all located in the root bus zero. 
So change the early-quirks loop to only scan bus 0. This incidently also saves quite some unnecessary scanning work, because early_quirks doesn't go through all the non root busses. The graphics card is not on bus 0, so it is not matched anymore. Signed-off-by: Andi Kleen Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Jesse Barnes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/early-quirks.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 744aa7fc49d5..76b8cd953dee 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -201,6 +201,12 @@ struct chipset { void (*f)(int num, int slot, int func); }; +/* + * Only works for devices on the root bus. If you add any devices + * not on bus 0 readd another loop level in early_quirks(). But + * be careful because at least the Nvidia quirk here relies on + * only matching on bus 0. + */ static struct chipset early_qrk[] __initdata = { { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs }, @@ -267,17 +273,17 @@ static int __init check_dev_quirk(int num, int slot, int func) void __init early_quirks(void) { - int num, slot, func; + int slot, func; if (!early_pci_allowed()) return; /* Poor man's PCI discovery */ - for (num = 0; num < 32; num++) - for (slot = 0; slot < 32; slot++) - for (func = 0; func < 8; func++) { - /* Only probe function 0 on single fn devices */ - if (check_dev_quirk(num, slot, func)) - break; - } + /* Only scan the root bus */ + for (slot = 0; slot < 32; slot++) + for (func = 0; func < 8; func++) { + /* Only probe function 0 on single fn devices */ + if (check_dev_quirk(0, slot, func)) + break; + } } -- cgit v1.2.3 From c4295fbb6048d85f0b41c5ced5cbf63f6811c46c Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 9 Jan 2009 12:49:50 -0800 Subject: x86: make 'constant_test_bit()' take an unsigned bit number Ingo noticed that using signed arithmetic seems to confuse the gcc inliner, and make it potentially decide that it's all too complicated. (Yeah, yeah, it's a constant. It's always positive. Still..) 
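As an aside (an illustration, not something spelled out in the patch):
constant_test_bit() computes nr / BITS_PER_LONG and nr % BITS_PER_LONG,
and for a signed nr both have to round toward zero for negative values
-- which can never occur here, but the compiler cannot know that -- so
neither can be lowered to a bare shift or mask:

	/* illustration only, not part of the patch; BITS_PER_LONG is
	 * the usual kernel macro (64 on x86-64) */
	static int bit_word_signed(int nr)
	{
		/* needs a sign fixup before the arithmetic shift */
		return nr / BITS_PER_LONG;
	}

	static unsigned int bit_word_unsigned(unsigned int nr)
	{
		/* a single logical shift; likewise nr % BITS_PER_LONG
		 * becomes a single mask */
		return nr / BITS_PER_LONG;
	}

The cheaper unsigned form presumably also shrinks the body's estimated
cost enough to keep the inliner from bailing out.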
Based-on: Ingo Molnar Signed-off-by: Linus Torvalds --- arch/x86/include/asm/bitops.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 9fa9dcdf344b..e02a359d2aa5 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -300,7 +300,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) return oldbit; } -static inline int constant_test_bit(int nr, const volatile unsigned long *addr) +static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) { return ((1UL << (nr % BITS_PER_LONG)) & (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; -- cgit v1.2.3 From 6d612b0f943289856c6e8186c564cda922cd040e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 12 Jan 2009 12:52:23 +0100 Subject: locking, hpet: annotate false positive warning Alexander Beregalov reported that this warning is caused by the HPET code: > hpet0: at MMIO 0xfed00000, IRQs 2, 8, 0 > hpet0: 3 comparators, 64-bit 14.318180 MHz counter > ODEBUG: object is on stack, but not annotated > ------------[ cut here ]------------ > WARNING: at lib/debugobjects.c:251 __debug_object_init+0x2a4/0x352() > Bisected down to 26afe5f2fbf06ea0765aaa316640c4dd472310c0 > (x86: HPET_MSI Initialise per-cpu HPET timers) The commit is fine - but the on-stack workqueue entry needs annotation. Reported-and-bisected-by: Alexander Beregalov Signed-off-by: Peter Zijlstra Tested-by: Alexander Beregalov Signed-off-by: Ingo Molnar --- arch/x86/kernel/hpet.c | 2 +- include/linux/workqueue.h | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index cd759ad90690..bb2e0f0975ae 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -628,7 +628,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n, switch (action & 0xf) { case CPU_ONLINE: - INIT_DELAYED_WORK(&work.work, hpet_work); + INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work); init_completion(&work.complete); /* FIXME: add schedule_work_on() */ schedule_delayed_work_on(cpu, &work.work, 0); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index b36291130f22..47151c8495aa 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -118,6 +118,12 @@ struct execute_work { init_timer(&(_work)->timer); \ } while (0) +#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ + do { \ + INIT_WORK(&(_work)->work, (_func)); \ + init_timer_on_stack(&(_work)->timer); \ + } while (0) + #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ do { \ INIT_WORK(&(_work)->work, (_func)); \ -- cgit v1.2.3 From f313e12308f7c5ea645f18e759d104d088b18615 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 9 Jan 2009 12:17:43 -0800 Subject: x86: avoid theoretical vmalloc fault loop Ajith Kumar noticed: I was going through the vmalloc fault handling for x86_64 and am unclear about the following lines in the vmalloc_fault() function. pgd = pgd_offset(current->mm ?: &init_mm, address); pgd_ref = pgd_offset_k(address); Here the intention is to get the pgd corresponding to the current process and sync it up with the pgd in init_mm(obtained from pgd_offset_k). However, for kernel threads current->mm is NULL and hence pgd = pgd_offset(init_mm, address) = pgd_ref which means the fault handler returns without setting the pgd entry in the MM structure in the context of which the kernel thread has faulted. 
This could lead to never-ending faults and busy looping of kernel threads like pdflush. So, shouldn't the pgd = pgd_offset(current->mm ?: &init_mm, address); be pgd = pgd_offset(current->active_mm ?: &init_mm, address); We can use active_mm unconditionally because it should be always set. Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/mm/fault.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9e268b6b204e..90dfae511a41 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -534,7 +534,7 @@ static int vmalloc_fault(unsigned long address) happen within a race in page table update. In the later case just flush. */ - pgd = pgd_offset(current->mm ?: &init_mm, address); + pgd = pgd_offset(current->active_mm, address); pgd_ref = pgd_offset_k(address); if (pgd_none(*pgd_ref)) return -1; -- cgit v1.2.3 From 4884d8e6a05026ec906355436cea9dc1acb1d09e Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 11 Jan 2009 18:38:55 +0530 Subject: x86: fix mpparse.c build error on latest git Fix this by reintroducing asm/smp.h include in mpparse.c - later on I will fix this by removing non-smp data from smp.h. Reported-by: Petr Titera Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/kernel/mpparse.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index c0601c2848a1..a649a4ccad43 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #ifdef CONFIG_X86_32 -- cgit v1.2.3 From 2bc1379712e74c5b99adaa6db433c14d8841ab4f Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sun, 11 Jan 2009 20:34:47 +0530 Subject: x86: fix apic.c build error on latest git Fix this by reintroducing asm/smp.h include in apic.c - later on I will fix this by removing non-smp data from smp.h Also fix the __inquire_remote_apic() prototype/inline. Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mach-default/mach_wakecpu.h | 6 ++++++ arch/x86/kernel/apic.c | 1 + 2 files changed, 7 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h index ceb013660146..89897a6a65b9 100644 --- a/arch/x86/include/asm/mach-default/mach_wakecpu.h +++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h @@ -24,7 +24,13 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) { } +#ifdef CONFIG_SMP extern void __inquire_remote_apic(int apicid); +#else /* CONFIG_SMP */ +static inline void __inquire_remote_apic(int apicid) +{ +} +#endif /* CONFIG_SMP */ static inline void inquire_remote_apic(int apicid) { diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 566a08466b19..0f830e4f5675 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3 From 50c668d678fd01284799a6e4f1b91829d83cb9ed Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 12 Jan 2009 10:49:53 +0100 Subject: Revert "cpumask: use work_on_cpu in acpi-cpufreq.c for drv_read and drv_write" This reverts commit 7503bfbae89eba07b46441a5d1594647f6b8ab7d. Dieter Ries reported bootup soft-hangs and bisected it back to this commit, and reverting this commit gave him a working system. 
The commit introduces work_on_cpu() use into the cpufreq code, but that is subtly problematic from a lock hierarchy POV: the hotplug-cpu lock is an highlevel lock that is taken before lowlevel locks, and in this codepath we are called with the policy lock taken. Dieter did not have lockdep enabled so we dont have a nice stack trace proof for this, but using work_on_cpu() in such a lowlevel place certainly looks wrong, so we revert the patch. work_on_cpu() needs to be reworked to be more generally usable. Reported-by: Dieter Ries Tested-by: Dieter Ries Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 06fcd8f9323c..6f11e029e8c5 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -150,9 +150,8 @@ struct drv_cmd { u32 val; }; -static long do_drv_read(void *_cmd) +static void do_drv_read(struct drv_cmd *cmd) { - struct drv_cmd *cmd = _cmd; u32 h; switch (cmd->type) { @@ -167,12 +166,10 @@ static long do_drv_read(void *_cmd) default: break; } - return 0; } -static long do_drv_write(void *_cmd) +static void do_drv_write(struct drv_cmd *cmd) { - struct drv_cmd *cmd = _cmd; u32 lo, hi; switch (cmd->type) { @@ -189,23 +186,30 @@ static long do_drv_write(void *_cmd) default: break; } - return 0; } static void drv_read(struct drv_cmd *cmd) { + cpumask_t saved_mask = current->cpus_allowed; cmd->val = 0; - work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd); + set_cpus_allowed_ptr(current, cmd->mask); + do_drv_read(cmd); + set_cpus_allowed_ptr(current, &saved_mask); } static void drv_write(struct drv_cmd *cmd) { + cpumask_t saved_mask = current->cpus_allowed; unsigned int i; for_each_cpu(i, cmd->mask) { - work_on_cpu(i, do_drv_write, cmd); + set_cpus_allowed_ptr(current, cpumask_of(i)); + do_drv_write(cmd); } + + set_cpus_allowed_ptr(current, &saved_mask); + return; } static u32 get_cur_val(const struct cpumask *mask) @@ -231,15 +235,10 @@ static u32 get_cur_val(const struct cpumask *mask) return 0; } - if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL))) - return 0; - cpumask_copy(cmd.mask, mask); drv_read(&cmd); - free_cpumask_var(cmd.mask); - dprintk("get_cur_val = %u\n", cmd.val); return cmd.val; -- cgit v1.2.3 From e8cea892dff8e3ebed42954c46730309b617196f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 12 Jan 2009 19:36:59 +0100 Subject: Revert "i386: add TRACE_IRQS_OFF for the nmi" This reverts commit e0c7317557c8fc8eacf611e30c2a80f4e24e47a3. This patch was wrong, as lockdep (and thus the irq state tracer) aren't nmi safe. People are already seeing lockdep warnings due to this. 
Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_32.S | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index d6f0490a7391..46469029e9d3 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1203,7 +1203,6 @@ nmi_stack_correct: pushl %eax CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL - TRACE_IRQS_OFF xorl %edx,%edx # zero error code movl %esp,%eax # pt_regs pointer call do_nmi @@ -1244,7 +1243,6 @@ nmi_espfix_stack: pushl %eax CFI_ADJUST_CFA_OFFSET 4 SAVE_ALL - TRACE_IRQS_OFF FIXUP_ESPFIX_STACK # %eax == %esp xorl %edx,%edx # zero error code call do_nmi -- cgit v1.2.3 From 4a922a969cb0190ce4580d4b064e2ac35f3ac9bf Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 13 Jan 2009 16:11:00 +0100 Subject: x86, cpufreq: remove leftover copymask_copy() Impact: fix potential boot crash on MAXSMP Remove code left over by: 50c668d: Revert "cpumask: use work_on_cpu in acpi-cpufreq.c for drv_read That cmd.cpumask is not allocated anymore. No impact on default !MAXSMP kernels. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 6f11e029e8c5..8f3c95c7e61f 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -235,8 +235,6 @@ static u32 get_cur_val(const struct cpumask *mask) return 0; } - cpumask_copy(cmd.mask, mask); - drv_read(&cmd); dprintk("get_cur_val = %u\n", cmd.val); -- cgit v1.2.3 From c8399943bdb70fef78798b97f975506ecc99e039 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Mon, 12 Jan 2009 23:01:15 +0100 Subject: x86, generic: mark complex bitops.h inlines as __always_inline Impact: reduce kernel image size Hugh Dickins noticed that older gcc versions when the kernel is built for code size didn't inline some of the bitops. Mark all complex x86 bitops that have more than a single asm statement or two as always inline to avoid this problem. Probably should be done for other architectures too. Ingo then found a better fix that only requires a single line change, but it unfortunately only works on gcc 4.3. On older gccs the original patch still makes a ~0.3% defconfig difference with CONFIG_OPTIMIZE_INLINING=y. With gcc 4.1 and a defconfig like build: 6116998 1138540 883788 8139326 7c323e vmlinux-oi-with-patch 6137043 1138540 883788 8159371 7c808b vmlinux-optimize-inlining ~20k / 0.3% difference. Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- arch/x86/include/asm/bitops.h | 14 ++++++++++---- include/asm-generic/bitops/__ffs.h | 2 +- include/asm-generic/bitops/__fls.h | 2 +- include/asm-generic/bitops/fls.h | 2 +- include/asm-generic/bitops/fls64.h | 4 ++-- 5 files changed, 15 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index e02a359d2aa5..02b47a603fc8 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -3,6 +3,9 @@ /* * Copyright 1992, Linus Torvalds. + * + * Note: inlines with more than a single statement should be marked + * __always_inline to avoid problems with older gcc's inlining heuristics. */ #ifndef _LINUX_BITOPS_H @@ -53,7 +56,8 @@ * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. 
*/ -static inline void set_bit(unsigned int nr, volatile unsigned long *addr) +static __always_inline void +set_bit(unsigned int nr, volatile unsigned long *addr) { if (IS_IMMEDIATE(nr)) { asm volatile(LOCK_PREFIX "orb %1,%0" @@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr) * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() * in order to ensure changes are visible on other processors. */ -static inline void clear_bit(int nr, volatile unsigned long *addr) +static __always_inline void +clear_bit(int nr, volatile unsigned long *addr) { if (IS_IMMEDIATE(nr)) { asm volatile(LOCK_PREFIX "andb %1,%0" @@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr) * * This is the same as test_and_set_bit on x86. */ -static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr) +static __always_inline int +test_and_set_bit_lock(int nr, volatile unsigned long *addr) { return test_and_set_bit(nr, addr); } @@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) return oldbit; } -static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) +static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) { return ((1UL << (nr % BITS_PER_LONG)) & (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h index 9a3274aecf83..937d7c435575 100644 --- a/include/asm-generic/bitops/__ffs.h +++ b/include/asm-generic/bitops/__ffs.h @@ -9,7 +9,7 @@ * * Undefined if no bit exists, so code should check against 0 first. */ -static inline unsigned long __ffs(unsigned long word) +static __always_inline unsigned long __ffs(unsigned long word) { int num = 0; diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h index be24465403d6..a60a7ccb6782 100644 --- a/include/asm-generic/bitops/__fls.h +++ b/include/asm-generic/bitops/__fls.h @@ -9,7 +9,7 @@ * * Undefined if no set bit exists, so code should check against 0 first. */ -static inline unsigned long __fls(unsigned long word) +static __always_inline unsigned long __fls(unsigned long word) { int num = BITS_PER_LONG - 1; diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h index 850859bc5069..0576d1f42f43 100644 --- a/include/asm-generic/bitops/fls.h +++ b/include/asm-generic/bitops/fls.h @@ -9,7 +9,7 @@ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static inline int fls(int x) +static __always_inline int fls(int x) { int r = 32; diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h index 86d403f8b256..b097cf8444e3 100644 --- a/include/asm-generic/bitops/fls64.h +++ b/include/asm-generic/bitops/fls64.h @@ -15,7 +15,7 @@ * at position 64. 
*/ #if BITS_PER_LONG == 32 -static inline int fls64(__u64 x) +static __always_inline int fls64(__u64 x) { __u32 h = x >> 32; if (h) @@ -23,7 +23,7 @@ static inline int fls64(__u64 x) return fls(x); } #elif BITS_PER_LONG == 64 -static inline int fls64(__u64 x) +static __always_inline int fls64(__u64 x) { if (x == 0) return 0; -- cgit v1.2.3 From afc7d20c8429f32f19d47367fdc36eeed2334ec3 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Fri, 9 Jan 2009 16:13:10 -0800 Subject: x86 PAT: consolidate old memtype new memtype check into a function Impact: cleanup Move the new memtype old memtype allowed check to header so that is can be shared by other users. Subsequent patch uses this in pat.c in remap_pfn_range() code path. No functionality change in this patch. Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable.h | 19 +++++++++++++++++++ arch/x86/pci/i386.c | 12 +----------- 2 files changed, 20 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 83e69f4a37f0..06bbcbd66e9c 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -341,6 +341,25 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask) +static inline int is_new_memtype_allowed(unsigned long flags, + unsigned long new_flags) +{ + /* + * Certain new memtypes are not allowed with certain + * requested memtype: + * - request is uncached, return cannot be write-back + * - request is write-combine, return cannot be write-back + */ + if ((flags == _PAGE_CACHE_UC_MINUS && + new_flags == _PAGE_CACHE_WB) || + (flags == _PAGE_CACHE_WC && + new_flags == _PAGE_CACHE_WB)) { + return 0; + } + + return 1; +} + #ifndef __ASSEMBLY__ /* Indicate that x86 has its own track and untrack pfn vma functions */ #define __HAVE_PFNMAP_TRACKING diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index f884740da318..5ead808dd70c 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -314,17 +314,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, return retval; if (flags != new_flags) { - /* - * Do not fallback to certain memory types with certain - * requested type: - * - request is uncached, return cannot be write-back - * - request is uncached, return cannot be write-combine - * - request is write-combine, return cannot be write-back - */ - if ((flags == _PAGE_CACHE_UC_MINUS && - (new_flags == _PAGE_CACHE_WB)) || - (flags == _PAGE_CACHE_WC && - new_flags == _PAGE_CACHE_WB)) { + if (!is_new_memtype_allowed(flags, new_flags)) { free_memtype(addr, addr+len); return -EINVAL; } -- cgit v1.2.3 From e4b866ed197cef9989348e0479fed8d864ea465b Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Fri, 9 Jan 2009 16:13:11 -0800 Subject: x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param Impact: cleanup Change the protection parameter for track_pfn_vma_new() into a pgprot_t pointer. Subsequent patch changes the x86 PAT handling to return a compatible memtype in pgprot_t, if what was requested cannot be allowed due to conflicts. No fuctionality change in this patch. 
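As a minimal sketch of the new calling convention (it mirrors the vm_insert_pfn() hunk in mm/memory.c below and assumes nothing beyond that hunk), the caller copies the vma protection into a local pgprot_t, passes its address, and then uses the possibly updated value for the actual mapping:

    pgprot_t pgprot = vma->vm_page_prot;

    if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
            return -EINVAL;

    /* use whatever prot PAT handed back, not vma->vm_page_prot directly */
    ret = insert_pfn(vma, addr, pfn, pgprot);
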
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/mm/pat.c | 6 +++--- include/asm-generic/pgtable.h | 4 ++-- mm/memory.c | 7 ++++--- 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 85cbd3cd3723..f88ac80530c0 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -741,7 +741,7 @@ cleanup_ret: * Note that this function can be called with caller trying to map only a * subrange/page inside the vma. */ -int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, +int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long size) { int retval = 0; @@ -758,14 +758,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, if (is_linear_pfn_mapping(vma)) { /* reserve the whole chunk starting from vm_pgoff */ paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; - return reserve_pfn_range(paddr, vma_size, prot); + return reserve_pfn_range(paddr, vma_size, *prot); } /* reserve page by page using pfn and size */ base_paddr = (resource_size_t)pfn << PAGE_SHIFT; for (i = 0; i < size; i += PAGE_SIZE) { paddr = base_paddr + i; - retval = reserve_pfn_range(paddr, PAGE_SIZE, prot); + retval = reserve_pfn_range(paddr, PAGE_SIZE, *prot); if (retval) goto cleanup_ret; } diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 72ebe91005a8..8e6d0ca70aba 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -301,7 +301,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, * track_pfn_vma_new is called when a _new_ pfn mapping is being established * for physical range indicated by pfn and size. */ -static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, +static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long size) { return 0; @@ -332,7 +332,7 @@ static inline void untrack_pfn_vma(struct vm_area_struct *vma, { } #else -extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot, +extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long size); extern int track_pfn_vma_copy(struct vm_area_struct *vma); extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, diff --git a/mm/memory.c b/mm/memory.c index d3ee2ea5615c..22bfa7a47a0b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { int ret; + pgprot_t pgprot = vma->vm_page_prot; /* * Technically, architectures with pte_special can avoid all these * restrictions (same for remap_pfn_range). 
However we would like @@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; - if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE)) + if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE)) return -EINVAL; - ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot); + ret = insert_pfn(vma, addr, pfn, pgprot); if (ret) untrack_pfn_vma(vma, pfn, PAGE_SIZE); @@ -1671,7 +1672,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; - err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size)); + err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size)); if (err) { /* * To indicate that track_pfn related cleanup is not -- cgit v1.2.3 From cdecff6864a1cd352a41d44a65e7451b8ef5cee2 Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Fri, 9 Jan 2009 16:13:12 -0800 Subject: x86 PAT: return compatible mapping to remap_pfn_range callers Impact: avoid warning message, potentially solve 3D performance regression Change x86 PAT code to return compatible memtype if the exact memtype that was requested in remap_pfn_rage and friends is not available due to some conflict. This is done by returning the compatible type in pgprot parameter of track_pfn_vma_new(), and the caller uses that memtype for page table. Note that track_pfn_vma_copy() which is basically called during fork gets the prot from existing page table and should not have any conflict. Hence we use strict memtype check there and do not allow compatible memtypes. This patch fixes the bug reported here: http://marc.info/?l=linux-kernel&m=123108883716357&w=2 Specifically the error message: X:5010 map pfn expected mapping type write-back for d0000000-d0101000, got write-combining Should go away. Reported-and-bisected-by: Kevin Winchester Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/mm/pat.c | 43 ++++++++++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 15 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index f88ac80530c0..8b08fb955274 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -601,12 +601,13 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot) * Reserved non RAM regions only and after successful reserve_memtype, * this func also keeps identity mapping (if any) in sync with this new prot. 
*/ -static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot) +static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, + int strict_prot) { int is_ram = 0; int id_sz, ret; unsigned long flags; - unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK); + unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); is_ram = pagerange_is_ram(paddr, paddr + size); @@ -625,15 +626,24 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot) return ret; if (flags != want_flags) { - free_memtype(paddr, paddr + size); - printk(KERN_ERR - "%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n", - current->comm, current->pid, - cattr_name(want_flags), - (unsigned long long)paddr, - (unsigned long long)(paddr + size), - cattr_name(flags)); - return -EINVAL; + if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) { + free_memtype(paddr, paddr + size); + printk(KERN_ERR "%s:%d map pfn expected mapping type %s" + " for %Lx-%Lx, got %s\n", + current->comm, current->pid, + cattr_name(want_flags), + (unsigned long long)paddr, + (unsigned long long)(paddr + size), + cattr_name(flags)); + return -EINVAL; + } + /* + * We allow returning different type than the one requested in + * non strict case. + */ + *vma_prot = __pgprot((pgprot_val(*vma_prot) & + (~_PAGE_CACHE_MASK)) | + flags); } /* Need to keep identity mapping in sync */ @@ -689,6 +699,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) unsigned long vma_start = vma->vm_start; unsigned long vma_end = vma->vm_end; unsigned long vma_size = vma_end - vma_start; + pgprot_t pgprot; if (!pat_enabled) return 0; @@ -702,7 +713,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) WARN_ON_ONCE(1); return -EINVAL; } - return reserve_pfn_range(paddr, vma_size, __pgprot(prot)); + pgprot = __pgprot(prot); + return reserve_pfn_range(paddr, vma_size, &pgprot, 1); } /* reserve entire vma page by page, using pfn and prot from pte */ @@ -710,7 +722,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) continue; - retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot)); + pgprot = __pgprot(prot); + retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1); if (retval) goto cleanup_ret; } @@ -758,14 +771,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, if (is_linear_pfn_mapping(vma)) { /* reserve the whole chunk starting from vm_pgoff */ paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; - return reserve_pfn_range(paddr, vma_size, *prot); + return reserve_pfn_range(paddr, vma_size, prot, 0); } /* reserve page by page using pfn and size */ base_paddr = (resource_size_t)pfn << PAGE_SHIFT; for (i = 0; i < size; i += PAGE_SIZE) { paddr = base_paddr + i; - retval = reserve_pfn_range(paddr, PAGE_SIZE, *prot); + retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0); if (retval) goto cleanup_ret; } -- cgit v1.2.3 From 58dab916dfb57328d50deb0aa9b3fc92efa248ff Mon Sep 17 00:00:00 2001 From: "venkatesh.pallipadi@intel.com" Date: Fri, 9 Jan 2009 16:13:14 -0800 Subject: x86 PAT: remove CPA WARN_ON for zero pte Impact: reduce scope of debug check - avoid warnings The logic to find whether identity map exists or not using high_memory or max_low_pfn_mapped/max_pfn_mapped are not complete as the memory withing the range may not be mapped if there is a unusable hole in e820. 
Specifically, on my test system I started seeing these warnings with tools like hwinfo, acpidump trying to map ACPI region. [ 27.400018] ------------[ cut here ]------------ [ 27.400344] WARNING: at /home/venkip/src/linus/linux-2.6/arch/x86/mm/pageattr.c:560 __change_page_attr_set_clr+0xf3/0x8b8() [ 27.400821] Hardware name: X7DB8 [ 27.401070] CPA: called for zero pte. vaddr = ffff8800cff6a000 cpa->vaddr = ffff8800cff6a000 [ 27.401569] Modules linked in: [ 27.401882] Pid: 4913, comm: dmidecode Not tainted 2.6.28-05716-gfe0bdec #586 [ 27.402141] Call Trace: [ 27.402488] [] warn_slowpath+0xd3/0x10f [ 27.402749] [] ? find_get_page+0xb3/0xc9 [ 27.403028] [] ? find_get_page+0x0/0xc9 [ 27.403333] [] __change_page_attr_set_clr+0xf3/0x8b8 [ 27.403628] [] ? __purge_vmap_area_lazy+0x192/0x1a1 [ 27.403883] [] ? __purge_vmap_area_lazy+0x4b/0x1a1 [ 27.404172] [] ? vm_unmap_aliases+0x1ab/0x1bb [ 27.404512] [] ? vm_unmap_aliases+0x48/0x1bb [ 27.404766] [] change_page_attr_set_clr+0x13e/0x2e6 [ 27.405026] [] ? _spin_unlock+0x26/0x2a [ 27.405292] [] ? reserve_memtype+0x19b/0x4e3 [ 27.405590] [] _set_memory_wb+0x22/0x24 [ 27.405844] [] ioremap_change_attr+0x26/0x28 [ 27.406097] [] reserve_pfn_range+0x1a3/0x235 [ 27.406427] [] track_pfn_vma_new+0x49/0xb3 [ 27.406686] [] remap_pfn_range+0x94/0x32c [ 27.406940] [] ? phys_mem_access_prot_allowed+0xb5/0x1a8 [ 27.407209] [] mmap_mem+0x75/0x9d [ 27.407523] [] mmap_region+0x2cf/0x53e [ 27.407776] [] do_mmap_pgoff+0x2a9/0x30d [ 27.408034] [] sys_mmap+0x92/0xce [ 27.408339] [] system_call_fastpath+0x16/0x1b [ 27.408614] ---[ end trace 4b16ad70c09a602d ]--- [ 27.408871] dmidecode:4913 reserve_pfn_range ioremap_change_attr failed write-back for cff6a000-cff6b000 This is wih track_pfn_vma_new trying to keep identity map in sync. The address cff6a000 is the ACPI region according to e820. [ 0.000000] BIOS-provided physical RAM map: [ 0.000000] BIOS-e820: 0000000000000000 - 000000000009c000 (usable) [ 0.000000] BIOS-e820: 000000000009c000 - 00000000000a0000 (reserved) [ 0.000000] BIOS-e820: 00000000000cc000 - 00000000000d0000 (reserved) [ 0.000000] BIOS-e820: 00000000000e4000 - 0000000000100000 (reserved) [ 0.000000] BIOS-e820: 0000000000100000 - 00000000cff60000 (usable) [ 0.000000] BIOS-e820: 00000000cff60000 - 00000000cff69000 (ACPI data) [ 0.000000] BIOS-e820: 00000000cff69000 - 00000000cff80000 (ACPI NVS) [ 0.000000] BIOS-e820: 00000000cff80000 - 00000000d0000000 (reserved) [ 0.000000] BIOS-e820: 00000000e0000000 - 00000000f0000000 (reserved) [ 0.000000] BIOS-e820: 00000000fec00000 - 00000000fec10000 (reserved) [ 0.000000] BIOS-e820: 00000000fee00000 - 00000000fee01000 (reserved) [ 0.000000] BIOS-e820: 00000000ff000000 - 0000000100000000 (reserved) [ 0.000000] BIOS-e820: 0000000100000000 - 0000000230000000 (usable) And is not mapped as per init_memory_mapping. [ 0.000000] init_memory_mapping: 0000000000000000-00000000cff60000 [ 0.000000] init_memory_mapping: 0000000100000000-0000000230000000 We can add logic to check for this. But, there can also be other holes in identity map when we have 1GB of aligned reserved space in e820. This patch handles it by removing the WARN_ON and returning a specific error value (EFAULT) to indicate that the address does not have any identity mapping. The code that tries to keep identity map in sync can ignore this error, with other callers of cpa still getting error here. 
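The heart of the change, pulled out here as a sketch for readability (it is taken directly from the kernel_map_sync_memtype() addition in the pat.c hunks below, nothing more): the identity-map sync path treats the new -EFAULT return as "no identity mapping at this address" and converts it to success, while any other error still propagates to the caller.

    ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags);
    /*
     * -EFAULT means the address was not valid and had no identity
     * mapping at all; that counts as success for the sync path.
     */
    if (ret == -EFAULT)
            ret = 0;
    return ret;
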
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Suresh Siddha Signed-off-by: Ingo Molnar --- arch/x86/mm/pageattr.c | 10 ++++++---- arch/x86/mm/pat.c | 45 ++++++++++++++++++++++++++++++++------------- 2 files changed, 38 insertions(+), 17 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index e89d24815f26..4cf30dee8161 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -555,10 +555,12 @@ repeat: if (!pte_val(old_pte)) { if (!primary) return 0; - WARN(1, KERN_WARNING "CPA: called for zero pte. " - "vaddr = %lx cpa->vaddr = %lx\n", address, - *cpa->vaddr); - return -EINVAL; + + /* + * Special error value returned, indicating that the mapping + * did not exist at this address. + */ + return -EFAULT; } if (level == PG_LEVEL_4K) { diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 8b08fb955274..160c42d3eb8f 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -505,6 +505,35 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) } #endif /* CONFIG_STRICT_DEVMEM */ +/* + * Change the memory type for the physial address range in kernel identity + * mapping space if that range is a part of identity map. + */ +static int kernel_map_sync_memtype(u64 base, unsigned long size, + unsigned long flags) +{ + unsigned long id_sz; + int ret; + + if (!pat_enabled || base >= __pa(high_memory)) + return 0; + + id_sz = (__pa(high_memory) < base + size) ? + __pa(high_memory) - base : + size; + + ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags); + /* + * -EFAULT return means that the addr was not valid and did not have + * any identity mapping. That case is a success for + * kernel_map_sync_memtype. + */ + if (ret == -EFAULT) + ret = 0; + + return ret; +} + int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { @@ -555,9 +584,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, if (retval < 0) return 0; - if (((pfn < max_low_pfn_mapped) || - (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) && - ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) { + if (kernel_map_sync_memtype(offset, size, flags)) { free_memtype(offset, offset + size); printk(KERN_INFO "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n", @@ -605,7 +632,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, int strict_prot) { int is_ram = 0; - int id_sz, ret; + int ret; unsigned long flags; unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); @@ -646,15 +673,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, flags); } - /* Need to keep identity mapping in sync */ - if (paddr >= __pa(high_memory)) - return 0; - - id_sz = (__pa(high_memory) < paddr + size) ? - __pa(high_memory) - paddr : - size; - - if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) { + if (kernel_map_sync_memtype(paddr, size, flags)) { free_memtype(paddr, paddr + size); printk(KERN_ERR "%s:%d reserve_pfn_range ioremap_change_attr failed %s " -- cgit v1.2.3 From e55380edf68796d75bf41391a781c68ee678587d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 14 Jan 2009 14:13:55 +0100 Subject: [CVE-2009-0029] Rename old_readdir to sys_old_readdir This way it matches the generic system call name convention. 
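A small C aside on the include/linux/syscalls.h hunk below: the prototype only needs struct old_linux_dirent as an incomplete type, so the one-line forward declaration is enough. A standalone sketch of that pattern (illustrative only, with the kernel-specific asmlinkage and __user annotations dropped so it compiles on its own):

    struct old_linux_dirent;        /* incomplete type; no definition required here */

    long sys_old_readdir(unsigned int fd,
                         struct old_linux_dirent *dirent,  /* pointer to incomplete type is legal */
                         unsigned int count);
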
Signed-off-by: Heiko Carstens --- arch/arm/kernel/calls.S | 2 +- arch/cris/arch-v10/kernel/entry.S | 2 +- arch/cris/arch-v32/kernel/entry.S | 2 +- arch/h8300/kernel/syscalls.S | 2 +- arch/m68k/kernel/entry.S | 2 +- arch/m68knommu/kernel/syscalltable.S | 2 +- arch/mips/kernel/scall32-o32.S | 2 +- arch/mn10300/kernel/entry.S | 2 +- arch/powerpc/include/asm/systbl.h | 2 +- arch/sh/kernel/syscalls_32.S | 2 +- arch/sh/kernel/syscalls_64.S | 2 +- arch/sparc/kernel/systbls_32.S | 2 +- arch/x86/kernel/syscall_table_32.S | 2 +- fs/readdir.c | 2 +- include/linux/syscalls.h | 2 ++ 15 files changed, 16 insertions(+), 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 09a061cb7838..9ca8d13f05f7 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -98,7 +98,7 @@ CALL(sys_uselib) CALL(sys_swapon) CALL(sys_reboot) - CALL(OBSOLETE(old_readdir)) /* used by libc4 */ + CALL(OBSOLETE(sys_old_readdir)) /* used by libc4 */ /* 90 */ CALL(OBSOLETE(old_mmap)) /* used by libc4 */ CALL(sys_munmap) CALL(sys_truncate) diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S index ed171d389e65..72f5cd319b97 100644 --- a/arch/cris/arch-v10/kernel/entry.S +++ b/arch/cris/arch-v10/kernel/entry.S @@ -691,7 +691,7 @@ sys_call_table: .long sys_uselib .long sys_swapon .long sys_reboot - .long old_readdir + .long sys_old_readdir .long old_mmap /* 90 */ .long sys_munmap .long sys_truncate diff --git a/arch/cris/arch-v32/kernel/entry.S b/arch/cris/arch-v32/kernel/entry.S index 7f6f93e6b70e..5e674c8f7c51 100644 --- a/arch/cris/arch-v32/kernel/entry.S +++ b/arch/cris/arch-v32/kernel/entry.S @@ -614,7 +614,7 @@ sys_call_table: .long sys_uselib .long sys_swapon .long sys_reboot - .long old_readdir + .long sys_old_readdir .long old_mmap /* 90 */ .long sys_munmap .long sys_truncate diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S index 54e21c3f2057..4eb67faac633 100644 --- a/arch/h8300/kernel/syscalls.S +++ b/arch/h8300/kernel/syscalls.S @@ -103,7 +103,7 @@ SYMBOL_NAME_LABEL(sys_call_table) .long SYMBOL_NAME(sys_uselib) .long SYMBOL_NAME(sys_swapon) .long SYMBOL_NAME(sys_reboot) - .long SYMBOL_NAME(old_readdir) + .long SYMBOL_NAME(sys_old_readdir) .long SYMBOL_NAME(old_mmap) /* 90 */ .long SYMBOL_NAME(sys_munmap) .long SYMBOL_NAME(sys_truncate) diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 5b780826647c..5c332f2b9b83 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S @@ -513,7 +513,7 @@ sys_call_table: .long sys_uselib .long sys_swapon .long sys_reboot - .long old_readdir + .long sys_old_readdir .long old_mmap /* 90 */ .long sys_munmap .long sys_truncate diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S index 812f8d8b7a85..5c3e3f62194a 100644 --- a/arch/m68knommu/kernel/syscalltable.S +++ b/arch/m68knommu/kernel/syscalltable.S @@ -107,7 +107,7 @@ ENTRY(sys_call_table) .long sys_uselib .long sys_ni_syscall /* sys_swapon */ .long sys_reboot - .long old_readdir + .long sys_old_readdir .long old_mmap /* 90 */ .long sys_munmap .long sys_truncate diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index d0916a55cd77..51d1ba415b90 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -398,7 +398,7 @@ einval: li v0, -ENOSYS sys sys_uselib 1 sys sys_swapon 2 sys sys_reboot 3 - sys old_readdir 3 + sys sys_old_readdir 3 sys old_mmap 6 /* 4090 */ sys sys_munmap 2 sys sys_truncate 2 diff 
--git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S index 62fba8aa9b6e..ceeaaaa359e2 100644 --- a/arch/mn10300/kernel/entry.S +++ b/arch/mn10300/kernel/entry.S @@ -478,7 +478,7 @@ ENTRY(sys_call_table) .long sys_uselib .long sys_swapon .long sys_reboot - .long old_readdir + .long sys_old_readdir .long old_mmap /* 90 */ .long sys_munmap .long sys_truncate diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 803def236654..72353f6070a4 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -92,7 +92,7 @@ COMPAT_SYS_SPU(readlink) SYSCALL(uselib) SYSCALL(swapon) SYSCALL(reboot) -SYSX(sys_ni_syscall,compat_sys_old_readdir,old_readdir) +SYSX(sys_ni_syscall,compat_sys_old_readdir,sys_old_readdir) SYSCALL_SPU(mmap) SYSCALL_SPU(munmap) SYSCALL_SPU(truncate) diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S index 0af693e65764..a87ce076cfa2 100644 --- a/arch/sh/kernel/syscalls_32.S +++ b/arch/sh/kernel/syscalls_32.S @@ -105,7 +105,7 @@ ENTRY(sys_call_table) .long sys_uselib .long sys_swapon .long sys_reboot - .long old_readdir + .long sys_old_readdir .long old_mmap /* 90 */ .long sys_munmap .long sys_truncate diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S index 0b436aa3cad7..557cb91f5caf 100644 --- a/arch/sh/kernel/syscalls_64.S +++ b/arch/sh/kernel/syscalls_64.S @@ -109,7 +109,7 @@ sys_call_table: .long sys_uselib .long sys_swapon .long sys_reboot - .long old_readdir + .long sys_old_readdir .long old_mmap /* 90 */ .long sys_munmap .long sys_truncate diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 7d0807586442..8a434f51ba0f 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -56,7 +56,7 @@ sys_call_table: /*185*/ .long sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname /*190*/ .long sys_init_module, sys_personality, sparc_remap_file_pages, sys_epoll_create, sys_epoll_ctl /*195*/ .long sys_epoll_wait, sys_ioprio_set, sys_getppid, sparc_sigaction, sys_sgetmask -/*200*/ .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, old_readdir +/*200*/ .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_old_readdir /*205*/ .long sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64 /*210*/ .long sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo /*215*/ .long sys_ipc, sys_sigreturn, sys_clone, sys_ioprio_get, sys_adjtimex diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index d44395ff34c3..e2e86a08f31d 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -88,7 +88,7 @@ ENTRY(sys_call_table) .long sys_uselib .long sys_swapon .long sys_reboot - .long old_readdir + .long sys_old_readdir .long old_mmap /* 90 */ .long sys_munmap .long sys_truncate diff --git a/fs/readdir.c b/fs/readdir.c index b318d9b5af2e..8b4c2a0051a6 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -102,7 +102,7 @@ efault: return -EFAULT; } -asmlinkage long old_readdir(unsigned int fd, struct old_linux_dirent __user * dirent, unsigned int count) +asmlinkage long sys_old_readdir(unsigned int fd, struct old_linux_dirent __user * dirent, unsigned int count) { int error; struct file * file; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 22290eeaf553..ca079c3d09e3 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -54,6 +54,7 @@ struct compat_stat; struct 
compat_timeval; struct robust_list_head; struct getcpu_cache; +struct old_linux_dirent; #include #include @@ -608,6 +609,7 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); asmlinkage long sys_eventfd(unsigned int count); asmlinkage long sys_eventfd2(unsigned int count, int flags); asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); +asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); -- cgit v1.2.3 From 5cca0cf15a94417f49625ce52e23589eed0a1675 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Fri, 9 Jan 2009 14:35:20 -0800 Subject: x86, pat: fix reserve_memtype() for legacy 1MB range Thierry Vignaud reported: > http://bugzilla.kernel.org/show_bug.cgi?id=12372 > > On P4 with an SiS motherboard (video card is a SiS 651) > X server fails to start with error: > xf86MapVidMem: Could not mmap framebuffer (0x00000000,0x2000) (Invalid > argument) Here X is trying to map first 8KB of memory using /dev/mem. Existing code treats first 0-4KB of memory as non-RAM and 4KB-8KB as RAM. Recent code changes don't allow to map memory with different attributes at the same time. Fix this by treating the first 1MB legacy region as special and always track the attribute requests with in this region using linear linked list (and don't bother if the range is RAM or non-RAM or mixed) Reported-and-tested-by: Thierry Vignaud Signed-off-by: Suresh Siddha Signed-off-by: Venkatesh Pallipadi Signed-off-by: Ingo Molnar --- arch/x86/mm/pat.c | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 85cbd3cd3723..070ee4a3b225 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -333,11 +333,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, req_type & _PAGE_CACHE_MASK); } - is_range_ram = pagerange_is_ram(start, end); - if (is_range_ram == 1) - return reserve_ram_pages_type(start, end, req_type, new_type); - else if (is_range_ram < 0) - return -EINVAL; + /* + * For legacy reasons, some parts of the physical address range in the + * legacy 1MB region is treated as non-RAM (even when listed as RAM in + * the e820 tables). So we will track the memory attributes of this + * legacy 1MB region using the linear memtype_list always. + */ + if (end >= ISA_END_ADDRESS) { + is_range_ram = pagerange_is_ram(start, end); + if (is_range_ram == 1) + return reserve_ram_pages_type(start, end, req_type, + new_type); + else if (is_range_ram < 0) + return -EINVAL; + } new = kmalloc(sizeof(struct memtype), GFP_KERNEL); if (!new) @@ -437,11 +446,19 @@ int free_memtype(u64 start, u64 end) if (is_ISA_range(start, end - 1)) return 0; - is_range_ram = pagerange_is_ram(start, end); - if (is_range_ram == 1) - return free_ram_pages_type(start, end); - else if (is_range_ram < 0) - return -EINVAL; + /* + * For legacy reasons, some parts of the physical address range in the + * legacy 1MB region is treated as non-RAM (even when listed as RAM in + * the e820 tables). So we will track the memory attributes of this + * legacy 1MB region using the linear memtype_list always. 
+ */ + if (end >= ISA_END_ADDRESS) { + is_range_ram = pagerange_is_ram(start, end); + if (is_range_ram == 1) + return free_ram_pages_type(start, end); + else if (is_range_ram < 0) + return -EINVAL; + } spin_lock(&memtype_lock); list_for_each_entry(entry, &memtype_list, nd) { -- cgit v1.2.3 From 2ea038917bbdd51a7ae4a898c6a04641324dd033 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Wed, 14 Jan 2009 21:38:20 +0100 Subject: Revert "kbuild: strip generated symbols from *.ko" This reverts commit ad7a953c522ceb496611d127e51e278bfe0ff483. And commit: ("allow stripping of generated symbols under CONFIG_KALLSYMS_ALL") 9bb482476c6c9d1ae033306440c51ceac93ea80c These stripping patches has caused a set of issues: 1) People have reported compatibility issues with binutils due to lack of support for `--strip-unneeded-symbols' with objcopy 2.15.92.0.2 Reported by: Wenji 2) ccache and distcc no longer works as expeced Reported by: Ted, Roland, + others 3) The installed modules increased a lot in size Reported by: Ted, Davej + others Reported-by: Wenji Huang Reported-by: "Theodore Ts'o" Reported-by: Dave Jones Reported-by: Roland McGrath Signed-off-by: Sam Ravnborg --- Makefile | 59 ++++------- arch/x86/scripts/strip-symbols | 1 - init/Kconfig | 7 -- kernel/kallsyms.c | 16 +-- scripts/Makefile.build | 55 ++++------- scripts/Makefile.modinst | 3 +- scripts/genksyms/genksyms.c | 21 ++-- scripts/genksyms/keywords.c_shipped | 189 ++++++++++++++++++------------------ scripts/genksyms/keywords.gperf | 2 - scripts/kallsyms.c | 21 ++-- scripts/mksysmap | 7 +- scripts/strip-symbols | 22 ----- 12 files changed, 166 insertions(+), 237 deletions(-) delete mode 100644 arch/x86/scripts/strip-symbols delete mode 100644 scripts/strip-symbols (limited to 'arch/x86') diff --git a/Makefile b/Makefile index c06e250eca18..c2c4bbeef59d 100644 --- a/Makefile +++ b/Makefile @@ -606,25 +606,20 @@ export INSTALL_PATH ?= /boot MODLIB = $(INSTALL_MOD_PATH)/lib/modules/$(KERNELRELEASE) export MODLIB -strip-symbols := $(srctree)/scripts/strip-symbols \ - $(wildcard $(srctree)/arch/$(ARCH)/scripts/strip-symbols) - # -# INSTALL_MOD_STRIP, if defined, will cause modules to be stripped while -# they get installed. If INSTALL_MOD_STRIP is '1', then the default -# options (see below) will be used. Otherwise, INSTALL_MOD_STRIP will -# be used as the option(s) to the objcopy command. +# INSTALL_MOD_STRIP, if defined, will cause modules to be +# stripped after they are installed. If INSTALL_MOD_STRIP is '1', then +# the default option --strip-debug will be used. Otherwise, +# INSTALL_MOD_STRIP will used as the options to the strip command. 
+ ifdef INSTALL_MOD_STRIP ifeq ($(INSTALL_MOD_STRIP),1) -mod_strip_cmd = $(OBJCOPY) --strip-debug -ifeq ($(CONFIG_KALLSYMS_ALL),$(CONFIG_KALLSYMS_STRIP_GENERATED)) -mod_strip_cmd += --wildcard $(addprefix --strip-symbols ,$(strip-symbols)) -endif +mod_strip_cmd = $(STRIP) --strip-debug else -mod_strip_cmd = $(OBJCOPY) $(INSTALL_MOD_STRIP) +mod_strip_cmd = $(STRIP) $(INSTALL_MOD_STRIP) endif # INSTALL_MOD_STRIP=1 else -mod_strip_cmd = false +mod_strip_cmd = true endif # INSTALL_MOD_STRIP export mod_strip_cmd @@ -754,7 +749,6 @@ last_kallsyms := 2 endif kallsyms.o := .tmp_kallsyms$(last_kallsyms).o -kallsyms.h := $(wildcard include/config/kallsyms/*.h) $(wildcard include/config/kallsyms/*/*.h) define verify_kallsyms $(Q)$(if $($(quiet)cmd_sysmap), \ @@ -779,41 +773,24 @@ endef # Generate .S file with all kernel symbols quiet_cmd_kallsyms = KSYM $@ - cmd_kallsyms = { test $* -eq 0 || $(NM) -n $<; } \ - | $(KALLSYMS) $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) >$@ - -quiet_cmd_kstrip = STRIP $@ - cmd_kstrip = $(OBJCOPY) --wildcard $(addprefix --strip$(if $(CONFIG_RELOCATABLE),-unneeded)-symbols ,$(filter %/scripts/strip-symbols,$^)) $< $@ + cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \ + $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@ -$(foreach n,0 1 2 3,.tmp_kallsyms$(n).o): KBUILD_AFLAGS += -Wa,--strip-local-absolute -$(foreach n,0 1 2 3,.tmp_kallsyms$(n).o): %.o: %.S scripts FORCE +.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE $(call if_changed_dep,as_o_S) -ifeq ($(CONFIG_KALLSYMS_STRIP_GENERATED),y) -strip-ext := .stripped -endif - -.tmp_kallsyms%.S: .tmp_vmlinux%$(strip-ext) $(KALLSYMS) $(kallsyms.h) +.tmp_kallsyms%.S: .tmp_vmlinux% $(KALLSYMS) $(call cmd,kallsyms) -# make -jN seems to have problems with intermediate files, see bug #3330. -.SECONDARY: $(foreach n,1 2 3,.tmp_vmlinux$(n).stripped) -.tmp_vmlinux%.stripped: .tmp_vmlinux% $(strip-symbols) $(kallsyms.h) - $(call cmd,kstrip) - -ifneq ($(CONFIG_DEBUG_INFO),y) -.tmp_vmlinux%: LDFLAGS_vmlinux += -S -endif # .tmp_vmlinux1 must be complete except kallsyms, so update vmlinux version -.tmp_vmlinux%: $(vmlinux-lds) $(vmlinux-all) FORCE - $(if $(filter 1,$*),$(call if_changed_rule,ksym_ld),$(call if_changed,vmlinux__)) +.tmp_vmlinux1: $(vmlinux-lds) $(vmlinux-all) FORCE + $(call if_changed_rule,ksym_ld) -.tmp_vmlinux0$(strip-ext): - $(Q)echo "placeholder" >$@ +.tmp_vmlinux2: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms1.o FORCE + $(call if_changed,vmlinux__) -.tmp_vmlinux1: .tmp_kallsyms0.o -.tmp_vmlinux2: .tmp_kallsyms1.o -.tmp_vmlinux3: .tmp_kallsyms2.o +.tmp_vmlinux3: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms2.o FORCE + $(call if_changed,vmlinux__) # Needs to visit scripts/ before $(KALLSYMS) can be used. $(KALLSYMS): scripts ; diff --git a/arch/x86/scripts/strip-symbols b/arch/x86/scripts/strip-symbols deleted file mode 100644 index a2f1ccb827c7..000000000000 --- a/arch/x86/scripts/strip-symbols +++ /dev/null @@ -1 +0,0 @@ -__cpu_vendor_dev_X86_VENDOR_* diff --git a/init/Kconfig b/init/Kconfig index a724a149bf3f..0e9924743a17 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -626,13 +626,6 @@ config KALLSYMS_ALL Say N. -config KALLSYMS_STRIP_GENERATED - bool "Strip machine generated symbols from kallsyms" - depends on KALLSYMS_ALL - default y - help - Say N if you want kallsyms to retain even machine generated symbols. 
- config KALLSYMS_EXTRA_PASS bool "Do an extra kallsyms pass" depends on KALLSYMS diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index e694afa0eb8c..7b8b0f21a5b1 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -30,19 +30,20 @@ #define all_var 0 #endif -extern const unsigned long kallsyms_addresses[]; -extern const u8 kallsyms_names[]; +/* These will be re-linked against their real values during the second link stage */ +extern const unsigned long kallsyms_addresses[] __attribute__((weak)); +extern const u8 kallsyms_names[] __attribute__((weak)); /* tell the compiler that the count isn't in the small data section if the arch * has one (eg: FRV) */ extern const unsigned long kallsyms_num_syms - __attribute__((__section__(".rodata"))); +__attribute__((weak, section(".rodata"))); -extern const u8 kallsyms_token_table[]; -extern const u16 kallsyms_token_index[]; +extern const u8 kallsyms_token_table[] __attribute__((weak)); +extern const u16 kallsyms_token_index[] __attribute__((weak)); -extern const unsigned long kallsyms_markers[]; +extern const unsigned long kallsyms_markers[] __attribute__((weak)); static inline int is_kernel_inittext(unsigned long addr) { @@ -167,6 +168,9 @@ static unsigned long get_symbol_pos(unsigned long addr, unsigned long symbol_start = 0, symbol_end = 0; unsigned long i, low, high, mid; + /* This kernel should never had been booted. */ + BUG_ON(!kallsyms_addresses); + /* do a binary search on the sorted kallsyms_addresses array */ low = 0; high = kallsyms_num_syms; diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 5d900307de3e..c7de8b39fcf1 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -151,16 +151,16 @@ cmd_cc_i_c = $(CPP) $(c_flags) -o $@ $< $(obj)/%.i: $(src)/%.c FORCE $(call if_changed_dep,cc_i_c) -cmd_genksyms = \ +cmd_gensymtypes = \ $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ - $(GENKSYMS) -T $@ -A -a $(ARCH) \ + $(GENKSYMS) -T $@ -a $(ARCH) \ $(if $(KBUILD_PRESERVE),-p) \ $(if $(1),-r $(firstword $(wildcard $(@:.symtypes=.symref) /dev/null))) quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ cmd_cc_symtypes_c = \ set -e; \ - $(call cmd_genksyms, true) >/dev/null; \ + $(call cmd_gensymtypes, true) >/dev/null; \ test -s $@ || rm -f $@ $(obj)/%.symtypes : $(src)/%.c FORCE @@ -177,38 +177,28 @@ cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $< else # When module versioning is enabled the following steps are executed: -# o compile a .tmp_.s from .c -# o if .tmp_.s doesn't contain a __ksymtab version, i.e. does -# not export symbols, we just assemble .tmp_.s to .o and +# o compile a .tmp_.o from .c +# o if .tmp_.o doesn't contain a __ksymtab version, i.e. does +# not export symbols, we just rename .tmp_.o to .o and # are done. # o otherwise, we calculate symbol versions using the good old # genksyms on the preprocessed source and postprocess them in a way -# that they are usable as assembly source -# o assemble .o from .tmp_.s forcing inclusion of directives -# defining the actual values of __crc_*, followed by objcopy-ing them -# to force these symbols to be local to permit stripping them later. 
-s_file = $(@D)/.tmp_$(@F:.o=.s) -v_file = $(@D)/.tmp_$(@F:.o=.v) -tmp_o_file = $(@D)/.tmp_$(@F) -no_g_c_flags = $(filter-out -g%,$(c_flags)) - -cmd_cc_o_c = $(CC) $(c_flags) -S -o $(s_file) $< +# that they are usable as a linker script +# o generate .o from .tmp_.o using the linker to +# replace the unresolved symbols __crc_exported_symbol with +# the actual value of the checksum generated by genksyms +cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $< cmd_modversions = \ - if grep -q __ksymtab $(s_file); then \ - if $(call cmd_genksyms, $(KBUILD_SYMTYPES)) > $(v_file) \ - && $(CC) $(no_g_c_flags) -c -Wa,$(v_file) \ - -o $(tmp_o_file) $(s_file) \ - && $(OBJCOPY) -L '__crc_*' -L '___crc_*' -w \ - $(tmp_o_file) $@; \ - then \ - : ; \ - else \ - rm -f $@; exit 1; \ - fi; \ + if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ + $(call cmd_gensymtypes, $(KBUILD_SYMTYPES)) \ + > $(@D)/.tmp_$(@F:.o=.ver); \ + \ + $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ + -T $(@D)/.tmp_$(@F:.o=.ver); \ + rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \ else \ - rm -f $(v_file); \ - $(CC) $(no_g_c_flags) -c -o $@ $(s_file); \ + mv -f $(@D)/.tmp_$(@F) $@; \ fi; endif @@ -225,12 +215,7 @@ define rule_cc_o_c $(cmd_record_mcount) \ scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \ $(dot-target).tmp; \ - if [ -r $(@D)/.tmp_$(@F:.o=.v) ]; then \ - echo >> $(dot-target).tmp; \ - echo '$@: $(GENKSYMS)' >> $(dot-target).tmp; \ - echo '$(GENKSYMS):: ;' >> $(dot-target).tmp; \ - fi; \ - rm -f $(depfile) $(@D)/.tmp_$(@F:.o=.?); \ + rm -f $(depfile); \ mv -f $(dot-target).tmp $(dot-target).cmd endef diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst index a5122dce1264..efa5d940e632 100644 --- a/scripts/Makefile.modinst +++ b/scripts/Makefile.modinst @@ -17,8 +17,7 @@ __modinst: $(modules) @: quiet_cmd_modules_install = INSTALL $@ - cmd_modules_install = mkdir -p $(2); \ - $(mod_strip_cmd) $@ $(2)/$(notdir $@) || cp $@ $(2) + cmd_modules_install = mkdir -p $(2); cp $@ $(2) ; $(mod_strip_cmd) $(2)/$(notdir $@) # Modules built outside the kernel source tree go into extra by default INSTALL_MOD_DIR ?= extra diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c index f8bb4cabd62d..3a8297b5184c 100644 --- a/scripts/genksyms/genksyms.c +++ b/scripts/genksyms/genksyms.c @@ -43,7 +43,7 @@ int cur_line = 1; char *cur_filename; static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types, - flag_preserve, flag_warnings, flag_asm; + flag_preserve, flag_warnings; static const char *arch = ""; static const char *mod_prefix = ""; @@ -610,11 +610,8 @@ void export_symbol(const char *name) if (flag_dump_defs) fputs(">\n", debugfile); - /* Used as assembly source or a linker script. */ - printf(flag_asm - ? ".equiv %s__crc_%s, %#08lx\n" - : "%s__crc_%s = %#08lx ;\n", - mod_prefix, name, crc); + /* Used as a linker script. */ + printf("%s__crc_%s = 0x%08lx ;\n", mod_prefix, name, crc); } } @@ -651,10 +648,9 @@ void error_with_pos(const char *fmt, ...) 
static void genksyms_usage(void) { - fputs("Usage:\n" "genksyms [-aAdDTwqhV] > /path/to/.tmp_obj.ver\n" "\n" + fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n" #ifdef __GNU_LIBRARY__ " -a, --arch Select architecture\n" - " -A, --asm Generate assembly rather than linker script\n" " -d, --debug Increment the debug level (repeatable)\n" " -D, --dump Dump expanded symbol defs (for debugging only)\n" " -r, --reference file Read reference symbols from a file\n" @@ -666,7 +662,6 @@ static void genksyms_usage(void) " -V, --version Print the release version\n" #else /* __GNU_LIBRARY__ */ " -a Select architecture\n" - " -A Generate assembly rather than linker script\n" " -d Increment the debug level (repeatable)\n" " -D Dump expanded symbol defs (for debugging only)\n" " -r file Read reference symbols from a file\n" @@ -688,7 +683,6 @@ int main(int argc, char **argv) #ifdef __GNU_LIBRARY__ struct option long_opts[] = { {"arch", 1, 0, 'a'}, - {"asm", 0, 0, 'A'}, {"debug", 0, 0, 'd'}, {"warnings", 0, 0, 'w'}, {"quiet", 0, 0, 'q'}, @@ -701,10 +695,10 @@ int main(int argc, char **argv) {0, 0, 0, 0} }; - while ((o = getopt_long(argc, argv, "a:dwqVADr:T:ph", + while ((o = getopt_long(argc, argv, "a:dwqVDr:T:ph", &long_opts[0], NULL)) != EOF) #else /* __GNU_LIBRARY__ */ - while ((o = getopt(argc, argv, "a:dwqVADr:T:ph")) != EOF) + while ((o = getopt(argc, argv, "a:dwqVDr:T:ph")) != EOF) #endif /* __GNU_LIBRARY__ */ switch (o) { case 'a': @@ -722,9 +716,6 @@ int main(int argc, char **argv) case 'V': fputs("genksyms version 2.5.60\n", stderr); break; - case 'A': - flag_asm = 1; - break; case 'D': flag_dump_defs = 1; break; diff --git a/scripts/genksyms/keywords.c_shipped b/scripts/genksyms/keywords.c_shipped index 83484fe93ede..971e0113ae7a 100644 --- a/scripts/genksyms/keywords.c_shipped +++ b/scripts/genksyms/keywords.c_shipped @@ -1,4 +1,4 @@ -/* ANSI-C code produced by gperf version 3.0.1 */ +/* ANSI-C code produced by gperf version 3.0.2 */ /* Command-line: gperf -L ANSI-C -a -C -E -g -H is_reserved_hash -k '1,3,$' -N is_reserved_word -p -t scripts/genksyms/keywords.gperf */ #if !((' ' == 32) && ('!' 
== 33) && ('"' == 34) && ('#' == 35) \ @@ -32,7 +32,7 @@ #line 3 "scripts/genksyms/keywords.gperf" struct resword { const char *name; int token; }; -/* maximum key range = 64, duplicates = 0 */ +/* maximum key range = 62, duplicates = 0 */ #ifdef __GNUC__ __inline @@ -46,32 +46,32 @@ is_reserved_hash (register const char *str, register unsigned int len) { static const unsigned char asso_values[] = { - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 0, - 67, 67, 67, 67, 67, 67, 15, 67, 67, 67, - 0, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 0, 67, 0, 67, 5, - 25, 20, 15, 30, 67, 15, 67, 67, 10, 0, - 10, 40, 20, 67, 10, 5, 0, 10, 15, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, - 67, 67, 67, 67, 67, 67 + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 5, + 65, 65, 65, 65, 65, 65, 35, 65, 65, 65, + 0, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 0, 65, 0, 65, 5, + 20, 15, 10, 30, 65, 15, 65, 65, 20, 0, + 10, 35, 20, 65, 10, 5, 0, 10, 5, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65 }; return len + asso_values[(unsigned char)str[2]] + asso_values[(unsigned char)str[0]] + asso_values[(unsigned char)str[len - 1]]; } @@ -84,119 +84,116 @@ is_reserved_word (register const char *str, register unsigned int len) { enum { - TOTAL_KEYWORDS = 45, + TOTAL_KEYWORDS = 43, MIN_WORD_LENGTH = 3, MAX_WORD_LENGTH = 24, MIN_HASH_VALUE = 3, - MAX_HASH_VALUE = 66 + MAX_HASH_VALUE = 64 }; static const struct resword wordlist[] = { {""}, {""}, {""}, -#line 28 "scripts/genksyms/keywords.gperf" +#line 26 "scripts/genksyms/keywords.gperf" {"asm", ASM_KEYW}, {""}, -#line 10 "scripts/genksyms/keywords.gperf" +#line 8 "scripts/genksyms/keywords.gperf" {"__asm", ASM_KEYW}, {""}, -#line 11 "scripts/genksyms/keywords.gperf" +#line 9 "scripts/genksyms/keywords.gperf" {"__asm__", ASM_KEYW}, {""}, {""}, -#line 54 "scripts/genksyms/keywords.gperf" +#line 52 "scripts/genksyms/keywords.gperf" {"__typeof__", TYPEOF_KEYW}, {""}, -#line 14 "scripts/genksyms/keywords.gperf" +#line 12 "scripts/genksyms/keywords.gperf" 
{"__const", CONST_KEYW}, -#line 13 "scripts/genksyms/keywords.gperf" +#line 11 "scripts/genksyms/keywords.gperf" {"__attribute__", ATTRIBUTE_KEYW}, -#line 15 "scripts/genksyms/keywords.gperf" +#line 13 "scripts/genksyms/keywords.gperf" {"__const__", CONST_KEYW}, -#line 20 "scripts/genksyms/keywords.gperf" +#line 18 "scripts/genksyms/keywords.gperf" {"__signed__", SIGNED_KEYW}, -#line 46 "scripts/genksyms/keywords.gperf" +#line 44 "scripts/genksyms/keywords.gperf" {"static", STATIC_KEYW}, - {""}, -#line 41 "scripts/genksyms/keywords.gperf" +#line 20 "scripts/genksyms/keywords.gperf" + {"__volatile__", VOLATILE_KEYW}, +#line 39 "scripts/genksyms/keywords.gperf" {"int", INT_KEYW}, -#line 34 "scripts/genksyms/keywords.gperf" +#line 32 "scripts/genksyms/keywords.gperf" {"char", CHAR_KEYW}, -#line 35 "scripts/genksyms/keywords.gperf" +#line 33 "scripts/genksyms/keywords.gperf" {"const", CONST_KEYW}, -#line 47 "scripts/genksyms/keywords.gperf" +#line 45 "scripts/genksyms/keywords.gperf" {"struct", STRUCT_KEYW}, -#line 26 "scripts/genksyms/keywords.gperf" +#line 24 "scripts/genksyms/keywords.gperf" {"__restrict__", RESTRICT_KEYW}, -#line 27 "scripts/genksyms/keywords.gperf" - {"restrict", RESTRICT_KEYW}, -#line 7 "scripts/genksyms/keywords.gperf" - {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW}, -#line 18 "scripts/genksyms/keywords.gperf" - {"__inline__", INLINE_KEYW}, - {""}, -#line 22 "scripts/genksyms/keywords.gperf" - {"__volatile__", VOLATILE_KEYW}, -#line 5 "scripts/genksyms/keywords.gperf" - {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW}, #line 25 "scripts/genksyms/keywords.gperf" + {"restrict", RESTRICT_KEYW}, +#line 23 "scripts/genksyms/keywords.gperf" {"_restrict", RESTRICT_KEYW}, - {""}, -#line 12 "scripts/genksyms/keywords.gperf" - {"__attribute", ATTRIBUTE_KEYW}, -#line 6 "scripts/genksyms/keywords.gperf" - {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, #line 16 "scripts/genksyms/keywords.gperf" + {"__inline__", INLINE_KEYW}, +#line 10 "scripts/genksyms/keywords.gperf" + {"__attribute", ATTRIBUTE_KEYW}, + {""}, +#line 14 "scripts/genksyms/keywords.gperf" {"__extension__", EXTENSION_KEYW}, -#line 37 "scripts/genksyms/keywords.gperf" +#line 35 "scripts/genksyms/keywords.gperf" {"enum", ENUM_KEYW}, -#line 8 "scripts/genksyms/keywords.gperf" - {"EXPORT_UNUSED_SYMBOL", EXPORT_SYMBOL_KEYW}, -#line 38 "scripts/genksyms/keywords.gperf" +#line 19 "scripts/genksyms/keywords.gperf" + {"__volatile", VOLATILE_KEYW}, +#line 36 "scripts/genksyms/keywords.gperf" {"extern", EXTERN_KEYW}, {""}, -#line 19 "scripts/genksyms/keywords.gperf" +#line 17 "scripts/genksyms/keywords.gperf" {"__signed", SIGNED_KEYW}, -#line 9 "scripts/genksyms/keywords.gperf" - {"EXPORT_UNUSED_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, -#line 49 "scripts/genksyms/keywords.gperf" - {"union", UNION_KEYW}, -#line 53 "scripts/genksyms/keywords.gperf" +#line 7 "scripts/genksyms/keywords.gperf" + {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW}, + {""}, +#line 51 "scripts/genksyms/keywords.gperf" {"typeof", TYPEOF_KEYW}, -#line 48 "scripts/genksyms/keywords.gperf" +#line 46 "scripts/genksyms/keywords.gperf" {"typedef", TYPEDEF_KEYW}, -#line 17 "scripts/genksyms/keywords.gperf" +#line 15 "scripts/genksyms/keywords.gperf" {"__inline", INLINE_KEYW}, -#line 33 "scripts/genksyms/keywords.gperf" +#line 31 "scripts/genksyms/keywords.gperf" {"auto", AUTO_KEYW}, -#line 21 "scripts/genksyms/keywords.gperf" - {"__volatile", VOLATILE_KEYW}, +#line 47 "scripts/genksyms/keywords.gperf" + {"union", UNION_KEYW}, {""}, {""}, -#line 50 "scripts/genksyms/keywords.gperf" 
+#line 48 "scripts/genksyms/keywords.gperf" {"unsigned", UNSIGNED_KEYW}, - {""}, -#line 44 "scripts/genksyms/keywords.gperf" +#line 49 "scripts/genksyms/keywords.gperf" + {"void", VOID_KEYW}, +#line 42 "scripts/genksyms/keywords.gperf" {"short", SHORT_KEYW}, -#line 40 "scripts/genksyms/keywords.gperf" + {""}, {""}, +#line 50 "scripts/genksyms/keywords.gperf" + {"volatile", VOLATILE_KEYW}, + {""}, +#line 37 "scripts/genksyms/keywords.gperf" + {"float", FLOAT_KEYW}, +#line 34 "scripts/genksyms/keywords.gperf" + {"double", DOUBLE_KEYW}, + {""}, +#line 5 "scripts/genksyms/keywords.gperf" + {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW}, + {""}, {""}, +#line 38 "scripts/genksyms/keywords.gperf" {"inline", INLINE_KEYW}, +#line 6 "scripts/genksyms/keywords.gperf" + {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, +#line 41 "scripts/genksyms/keywords.gperf" + {"register", REGISTER_KEYW}, {""}, -#line 52 "scripts/genksyms/keywords.gperf" - {"volatile", VOLATILE_KEYW}, -#line 42 "scripts/genksyms/keywords.gperf" - {"long", LONG_KEYW}, -#line 24 "scripts/genksyms/keywords.gperf" +#line 22 "scripts/genksyms/keywords.gperf" {"_Bool", BOOL_KEYW}, - {""}, {""}, #line 43 "scripts/genksyms/keywords.gperf" - {"register", REGISTER_KEYW}, -#line 51 "scripts/genksyms/keywords.gperf" - {"void", VOID_KEYW}, -#line 39 "scripts/genksyms/keywords.gperf" - {"float", FLOAT_KEYW}, -#line 36 "scripts/genksyms/keywords.gperf" - {"double", DOUBLE_KEYW}, - {""}, {""}, {""}, {""}, -#line 45 "scripts/genksyms/keywords.gperf" - {"signed", SIGNED_KEYW} + {"signed", SIGNED_KEYW}, + {""}, {""}, +#line 40 "scripts/genksyms/keywords.gperf" + {"long", LONG_KEYW} }; if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH) diff --git a/scripts/genksyms/keywords.gperf b/scripts/genksyms/keywords.gperf index 8abe7ab8d88f..5ef3733225fb 100644 --- a/scripts/genksyms/keywords.gperf +++ b/scripts/genksyms/keywords.gperf @@ -5,8 +5,6 @@ struct resword { const char *name; int token; } EXPORT_SYMBOL, EXPORT_SYMBOL_KEYW EXPORT_SYMBOL_GPL, EXPORT_SYMBOL_KEYW EXPORT_SYMBOL_GPL_FUTURE, EXPORT_SYMBOL_KEYW -EXPORT_UNUSED_SYMBOL, EXPORT_SYMBOL_KEYW -EXPORT_UNUSED_SYMBOL_GPL, EXPORT_SYMBOL_KEYW __asm, ASM_KEYW __asm__, ASM_KEYW __attribute, ATTRIBUTE_KEYW diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 92758120a767..ad2434b26970 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -130,9 +130,18 @@ static int read_symbol(FILE *in, struct sym_entry *s) static int symbol_valid(struct sym_entry *s) { /* Symbols which vary between passes. Passes 1 and 2 must have - * identical symbol lists. + * identical symbol lists. The kallsyms_* symbols below are only added + * after pass 1, they would be included in pass 2 when --all-symbols is + * specified so exclude them to get a stable symbol list. */ static char *special_symbols[] = { + "kallsyms_addresses", + "kallsyms_num_syms", + "kallsyms_names", + "kallsyms_markers", + "kallsyms_token_table", + "kallsyms_token_index", + /* Exclude linker generated symbols which vary between passes */ "_SDA_BASE_", /* ppc */ "_SDA2_BASE_", /* ppc */ @@ -164,9 +173,7 @@ static int symbol_valid(struct sym_entry *s) } /* Exclude symbols which vary between passes. 
*/ - if (strstr((char *)s->sym + offset, "_compiled.") || - strncmp((char*)s->sym + offset, "__compound_literal.", 19) == 0 || - strncmp((char*)s->sym + offset, "__compound_literal$", 19) == 0) + if (strstr((char *)s->sym + offset, "_compiled.")) return 0; for (i = 0; special_symbols[i]; i++) @@ -543,10 +550,8 @@ int main(int argc, char **argv) usage(); read_map(stdin); - if (table_cnt) { - sort_symbols(); - optimize_token_table(); - } + sort_symbols(); + optimize_token_table(); write_src(); return 0; diff --git a/scripts/mksysmap b/scripts/mksysmap index 1db316a3712b..6e133a0bae7a 100644 --- a/scripts/mksysmap +++ b/scripts/mksysmap @@ -37,6 +37,9 @@ # readprofile starts reading symbols when _stext is found, and # continue until it finds a symbol which is not either of 'T', 't', -# 'W' or 'w'. +# 'W' or 'w'. __crc_ are 'A' and placed in the middle +# so we just ignore them to let readprofile continue to work. +# (At least sparc64 has __crc_ in the middle). + +$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)' > $2 -$NM -n $1 | grep -v '\( [aNUw] \)\|\( \$[adt]\)' > $2 diff --git a/scripts/strip-symbols b/scripts/strip-symbols deleted file mode 100644 index 29ee8c1a014b..000000000000 --- a/scripts/strip-symbols +++ /dev/null @@ -1,22 +0,0 @@ -<*> -*.h -__compound_literal[$.][0-9]* -__crc_[a-zA-Z_]* -__exitcall_[a-zA-Z_]* -__func__[$.][0-9]* -__FUNCTION__[$.][0-9]* -gcc[0-9]_compiled[$.] -__initcall_[a-zA-Z_]* -__kcrctab_[a-zA-Z_]* -__kstrtab_[a-zA-Z_]* -__ksymtab_[a-zA-Z_]* -__mod_[a-zA-Z_]*[0-9] -__module_depends -__param_[a-zA-Z_]* -__pci_fixup_*PCI_ANY_IDPCI_ANY_ID* -__pci_fixup_*PCI_ANY_IDPCI_DEVICE_ID_* -__pci_fixup_*PCI_VENDOR_ID_*PCI_ANY_ID* -__pci_fixup_*PCI_VENDOR_ID_*PCI_DEVICE_ID_* -__PRETTY_FUNCTION__[$.][0-9]* -__setup_[a-zA-Z_]* -____versions -- cgit v1.2.3 From 74d96f018673759d04d032c137d132f6447bfb1e Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Tue, 13 Jan 2009 19:27:09 -0800 Subject: byteorder: make swab.h include asm/swab.h like a regular header Add swab.h to kbuild.asm and remove the individual entries from each arch, mark as unifdef as some arches have some kernel-only bits inside. 
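As a sketch of the resulting include layout (assumed from the commit message above, since the one-line include/linux/swab.h hunk itself lies outside the excerpted diffs): linux/swab.h now includes the arch header directly, so each per-arch byteorder.h drops its own swab include, which is the repeated one-line deletion visible in the hunks below.

    /* include/linux/swab.h, per "make swab.h include asm/swab.h like a
     * regular header": the arch-specific swab helpers are pulled in once,
     * centrally. */
    #include <asm/swab.h>

    /* arch/<arch>/include/asm/byteorder.h then keeps only its endianness
     * include; its separate swab include goes away, as the per-arch
     * "-#include" deletions below show. */
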
Signed-off-by: Harvey Harrison Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/Kbuild | 1 - arch/alpha/include/asm/byteorder.h | 1 - arch/arm/include/asm/Kbuild | 1 - arch/arm/include/asm/byteorder.h | 2 -- arch/avr32/include/asm/Kbuild | 1 - arch/avr32/include/asm/byteorder.h | 1 - arch/blackfin/include/asm/Kbuild | 1 - arch/blackfin/include/asm/byteorder.h | 1 - arch/cris/include/asm/Kbuild | 1 - arch/cris/include/asm/byteorder.h | 1 - arch/h8300/include/asm/Kbuild | 1 - arch/h8300/include/asm/byteorder.h | 1 - arch/ia64/include/asm/Kbuild | 1 - arch/ia64/include/asm/byteorder.h | 1 - arch/m68knommu/include/asm/Kbuild | 2 -- arch/m68knommu/include/asm/byteorder.h | 1 - arch/mips/include/asm/Kbuild | 1 - arch/mips/include/asm/byteorder.h | 2 -- arch/parisc/include/asm/Kbuild | 1 - arch/parisc/include/asm/byteorder.h | 1 - arch/powerpc/include/asm/Kbuild | 1 - arch/powerpc/include/asm/byteorder.h | 2 -- arch/s390/include/asm/Kbuild | 1 - arch/s390/include/asm/byteorder.h | 1 - arch/sh/include/asm/Kbuild | 1 - arch/sh/include/asm/byteorder.h | 2 -- arch/sparc/include/asm/Kbuild | 1 - arch/sparc/include/asm/byteorder.h | 1 - arch/x86/include/asm/Kbuild | 1 - arch/x86/include/asm/byteorder.h | 1 - arch/xtensa/include/asm/Kbuild | 2 -- arch/xtensa/include/asm/byteorder.h | 2 -- include/asm-frv/Kbuild | 1 - include/asm-frv/byteorder.h | 1 - include/asm-generic/Kbuild.asm | 1 + include/asm-m32r/Kbuild | 1 - include/asm-m32r/byteorder.h | 2 -- include/asm-m68k/Kbuild | 1 - include/asm-m68k/byteorder.h | 1 - include/asm-mn10300/Kbuild | 1 - include/asm-mn10300/byteorder.h | 1 - include/linux/swab.h | 2 +- 42 files changed, 2 insertions(+), 49 deletions(-) (limited to 'arch/x86') diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index 4dad27360576..b7c8f188b313 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -9,4 +9,3 @@ unifdef-y += console.h unifdef-y += fpu.h unifdef-y += sysinfo.h unifdef-y += compiler.h -unifdef-y += swab.h diff --git a/arch/alpha/include/asm/byteorder.h b/arch/alpha/include/asm/byteorder.h index 6772f3168701..73683093202d 100644 --- a/arch/alpha/include/asm/byteorder.h +++ b/arch/alpha/include/asm/byteorder.h @@ -1,7 +1,6 @@ #ifndef _ALPHA_BYTEORDER_H #define _ALPHA_BYTEORDER_H -#include #include #endif /* _ALPHA_BYTEORDER_H */ diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 43b0b2ba392f..73237bd130a2 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -1,4 +1,3 @@ include include/asm-generic/Kbuild.asm unifdef-y += hwcap.h -unifdef-y += swab.h diff --git a/arch/arm/include/asm/byteorder.h b/arch/arm/include/asm/byteorder.h index c02b6fc28e1a..77379748b171 100644 --- a/arch/arm/include/asm/byteorder.h +++ b/arch/arm/include/asm/byteorder.h @@ -15,8 +15,6 @@ #ifndef __ASM_ARM_BYTEORDER_H #define __ASM_ARM_BYTEORDER_H -#include - #ifdef __ARMEB__ #include #else diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index 219822c8ad18..3136628ba8d2 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -1,4 +1,3 @@ include include/asm-generic/Kbuild.asm -header-y += swab.h header-y += cachectl.h diff --git a/arch/avr32/include/asm/byteorder.h b/arch/avr32/include/asm/byteorder.h index 2aba64b4e122..50abc21619a8 100644 --- a/arch/avr32/include/asm/byteorder.h +++ b/arch/avr32/include/asm/byteorder.h @@ -4,7 +4,6 @@ #ifndef __ASM_AVR32_BYTEORDER_H #define __ASM_AVR32_BYTEORDER_H -#include #include #endif /* 
__ASM_AVR32_BYTEORDER_H */ diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index d0d1ac435544..606ecfdcc962 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -1,4 +1,3 @@ include include/asm-generic/Kbuild.asm unifdef-y += fixed_code.h -unifdef-y += swab.h diff --git a/arch/blackfin/include/asm/byteorder.h b/arch/blackfin/include/asm/byteorder.h index b9e797a497b4..3e69106a4d37 100644 --- a/arch/blackfin/include/asm/byteorder.h +++ b/arch/blackfin/include/asm/byteorder.h @@ -1,7 +1,6 @@ #ifndef _BLACKFIN_BYTEORDER_H #define _BLACKFIN_BYTEORDER_H -#include #include #endif /* _BLACKFIN_BYTEORDER_H */ diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index b79b7c6543a6..d5b631935ec8 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -9,4 +9,3 @@ header-y += sync_serial.h unifdef-y += etraxgpio.h unifdef-y += rs485.h -unifdef-y += swab.h diff --git a/arch/cris/include/asm/byteorder.h b/arch/cris/include/asm/byteorder.h index 7678d86317ae..bcd189798e26 100644 --- a/arch/cris/include/asm/byteorder.h +++ b/arch/cris/include/asm/byteorder.h @@ -1,7 +1,6 @@ #ifndef _CRIS_BYTEORDER_H #define _CRIS_BYTEORDER_H -#include #include #endif diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 27b108a86b39..c68e1680da01 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -1,2 +1 @@ include include/asm-generic/Kbuild.asm -unifdef-y += swab.h diff --git a/arch/h8300/include/asm/byteorder.h b/arch/h8300/include/asm/byteorder.h index c36b80a3dd84..13539da99efd 100644 --- a/arch/h8300/include/asm/byteorder.h +++ b/arch/h8300/include/asm/byteorder.h @@ -1,7 +1,6 @@ #ifndef _H8300_BYTEORDER_H #define _H8300_BYTEORDER_H -#include #include #endif /* _H8300_BYTEORDER_H */ diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 3b25bd9dca91..ccbe8ae47a61 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -14,4 +14,3 @@ unifdef-y += gcc_intrin.h unifdef-y += intrinsics.h unifdef-y += perfmon.h unifdef-y += ustack.h -unifdef-y += swab.h diff --git a/arch/ia64/include/asm/byteorder.h b/arch/ia64/include/asm/byteorder.h index 0f84c5cb703d..a8dd73558150 100644 --- a/arch/ia64/include/asm/byteorder.h +++ b/arch/ia64/include/asm/byteorder.h @@ -1,7 +1,6 @@ #ifndef _ASM_IA64_BYTEORDER_H #define _ASM_IA64_BYTEORDER_H -#include #include #endif /* _ASM_IA64_BYTEORDER_H */ diff --git a/arch/m68knommu/include/asm/Kbuild b/arch/m68knommu/include/asm/Kbuild index 58c02a454130..c68e1680da01 100644 --- a/arch/m68knommu/include/asm/Kbuild +++ b/arch/m68knommu/include/asm/Kbuild @@ -1,3 +1 @@ include include/asm-generic/Kbuild.asm - -unifdef-y += swab.h diff --git a/arch/m68knommu/include/asm/byteorder.h b/arch/m68knommu/include/asm/byteorder.h index a6f0b8f7f622..9c6c76a15041 100644 --- a/arch/m68knommu/include/asm/byteorder.h +++ b/arch/m68knommu/include/asm/byteorder.h @@ -1,7 +1,6 @@ #ifndef _M68KNOMMU_BYTEORDER_H #define _M68KNOMMU_BYTEORDER_H -#include #include #endif /* _M68KNOMMU_BYTEORDER_H */ diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 023866c0c102..7897f05e3165 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,4 +1,3 @@ include include/asm-generic/Kbuild.asm header-y += cachectl.h sgidefs.h sysmips.h -header-y += swab.h diff --git a/arch/mips/include/asm/byteorder.h b/arch/mips/include/asm/byteorder.h index 
607b71830707..9579051ff1c7 100644 --- a/arch/mips/include/asm/byteorder.h +++ b/arch/mips/include/asm/byteorder.h @@ -8,8 +8,6 @@ #ifndef _ASM_BYTEORDER_H #define _ASM_BYTEORDER_H -#include - #if defined(__MIPSEB__) #include #elif defined(__MIPSEL__) diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 2121d99f8364..f88b252e419c 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -1,4 +1,3 @@ include include/asm-generic/Kbuild.asm unifdef-y += pdc.h -unifdef-y += swab.h diff --git a/arch/parisc/include/asm/byteorder.h b/arch/parisc/include/asm/byteorder.h index da66029c4cb2..58af2c5f5d61 100644 --- a/arch/parisc/include/asm/byteorder.h +++ b/arch/parisc/include/asm/byteorder.h @@ -1,7 +1,6 @@ #ifndef _PARISC_BYTEORDER_H #define _PARISC_BYTEORDER_H -#include #include #endif /* _PARISC_BYTEORDER_H */ diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 9268602de5d0..5ab7d7fe198c 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -35,4 +35,3 @@ unifdef-y += spu_info.h unifdef-y += termios.h unifdef-y += types.h unifdef-y += unistd.h -unifdef-y += swab.h diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h index 5cca27a41532..aa6cc4fac965 100644 --- a/arch/powerpc/include/asm/byteorder.h +++ b/arch/powerpc/include/asm/byteorder.h @@ -7,8 +7,6 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ - -#include #include #endif /* _ASM_POWERPC_BYTEORDER_H */ diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index f2af4167bd5f..63a23415fba6 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -13,4 +13,3 @@ unifdef-y += cmb.h unifdef-y += debug.h unifdef-y += chpid.h unifdef-y += schid.h -unifdef-y += swab.h diff --git a/arch/s390/include/asm/byteorder.h b/arch/s390/include/asm/byteorder.h index b95a2b2933fb..a332e59e26fc 100644 --- a/arch/s390/include/asm/byteorder.h +++ b/arch/s390/include/asm/byteorder.h @@ -1,7 +1,6 @@ #ifndef _S390_BYTEORDER_H #define _S390_BYTEORDER_H -#include #include #endif /* _S390_BYTEORDER_H */ diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index f1a2a0d1c79c..43910cdf78a5 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -6,4 +6,3 @@ unifdef-y += unistd_32.h unifdef-y += unistd_64.h unifdef-y += posix_types_32.h unifdef-y += posix_types_64.h -unifdef-y += swab.h diff --git a/arch/sh/include/asm/byteorder.h b/arch/sh/include/asm/byteorder.h index e95c41a5c8cc..db2f5d7cb17d 100644 --- a/arch/sh/include/asm/byteorder.h +++ b/arch/sh/include/asm/byteorder.h @@ -1,8 +1,6 @@ #ifndef __ASM_SH_BYTEORDER_H #define __ASM_SH_BYTEORDER_H -#include - #ifdef __LITTLE_ENDIAN__ #include #else diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 95e38a43dff0..deeb0fba8029 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -17,4 +17,3 @@ header-y += traps.h header-y += uctx.h header-y += utrap.h header-y += watchdog.h -header-y += swab.h diff --git a/arch/sparc/include/asm/byteorder.h b/arch/sparc/include/asm/byteorder.h index 48a047cd6fa9..ccc1b6b7de6c 100644 --- a/arch/sparc/include/asm/byteorder.h +++ b/arch/sparc/include/asm/byteorder.h @@ -1,7 +1,6 @@ #ifndef _SPARC_BYTEORDER_H #define _SPARC_BYTEORDER_H -#include #include #endif /* _SPARC_BYTEORDER_H */ diff --git a/arch/x86/include/asm/Kbuild 
b/arch/x86/include/asm/Kbuild index a9f8a814a1f7..4a8e80cdcfa5 100644 --- a/arch/x86/include/asm/Kbuild +++ b/arch/x86/include/asm/Kbuild @@ -22,4 +22,3 @@ unifdef-y += unistd_32.h unifdef-y += unistd_64.h unifdef-y += vm86.h unifdef-y += vsyscall.h -unifdef-y += swab.h diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h index 7c49917e3d9d..b13a7a88f3eb 100644 --- a/arch/x86/include/asm/byteorder.h +++ b/arch/x86/include/asm/byteorder.h @@ -1,7 +1,6 @@ #ifndef _ASM_X86_BYTEORDER_H #define _ASM_X86_BYTEORDER_H -#include #include #endif /* _ASM_X86_BYTEORDER_H */ diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 58c02a454130..c68e1680da01 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -1,3 +1 @@ include include/asm-generic/Kbuild.asm - -unifdef-y += swab.h diff --git a/arch/xtensa/include/asm/byteorder.h b/arch/xtensa/include/asm/byteorder.h index 329b94591ca4..54eb6315349c 100644 --- a/arch/xtensa/include/asm/byteorder.h +++ b/arch/xtensa/include/asm/byteorder.h @@ -1,8 +1,6 @@ #ifndef _XTENSA_BYTEORDER_H #define _XTENSA_BYTEORDER_H -#include - #ifdef __XTENSA_EL__ #include #elif defined(__XTENSA_EB__) diff --git a/include/asm-frv/Kbuild b/include/asm-frv/Kbuild index 1f44e7c76995..0f8956def738 100644 --- a/include/asm-frv/Kbuild +++ b/include/asm-frv/Kbuild @@ -3,4 +3,3 @@ include include/asm-generic/Kbuild.asm header-y += registers.h unifdef-y += termios.h -unifdef-y += swab.h diff --git a/include/asm-frv/byteorder.h b/include/asm-frv/byteorder.h index 1187e51ecd13..f29b7593e088 100644 --- a/include/asm-frv/byteorder.h +++ b/include/asm-frv/byteorder.h @@ -1,7 +1,6 @@ #ifndef _ASM_BYTEORDER_H #define _ASM_BYTEORDER_H -#include #include #endif /* _ASM_BYTEORDER_H */ diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm index 1870d5e05f1c..70d185534b9d 100644 --- a/include/asm-generic/Kbuild.asm +++ b/include/asm-generic/Kbuild.asm @@ -31,6 +31,7 @@ unifdef-y += socket.h unifdef-y += sockios.h unifdef-y += stat.h unifdef-y += statfs.h +unifdef-y += swab.h unifdef-y += termbits.h unifdef-y += termios.h unifdef-y += types.h diff --git a/include/asm-m32r/Kbuild b/include/asm-m32r/Kbuild index 27b108a86b39..c68e1680da01 100644 --- a/include/asm-m32r/Kbuild +++ b/include/asm-m32r/Kbuild @@ -1,2 +1 @@ include include/asm-generic/Kbuild.asm -unifdef-y += swab.h diff --git a/include/asm-m32r/byteorder.h b/include/asm-m32r/byteorder.h index 61ff9cfd8451..21855d8b028b 100644 --- a/include/asm-m32r/byteorder.h +++ b/include/asm-m32r/byteorder.h @@ -1,8 +1,6 @@ #ifndef _ASM_M32R_BYTEORDER_H #define _ASM_M32R_BYTEORDER_H -#include - #if defined(__LITTLE_ENDIAN__) # include #else diff --git a/include/asm-m68k/Kbuild b/include/asm-m68k/Kbuild index 52fd96b4142a..1a922fad76f7 100644 --- a/include/asm-m68k/Kbuild +++ b/include/asm-m68k/Kbuild @@ -1,3 +1,2 @@ include include/asm-generic/Kbuild.asm header-y += cachectl.h -unifdef-y += swab.h diff --git a/include/asm-m68k/byteorder.h b/include/asm-m68k/byteorder.h index 300866523b86..31b260a88803 100644 --- a/include/asm-m68k/byteorder.h +++ b/include/asm-m68k/byteorder.h @@ -1,7 +1,6 @@ #ifndef _M68K_BYTEORDER_H #define _M68K_BYTEORDER_H -#include #include #endif /* _M68K_BYTEORDER_H */ diff --git a/include/asm-mn10300/Kbuild b/include/asm-mn10300/Kbuild index 27b108a86b39..c68e1680da01 100644 --- a/include/asm-mn10300/Kbuild +++ b/include/asm-mn10300/Kbuild @@ -1,2 +1 @@ include include/asm-generic/Kbuild.asm -unifdef-y += swab.h diff 
--git a/include/asm-mn10300/byteorder.h b/include/asm-mn10300/byteorder.h index 45b18ded19e6..5dd0bdd9feee 100644 --- a/include/asm-mn10300/byteorder.h +++ b/include/asm-mn10300/byteorder.h @@ -1,7 +1,6 @@ #ifndef _ASM_BYTEORDER_H #define _ASM_BYTEORDER_H -#include #include #endif /* _ASM_BYTEORDER_H */ diff --git a/include/linux/swab.h b/include/linux/swab.h index be5284d4a053..ea0c02fd5163 100644 --- a/include/linux/swab.h +++ b/include/linux/swab.h @@ -3,7 +3,7 @@ #include #include -#include +#include /* * casts are necessary for constants, because we never know how for sure -- cgit v1.2.3 From 4a13ad0bd8869bb491c67918662f9b1852595af5 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 14 Jan 2009 12:28:51 +0000 Subject: x86: avoid early crash in disable_local_APIC() E.g. when called due to an early panic. Signed-off-by: Jan Beulich Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 0f830e4f5675..d19aa3aab62b 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -895,6 +895,10 @@ void disable_local_APIC(void) { unsigned int value; + /* APIC hasn't been mapped yet */ + if (!apic_phys) + return; + clear_local_APIC(); /* -- cgit v1.2.3 From 18c07cf530cf4aa8b7551801f68ed40db5ee4e45 Mon Sep 17 00:00:00 2001 From: Cliff Wickman Date: Thu, 15 Jan 2009 09:51:20 -0600 Subject: x86, UV: cpu_relax in uv_wait_completion The function uv_wait_completion() spins on reads of a memory-mapped register, waiting for completion of BAU hardware replies. It should call "cpu_relax()" between those reads to improve performance on hyperthreaded configurations. Signed-off-by: Cliff Wickman Acked-by: Jack Steiner Signed-off-by: Ingo Molnar --- arch/x86/kernel/tlb_uv.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index f885023167e0..6812b829ed83 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c @@ -200,6 +200,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc, destination_timeouts = 0; } } + cpu_relax(); } return FLUSH_COMPLETE; } -- cgit v1.2.3 From b5db0e38653bfada34a92f360b4111566ede3842 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 15 Jan 2009 15:32:12 -0800 Subject: Revert "x86 PAT: remove CPA WARN_ON for zero pte" This reverts commit 58dab916dfb57328d50deb0aa9b3fc92efa248ff, which makes my Nehalem come to a nasty crawling almost-halt. It looks like it turns off caching of regular kernel RAM, with the understandable slowdown of a few orders of magnitude as a result. Acked-by: Ingo Molnar Cc: Yinghai Lu Cc: Peter Anvin Cc: Venkatesh Pallipadi Cc: Suresh Siddha Signed-off-by: Linus Torvalds --- arch/x86/mm/pageattr.c | 10 ++++------ arch/x86/mm/pat.c | 45 +++++++++++++-------------------------------- 2 files changed, 17 insertions(+), 38 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 4cf30dee8161..e89d24815f26 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -555,12 +555,10 @@ repeat: if (!pte_val(old_pte)) { if (!primary) return 0; - - /* - * Special error value returned, indicating that the mapping - * did not exist at this address. - */ - return -EFAULT; + WARN(1, KERN_WARNING "CPA: called for zero pte. 
" + "vaddr = %lx cpa->vaddr = %lx\n", address, + *cpa->vaddr); + return -EINVAL; } if (level == PG_LEVEL_4K) { diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 160c42d3eb8f..8b08fb955274 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -505,35 +505,6 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) } #endif /* CONFIG_STRICT_DEVMEM */ -/* - * Change the memory type for the physial address range in kernel identity - * mapping space if that range is a part of identity map. - */ -static int kernel_map_sync_memtype(u64 base, unsigned long size, - unsigned long flags) -{ - unsigned long id_sz; - int ret; - - if (!pat_enabled || base >= __pa(high_memory)) - return 0; - - id_sz = (__pa(high_memory) < base + size) ? - __pa(high_memory) - base : - size; - - ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags); - /* - * -EFAULT return means that the addr was not valid and did not have - * any identity mapping. That case is a success for - * kernel_map_sync_memtype. - */ - if (ret == -EFAULT) - ret = 0; - - return ret; -} - int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { @@ -584,7 +555,9 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, if (retval < 0) return 0; - if (kernel_map_sync_memtype(offset, size, flags)) { + if (((pfn < max_low_pfn_mapped) || + (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) && + ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) { free_memtype(offset, offset + size); printk(KERN_INFO "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n", @@ -632,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, int strict_prot) { int is_ram = 0; - int ret; + int id_sz, ret; unsigned long flags; unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); @@ -673,7 +646,15 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, flags); } - if (kernel_map_sync_memtype(paddr, size, flags)) { + /* Need to keep identity mapping in sync */ + if (paddr >= __pa(high_memory)) + return 0; + + id_sz = (__pa(high_memory) < paddr + size) ? + __pa(high_memory) - paddr : + size; + + if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) { free_memtype(paddr, paddr + size); printk(KERN_ERR "%s:%d reserve_pfn_range ioremap_change_attr failed %s " -- cgit v1.2.3 From a3c6018e565dc07cf3738ace6bbe412f97b1bba8 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Fri, 16 Jan 2009 11:59:33 +0000 Subject: x86: fix assumed to be contiguous leaf page tables for kmap_atomic region (take 2) Debugging and original patch from Nick Piggin The early fixmap pmd entry inserted at the very top of the KVA is causing the subsequent fixmap mapping code to not provide physically linear pte pages over the kmap atomic portion of the fixmap (which relies on said property to calculate pte addresses). This has caused weird boot failures in kmap_atomic much later in the boot process (initial userspace faults) on a 32-bit PAE system with a larger number of CPUs (smaller CPU counts tend not to run over into the next page so don't show up the problem). Solve this by attempting to clear out the page table, and copy any of its entries to the new one. Also, add a bug if a nonlinear condition is encountered and can't be resolved, which might save some hours of debugging if this fragile scheme ever breaks again... 
Once we have such logic, we can also use it to eliminate the early ioremap trickery around the page table setup for the fixmap area. This also fixes potential issues with FIX_* entries sharing the leaf page table with the early ioremap ones getting discarded by early_ioremap_clear() and not restored by early_ioremap_reset(). It at once eliminates the temporary (and configuration, namely NR_CPUS, dependent) unavailability of early fixed mappings during the time the fixmap area page tables get constructed. Finally, also replace the hard coded calculation of the initial table space needed for the fixmap area with a proper one, allowing kernels configured for large CPU counts to actually boot. Based-on: Nick Piggin Signed-off-by: Jan Beulich Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io.h | 1 - arch/x86/mm/init_32.c | 48 ++++++++++++++++++++++++++++++++++++++++++++--- arch/x86/mm/ioremap.c | 25 ------------------------ 3 files changed, 45 insertions(+), 29 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 05cfed4485fa..1dbbdf4be9b4 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -99,7 +99,6 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size); * A boot-time mapping is currently limited to at most 16 pages. */ extern void early_ioremap_init(void); -extern void early_ioremap_clear(void); extern void early_ioremap_reset(void); extern void __iomem *early_ioremap(unsigned long offset, unsigned long size); extern void __iomem *early_memremap(unsigned long offset, unsigned long size); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 88f1b10de3be..2cef05074413 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -138,6 +138,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) return pte_offset_kernel(pmd, 0); } +static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, + unsigned long vaddr, pte_t *lastpte) +{ +#ifdef CONFIG_HIGHMEM + /* + * Something (early fixmap) may already have put a pte + * page here, which causes the page table allocation + * to become nonlinear. Attempt to fix it, and if it + * is still nonlinear then we have to bug. 
+ */ + int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT; + int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT; + + if (pmd_idx_kmap_begin != pmd_idx_kmap_end + && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin + && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end + && ((__pa(pte) >> PAGE_SHIFT) < table_start + || (__pa(pte) >> PAGE_SHIFT) >= table_end)) { + pte_t *newpte; + int i; + + BUG_ON(after_init_bootmem); + newpte = alloc_low_page(); + for (i = 0; i < PTRS_PER_PTE; i++) + set_pte(newpte + i, pte[i]); + + paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); + set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE)); + BUG_ON(newpte != pte_offset_kernel(pmd, 0)); + __flush_tlb_all(); + + paravirt_release_pte(__pa(pte) >> PAGE_SHIFT); + pte = newpte; + } + BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1) + && vaddr > fix_to_virt(FIX_KMAP_END) + && lastpte && lastpte + PTRS_PER_PTE != pte); +#endif + return pte; +} + /* * This function initializes a certain range of kernel virtual memory * with new bootmem page tables, everywhere page tables are missing in @@ -154,6 +195,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) unsigned long vaddr; pgd_t *pgd; pmd_t *pmd; + pte_t *pte = NULL; vaddr = start; pgd_idx = pgd_index(vaddr); @@ -165,7 +207,8 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) pmd = pmd + pmd_index(vaddr); for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { - one_page_table_init(pmd); + pte = page_table_kmap_check(one_page_table_init(pmd), + pmd, vaddr, pte); vaddr += PMD_SIZE; } @@ -508,7 +551,6 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base) * Fixed mappings, only the page table structure has to be * created - mappings will be set by set_fixmap(): */ - early_ioremap_clear(); vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; page_table_range_init(vaddr, end, pgd_base); @@ -801,7 +843,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse) tables += PAGE_ALIGN(ptes * sizeof(pte_t)); /* for fixmap */ - tables += PAGE_SIZE * 2; + tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t)); /* * RED-PEN putting page tables only on node 0 could diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index bd85d42819e1..af750ab973b6 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -557,34 +557,9 @@ void __init early_ioremap_init(void) } } -void __init early_ioremap_clear(void) -{ - pmd_t *pmd; - - if (early_ioremap_debug) - printk(KERN_INFO "early_ioremap_clear()\n"); - - pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); - pmd_clear(pmd); - paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT); - __flush_tlb_all(); -} - void __init early_ioremap_reset(void) { - enum fixed_addresses idx; - unsigned long addr, phys; - pte_t *pte; - after_paging_init = 1; - for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) { - addr = fix_to_virt(idx); - pte = early_ioremap_pte(addr); - if (pte_present(*pte)) { - phys = pte_val(*pte) & PAGE_MASK; - set_fixmap(idx, phys); - } - } } static void __init __early_set_fixmap(enum fixed_addresses idx, -- cgit v1.2.3 From 5a4ccaf37ffece09ef33f1cfec67efa8ee56f967 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 6 Jan 2009 21:15:32 +0100 Subject: kprobes: check CONFIG_FREEZER instead of CONFIG_PM Check CONFIG_FREEZER instead of CONFIG_PM because kprobe booster depends on freeze_processes() and thaw_processes() when 
CONFIG_PREEMPT=y. This fixes a linkage error which occurs when CONFIG_PREEMPT=y, CONFIG_PM=y and CONFIG_FREEZER=n. Reported-by: Cheng Renquan Signed-off-by: Masami Hiramatsu Signed-off-by: Rafael J. Wysocki Acked-by: Ingo Molnar Signed-off-by: Len Brown --- arch/ia64/kernel/kprobes.c | 2 +- arch/x86/kernel/kprobes.c | 2 +- kernel/kprobes.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index f90be51b1123..9adac441ac9b 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -870,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) return 1; ss_probe: -#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM) +#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ ia64_psr(regs)->ri = p->ainsn.slot; diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 884d985b8b82..e948b28a5a9a 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -446,7 +446,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { -#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM) +#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) if (p->ainsn.boostable == 1 && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ reset_current_kprobe(); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 1b9cbdc0127a..7ba8cd9845cb 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -123,7 +123,7 @@ static int collect_garbage_slots(void); static int __kprobes check_safety(void) { int ret = 0; -#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM) +#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER) ret = freeze_processes(); if (ret == 0) { struct task_struct *p, *q; -- cgit v1.2.3 From b2b815d80a5c4e5b50be0a98aba8c445ce8f3e1f Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 16 Jan 2009 15:22:16 -0800 Subject: x86: put trigger in to detect mismatched apic versions Impact: add debug warning Fire off one message if two apic's discovered with different apic versions. (this code is only called during CPU init) The goal of this is to pave the way of the removal of the apic_version[] array. We dont expect any apic version incompatibilities in the x86 landscape of systems [if so we dont handle them very well and probably never will handle deep apic version assymetries well], but it's prudent to have a debug check for one kernel cycle nevertheless. 
Signed-off-by: Mike Travis Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index d19aa3aab62b..4b6df2469fe3 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1837,6 +1837,11 @@ void __cpuinit generic_processor_info(int apicid, int version) num_processors++; cpu = cpumask_next_zero(-1, cpu_present_mask); + if (version != apic_version[boot_cpu_physical_apicid]) + WARN_ONCE(1, + "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n", + apic_version[boot_cpu_physical_apicid], cpu, version); + physid_set(apicid, phys_cpu_present_map); if (apicid == boot_cpu_physical_apicid) { /* -- cgit v1.2.3 From c7f8562a51c2e5dcc1a00a2bdd232b9965ff960d Mon Sep 17 00:00:00 2001 From: Leonardo Potenza Date: Sun, 18 Jan 2009 23:03:56 +0100 Subject: x86: fix section mismatch warnings in kernel/setup_percpu.c The function setup_cpu_local_masks() has been marked __init, in order to remove the following section mismatch messages: WARNING: vmlinux.o(.text+0x3c2c7): Section mismatch in reference from the function setup_cpu_local_masks() to the function .init.text:alloc_bootmem_cpumask_var() The function setup_cpu_local_masks() references the function __init alloc_bootmem_cpumask_var(). This is often because setup_cpu_local_masks lacks a __init annotation or the annotation of alloc_bootmem_cpumask_var is wrong. WARNING: vmlinux.o(.text+0x3c2d3): Section mismatch in reference from the function setup_cpu_local_masks() to the function .init.text:alloc_bootmem_cpumask_var() The function setup_cpu_local_masks() references the function __init alloc_bootmem_cpumask_var(). This is often because setup_cpu_local_masks lacks a __init annotation or the annotation of alloc_bootmem_cpumask_var is wrong. WARNING: vmlinux.o(.text+0x3c2df): Section mismatch in reference from the function setup_cpu_local_masks() to the function .init.text:alloc_bootmem_cpumask_var() The function setup_cpu_local_masks() references the function __init alloc_bootmem_cpumask_var(). This is often because setup_cpu_local_masks lacks a __init annotation or the annotation of alloc_bootmem_cpumask_var is wrong. WARNING: vmlinux.o(.text+0x3c2eb): Section mismatch in reference from the function setup_cpu_local_masks() to the function .init.text:alloc_bootmem_cpumask_var() The function setup_cpu_local_masks() references the function __init alloc_bootmem_cpumask_var(). This is often because setup_cpu_local_masks lacks a __init annotation or the annotation of alloc_bootmem_cpumask_var is wrong. 
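For readers unfamiliar with these warnings: .init.text is discarded once boot completes, so a plain .text function calling into it gets flagged. A sketch of the before/after shape of the one-line fix that follows (bodies elided):

    /* before: caller in .text, callee in .init.text -> modpost warns */
    static void setup_cpu_local_masks(void)
    {
            alloc_bootmem_cpumask_var(&cpu_initialized_mask);    /* __init */
            ...
    }

    /* after: the caller is boot-only too, so the reference can never
     * outlive the freed init sections */
    static void __init setup_cpu_local_masks(void)
    { ... }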
Signed-off-by: Leonardo Potenza Signed-off-by: Ingo Molnar --- arch/x86/kernel/setup_percpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 55c46074eba0..01161077a49c 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -136,7 +136,7 @@ static void __init setup_cpu_pda_map(void) #ifdef CONFIG_X86_64 /* correctly size the local cpu masks */ -static void setup_cpu_local_masks(void) +static void __init setup_cpu_local_masks(void) { alloc_bootmem_cpumask_var(&cpu_initialized_mask); alloc_bootmem_cpumask_var(&cpu_callin_mask); -- cgit v1.2.3 From 72859081851af2bda04117ca3d64206ffa199e5e Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Fri, 16 Jan 2009 15:31:15 -0800 Subject: cpufreq: use work_on_cpu in acpi-cpufreq.c for drv_read and drv_write Impact: use new work_on_cpu function to reduce stack usage Replace the saving of current->cpus_allowed and set_cpus_allowed_ptr() with a work_on_cpu function for drv_read() and drv_write(). Basically converts do_drv_{read,write} into "work_on_cpu" functions that are now called by drv_read and drv_write. Note: This patch basically reverts 50c668d6 which reverted 7503bfba, now that the work_on_cpu() function is more stable. Signed-off-by: Mike Travis Acked-by: Rusty Russell Tested-by: Dieter Ries Tested-by: Maciej Rutecki Cc: Dave Jones Cc: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 8f3c95c7e61f..5bf2c834a236 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -150,8 +150,9 @@ struct drv_cmd { u32 val; }; -static void do_drv_read(struct drv_cmd *cmd) +static long do_drv_read(void *_cmd) { + struct drv_cmd *cmd = _cmd; u32 h; switch (cmd->type) { @@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd) default: break; } + return 0; } -static void do_drv_write(struct drv_cmd *cmd) +static long do_drv_write(void *_cmd) { + struct drv_cmd *cmd = _cmd; u32 lo, hi; switch (cmd->type) { @@ -186,30 +189,23 @@ static void do_drv_write(struct drv_cmd *cmd) default: break; } + return 0; } static void drv_read(struct drv_cmd *cmd) { - cpumask_t saved_mask = current->cpus_allowed; cmd->val = 0; - set_cpus_allowed_ptr(current, cmd->mask); - do_drv_read(cmd); - set_cpus_allowed_ptr(current, &saved_mask); + work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd); } static void drv_write(struct drv_cmd *cmd) { - cpumask_t saved_mask = current->cpus_allowed; unsigned int i; for_each_cpu(i, cmd->mask) { - set_cpus_allowed_ptr(current, cpumask_of(i)); - do_drv_write(cmd); + work_on_cpu(i, do_drv_write, cmd); } - - set_cpus_allowed_ptr(current, &saved_mask); - return; } static u32 get_cur_val(const struct cpumask *mask) @@ -366,7 +362,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) return freq; } -static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq, +static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, struct acpi_cpufreq_data *data) { unsigned int cur_freq; -- cgit v1.2.3 From bfa318ad52a23f1e303d176b44366cdd2bb71ad2 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 15 Jan 2009 15:46:08 +0100 Subject: fix: crash: IP: __bitmap_intersects+0x48/0x73 -tip testing found this 
crash: > [ 35.258515] calling acpi_cpufreq_init+0x0/0x127 @ 1 > [ 35.264127] BUG: unable to handle kernel NULL pointer dereference at (null) > [ 35.267554] IP: [] __bitmap_intersects+0x48/0x73 > [ 35.267554] PGD 0 > [ 35.267554] Oops: 0000 [#1] SMP DEBUG_PAGEALLOC arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c is still broken: there's no allocation of the variable mask, so we pass in an uninitialized cmd.mask field to drv_read(), which then passes it to the scheduler which then crashes ... Switch it over to the much simpler constant-cpumask-pointers approach. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 5bf2c834a236..4b1c319d30c3 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -145,7 +145,7 @@ typedef union { struct drv_cmd { unsigned int type; - cpumask_var_t mask; + const struct cpumask *mask; drv_addr_union addr; u32 val; }; @@ -231,6 +231,7 @@ static u32 get_cur_val(const struct cpumask *mask) return 0; } + cmd.mask = mask; drv_read(&cmd); dprintk("get_cur_val = %u\n", cmd.val); @@ -397,9 +398,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, return -ENODEV; } - if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL))) - return -ENOMEM; - perf = data->acpi_data; result = cpufreq_frequency_table_target(policy, data->freq_table, @@ -444,9 +442,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, /* cpufreq holds the hotplug lock, so we are safe from here on */ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) - cpumask_and(cmd.mask, cpu_online_mask, policy->cpus); + cmd.mask = policy->cpus; else - cpumask_copy(cmd.mask, cpumask_of(policy->cpu)); + cmd.mask = cpumask_of(policy->cpu); freqs.old = perf->states[perf->state].core_frequency * 1000; freqs.new = data->freq_table[next_state].frequency; @@ -473,7 +471,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, perf->state = next_perf_state; out: - free_cpumask_var(cmd.mask); return result; } -- cgit v1.2.3 From f5495506c3c1300d249d403c36f92de71920dbeb Mon Sep 17 00:00:00 2001 From: Gary Hade Date: Mon, 19 Jan 2009 13:46:41 -0800 Subject: x86: remove kernel_physical_mapping_init() from init section Impact: fix crash with memory hotplug enabled kernel_physical_mapping_init() is called during memory hotplug so it does not belong in the init section. If the kernel is built with CONFIG_DEBUG_SECTION_MISMATCH=y on the make command line, arch/x86/mm/init_64.c is compiled with the -fno-inline-functions-called-once gcc option defeating inlining of kernel_physical_mapping_init() within init_memory_mapping(). When kernel_physical_mapping_init() is not inlined it is placed in the .init.text section according to the __init in it's current declaration. A later call to kernel_physical_mapping_init() during a memory hotplug operation encounters an int3 trap because the .init.text section memory has been freed. This patch eliminates the crash caused by the int3 trap by moving the non-inlined kernel_physical_mapping_init() from .init.text to .meminit.text. 
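For context, a rough sketch of the section annotations involved, as I understand them (semantics paraphrased, not quoted, from include/linux/init.h):

    /* __init    -> .init.text     always discarded after boot            */
    /* __meminit -> .meminit.text  kept when CONFIG_MEMORY_HOTPLUG=y,     */
    /*              otherwise treated like __init and discarded           */
    static unsigned long __meminit
    kernel_physical_mapping_init(unsigned long start, unsigned long end,
                                 unsigned long page_size_mask);

so the function stays resident exactly in the configurations where memory hotplug can call it after boot.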
Signed-off-by: Gary Hade Signed-off-by: Ingo Molnar --- arch/x86/mm/init_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 23f68e77ad1f..e6d36b490250 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -596,7 +596,7 @@ static void __init init_gbpages(void) direct_gbpages = 0; } -static unsigned long __init kernel_physical_mapping_init(unsigned long start, +static unsigned long __meminit kernel_physical_mapping_init(unsigned long start, unsigned long end, unsigned long page_size_mask) { -- cgit v1.2.3 From e0a96129db574d6365e3439d16d88517c437ab33 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 16 Jan 2009 15:22:11 +0100 Subject: x86: use early clobbers in usercopy*.c Impact: fix rare (but currently harmless) miscompile with certain configs and gcc versions Hugh Dickins noticed that strncpy_from_user() was miscompiled in some circumstances with gcc 4.3. Thanks to Hugh's excellent analysis it was easy to track down. Hugh writes: > Try building an x86_64 defconfig 2.6.29-rc1 kernel tree, > except not quite defconfig, switch CONFIG_PREEMPT_NONE=y > and CONFIG_PREEMPT_VOLUNTARY off (because it expands a > might_fault() there, which hides the issue): using a > gcc 4.3.2 (I've checked both openSUSE 11.1 and Fedora 10). > > It generates the following: > > 0000000000000000 <__strncpy_from_user>: > 0: 48 89 d1 mov %rdx,%rcx > 3: 48 85 c9 test %rcx,%rcx > 6: 74 0e je 16 <__strncpy_from_user+0x16> > 8: ac lods %ds:(%rsi),%al > 9: aa stos %al,%es:(%rdi) > a: 84 c0 test %al,%al > c: 74 05 je 13 <__strncpy_from_user+0x13> > e: 48 ff c9 dec %rcx > 11: 75 f5 jne 8 <__strncpy_from_user+0x8> > 13: 48 29 c9 sub %rcx,%rcx > 16: 48 89 c8 mov %rcx,%rax > 19: c3 retq > > Observe that "sub %rcx,%rcx; mov %rcx,%rax", whereas gcc 4.2.1 > (and many other configs) say "sub %rcx,%rdx; mov %rdx,%rax". > Isn't it returning 0 when it ought to be returning strlen? The asm constraints for the strncpy_from_user() result were missing an early clobber, which tells gcc that the last output arguments are written before all input arguments are read. Also add more early clobbers in the rest of the file and fix 32-bit usercopy.c in the same way. Signed-off-by: Andi Kleen Signed-off-by: H. Peter Anvin [ since this API is rarely used and no in-kernel user relies on a 'len' return value (they only rely on negative return values) this miscompile was never noticed in the field. But it's worth fixing it nevertheless. 
] Signed-off-by: Ingo Molnar --- arch/x86/lib/usercopy_32.c | 4 ++-- arch/x86/lib/usercopy_64.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 4a20b2f9a381..7c8ca91bb9ec 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -56,7 +56,7 @@ do { \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE(0b,3b) \ - : "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \ + : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ "=&D" (__d2) \ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ : "memory"); \ @@ -218,7 +218,7 @@ long strnlen_user(const char __user *s, long n) " .align 4\n" " .long 0b,2b\n" ".previous" - :"=r" (n), "=D" (s), "=a" (res), "=c" (tmp) + :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp) :"0" (n), "1" (s), "2" (0), "3" (mask) :"cc"); return res & mask; diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 64d6c84e6353..ec13cb5f17ed 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -32,7 +32,7 @@ do { \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE(0b,3b) \ - : "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1), \ + : "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ "=&D" (__d2) \ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ : "memory"); \ @@ -86,7 +86,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) ".previous\n" _ASM_EXTABLE(0b,3b) _ASM_EXTABLE(1b,2b) - : [size8] "=c"(size), [dst] "=&D" (__d0) + : [size8] "=&c"(size), [dst] "=&D" (__d0) : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr), [zero] "r" (0UL), [eight] "r" (8UL)); return size; -- cgit v1.2.3 From 552b8aa4d1edcc1c764ff6f61a7686347a2d1827 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 20 Jan 2009 09:31:49 +0100 Subject: Revert "x86: signal: change type of paramter for sys_rt_sigreturn()" This reverts commit 4217458dafaa57d8e26a46f5d05ab8c53cf64191. Justin Madru bisected this commit, it was causing weird Firefox crashes. The reason is that GCC mis-optimizes (re-uses) the on-stack parameters of the calling frame, which corrupts the syscall return pt_regs state and thus corrupts user-space register state. So we go back to the slightly less clean but more optimization-safe method of getting to pt_regs. Also add a comment to explain this. 
Resolves: http://bugzilla.kernel.org/show_bug.cgi?id=12505 Reported-and-bisected-by: Justin Madru Tested-by: Justin Madru Signed-off-by: Ingo Molnar --- arch/x86/include/asm/syscalls.h | 2 +- arch/x86/kernel/signal.c | 11 +++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index 9c6797c3e56c..c0b0bda754ee 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -40,7 +40,7 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *, struct old_sigaction __user *); asmlinkage int sys_sigaltstack(unsigned long); asmlinkage unsigned long sys_sigreturn(unsigned long); -asmlinkage int sys_rt_sigreturn(struct pt_regs); +asmlinkage int sys_rt_sigreturn(unsigned long); /* kernel/ioport.c */ asmlinkage long sys_iopl(unsigned long); diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 89bb7668041d..df0587f24c54 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -632,9 +632,16 @@ badframe: } #ifdef CONFIG_X86_32 -asmlinkage int sys_rt_sigreturn(struct pt_regs regs) +/* + * Note: do not pass in pt_regs directly as with tail-call optimization + * GCC will incorrectly stomp on the caller's frame and corrupt user-space + * register state: + */ +asmlinkage int sys_rt_sigreturn(unsigned long __unused) { - return do_rt_sigreturn(®s); + struct pt_regs *regs = (struct pt_regs *)&__unused; + + return do_rt_sigreturn(regs); } #else /* !CONFIG_X86_32 */ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) -- cgit v1.2.3 From a1e46212a410793d575718818e81ddc442a65283 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Tue, 20 Jan 2009 14:20:21 -0800 Subject: x86: fix page attribute corruption with cpa() Impact: fix sporadic slowdowns and warning messages This patch fixes a performance issue reported by Linus on his Nehalem system. While Linus reverted the PAT patch (commit 58dab916dfb57328d50deb0aa9b3fc92efa248ff) which exposed the issue, existing cpa() code can potentially still cause wrong(page attribute corruption) behavior. This patch also fixes the "WARNING: at arch/x86/mm/pageattr.c:560" that various people reported. In 64bit kernel, kernel identity mapping might have holes depending on the available memory and how e820 reports the address range covering the RAM, ACPI, PCI reserved regions. If there is a 2MB/1GB hole in the address range that is not listed by e820 entries, kernel identity mapping will have a corresponding hole in its 1-1 identity mapping. If cpa() happens on the kernel identity mapping which falls into these holes, existing code fails like this: __change_page_attr_set_clr() __change_page_attr() returns 0 because of if (!kpte). But doesn't set cpa->numpages and cpa->pfn. cpa_process_alias() uses uninitialized cpa->pfn (random value) which can potentially lead to changing the page attribute of kernel text/data, kernel identity mapping of RAM pages etc. oops! This bug was easily exposed by another PAT patch which was doing cpa() more often on kernel identity mapping holes (physical range between max_low_pfn_mapped and 4GB), where in here it was setting the cache disable attribute(PCD) for kernel identity mappings aswell. Fix cpa() to handle the kernel identity mapping holes. 
Retain the WARN() for cpa() calls to other not present address ranges (kernel-text/data, ioremap() addresses) Signed-off-by: Suresh Siddha Signed-off-by: Venkatesh Pallipadi Cc: Signed-off-by: Ingo Molnar --- arch/x86/mm/pageattr.c | 49 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 15 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index e89d24815f26..84ba74820ad6 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -534,6 +534,36 @@ out_unlock: return 0; } +static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, + int primary) +{ + /* + * Ignore all non primary paths. + */ + if (!primary) + return 0; + + /* + * Ignore the NULL PTE for kernel identity mapping, as it is expected + * to have holes. + * Also set numpages to '1' indicating that we processed cpa req for + * one virtual address page and its pfn. TBD: numpages can be set based + * on the initial value and the level returned by lookup_address(). + */ + if (within(vaddr, PAGE_OFFSET, + PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) { + cpa->numpages = 1; + cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; + return 0; + } else { + WARN(1, KERN_WARNING "CPA: called for zero pte. " + "vaddr = %lx cpa->vaddr = %lx\n", vaddr, + *cpa->vaddr); + + return -EFAULT; + } +} + static int __change_page_attr(struct cpa_data *cpa, int primary) { unsigned long address; @@ -549,17 +579,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) repeat: kpte = lookup_address(address, &level); if (!kpte) - return 0; + return __cpa_process_fault(cpa, address, primary); old_pte = *kpte; - if (!pte_val(old_pte)) { - if (!primary) - return 0; - WARN(1, KERN_WARNING "CPA: called for zero pte. " - "vaddr = %lx cpa->vaddr = %lx\n", address, - *cpa->vaddr); - return -EINVAL; - } + if (!pte_val(old_pte)) + return __cpa_process_fault(cpa, address, primary); if (level == PG_LEVEL_4K) { pte_t new_pte; @@ -657,12 +681,7 @@ static int cpa_process_alias(struct cpa_data *cpa) vaddr = *cpa->vaddr; if (!(within(vaddr, PAGE_OFFSET, - PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT)) -#ifdef CONFIG_X86_64 - || within(vaddr, PAGE_OFFSET + (1UL<<32), - PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)) -#endif - )) { + PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { alias_cpa = *cpa; temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT); -- cgit v1.2.3 From 731f1872f4e8a0f1eabd49c3548207e79a421202 Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Tue, 20 Jan 2009 10:37:39 +0100 Subject: x86: mtrr fix debug boot parameter while looking at: http://bugzilla.kernel.org/show_bug.cgi?id=11541 I realized that the mtrr.show param cannot work, because the code is processed much too early. This patch: - Declares mtrr.show as early_param - Stays consistent with the previous param (which I doubt that it ever worked), so mtrr.show=1 would still work - Declares mtrr_show as initdata Signed-off-by: Thomas Renninger Acked-by: Jan Beulich Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mtrr/generic.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index b59ddcc88cd8..0c0a455fe95c 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -33,11 +33,13 @@ u64 mtrr_tom2; struct mtrr_state_type mtrr_state = {}; EXPORT_SYMBOL_GPL(mtrr_state); -#undef MODULE_PARAM_PREFIX -#define MODULE_PARAM_PREFIX "mtrr." 
- -static int mtrr_show; -module_param_named(show, mtrr_show, bool, 0); +static int __initdata mtrr_show; +static int __init mtrr_debug(char *opt) +{ + mtrr_show = 1; + return 0; +} +early_param("mtrr.show", mtrr_debug); /* * Returns the effective MTRR type for the region -- cgit v1.2.3 From 9597134218300c045cf219be3664615e97cb239c Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Tue, 13 Jan 2009 10:21:30 -0800 Subject: x86: fix PTE corruption issue while mapping RAM using /dev/mem Beschorner Daniel reported: > hwinfo problem since 2.6.28, showing this in the oops: > Corrupted page table at address 7fd04de3ec00 Also, PaX Team reported a regression with this commit: > commit 9542ada803198e6eba29d3289abb39ea82047b92 > Author: Suresh Siddha > Date: Wed Sep 24 08:53:33 2008 -0700 > > x86: track memtype for RAM in page struct This commit breaks mapping any RAM page through /dev/mem, as the reserve_memtype() was not initializing the return attribute type and as such corrupting the PTE entry that was setup with the return attribute type. Because of this bug, application mapping this RAM page through /dev/mem will die with "Corrupted page table at address xxxx" message in the kernel log and also the kernel identity mapping which maps the underlying RAM page gets converted to UC. Fix this by initializing the return attribute type before calling reserve_ram_pages_type() Reported-by: PaX Team Reported-and-tested-by: Beschorner Daniel Tested-and-Acked-by: PaX Team Signed-off-by: Suresh Siddha Signed-off-by: Venkatesh Pallipadi Signed-off-by: Ingo Molnar --- arch/x86/mm/pat.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 070ee4a3b225..ffc88cc00fda 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -333,6 +333,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, req_type & _PAGE_CACHE_MASK); } + if (new_type) + *new_type = actual_type; + /* * For legacy reasons, some parts of the physical address range in the * legacy 1MB region is treated as non-RAM (even when listed as RAM in @@ -356,9 +359,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type, new->end = end; new->type = actual_type; - if (new_type) - *new_type = actual_type; - spin_lock(&memtype_lock); if (cached_entry && start >= cached_start) -- cgit v1.2.3 From bdf21a49bab28f0d9613e8d8724ef9c9168b61b9 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Wed, 21 Jan 2009 15:01:56 -0800 Subject: x86: add MSR_IA32_MISC_ENABLE bits to Impact: None (new bit definitions currently unused) Add bit definitions for the MSR_IA32_MISC_ENABLE MSRs to . Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/msr-index.h | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index cb58643947b9..358acc59ae04 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -202,6 +202,35 @@ #define MSR_IA32_THERM_STATUS 0x0000019c #define MSR_IA32_MISC_ENABLE 0x000001a0 +/* MISC_ENABLE bits: architectural */ +#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0) +#define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1) +#define MSR_IA32_MISC_ENABLE_EMON (1ULL << 7) +#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << 11) +#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << 12) +#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16) +#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) +#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << 22) +#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << 23) +#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << 34) + +/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */ +#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << 2) +#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << 3) +#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << 4) +#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << 6) +#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << 8) +#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << 9) +#define MSR_IA32_MISC_ENABLE_FERR (1ULL << 10) +#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << 10) +#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << 13) +#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << 19) +#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << 20) +#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << 24) +#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << 37) +#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << 38) +#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << 39) + /* Intel Model 6 */ #define MSR_P6_EVNTSEL0 0x00000186 #define MSR_P6_EVNTSEL1 0x00000187 -- cgit v1.2.3 From 066941bd4eeb159307a5d7d795100d0887c00442 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Wed, 21 Jan 2009 15:04:32 -0800 Subject: x86: unmask CPUID levels on Intel CPUs Impact: Fixes crashes with misconfigured BIOSes on XSAVE hardware Avuton Olrich reported early boot crashes with v2.6.28 and bisected it down to dc1e35c6e95e8923cf1d3510438b63c600fee1e2 ("x86, xsave: enable xsave/xrstor on cpus with xsave support"). If the CPUID limit bit in MSR_IA32_MISC_ENABLE is set, clear it to make all CPUID information available. This is required for some features to work, in particular XSAVE. Reported-and-bisected-by: Avuton Olrich Tested-by: Avuton Olrich Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/cpu/intel.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 8ea6929e974c..43c1dcf0bec7 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -29,6 +29,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { + u64 misc_enable; + + /* Unmask CPUID levels if masked */ + if (!rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_enable) && + (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)) { + misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; + wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + c->cpuid_level = cpuid_eax(0); + } + if ((c->x86 == 0xf && c->x86_model >= 0x03) || (c->x86 == 0x6 && c->x86_model >= 0x0e)) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); -- cgit v1.2.3 From 336f6c322d87806ef93afad6308ac65083a865e5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 22 Jan 2009 09:50:44 +0100 Subject: debugobjects: add and use INIT_WORK_ON_STACK Impact: Fix debugobjects warning debugobject enabled kernels spit out a warning in hpet code due to a workqueue which is initialized on stack. Add INIT_WORK_ON_STACK() which calls init_timer_on_stack() and use it in hpet. Signed-off-by: Thomas Gleixner --- arch/x86/kernel/hpet.c | 3 ++- include/linux/workqueue.h | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index cd759ad90690..64d5ad0b8add 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -628,11 +628,12 @@ static int hpet_cpuhp_notify(struct notifier_block *n, switch (action & 0xf) { case CPU_ONLINE: - INIT_DELAYED_WORK(&work.work, hpet_work); + INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work); init_completion(&work.complete); /* FIXME: add schedule_work_on() */ schedule_delayed_work_on(cpu, &work.work, 0); wait_for_completion(&work.complete); + destroy_timer_on_stack(&work.work.timer); break; case CPU_DEAD: if (hdev) { diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index b36291130f22..20b59eb1facd 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -124,6 +124,12 @@ struct execute_work { init_timer_deferrable(&(_work)->timer); \ } while (0) +#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ + do { \ + INIT_WORK(&(_work)->work, (_func)); \ + init_timer_on_stack(&(_work)->timer); \ + } while (0) + /** * work_pending - Find out whether a work item is currently pending * @work: The work item in question -- cgit v1.2.3 From ba2607fe9c1f2d4ad5a3d4c4ae9117c5bfdca826 Mon Sep 17 00:00:00 2001 From: Markus Metzger Date: Mon, 19 Jan 2009 10:38:35 +0100 Subject: x86, ds, bts: cleanup/fix DS configuration Cleanup the cpuid check for DS configuration. This also fixes a Corei7 CPUID enumeration bug. Signed-off-by: Markus Metzger Signed-off-by: Steven Rostedt Signed-off-by: Ingo Molnar --- arch/x86/kernel/ds.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index da91701a2348..169a120587be 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c @@ -15,8 +15,8 @@ * - buffer allocation (memory accounting) * * - * Copyright (C) 2007-2008 Intel Corporation. - * Markus Metzger , 2007-2008 + * Copyright (C) 2007-2009 Intel Corporation. 
+ * Markus Metzger , 2007-2009 */ @@ -890,7 +890,7 @@ int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value) } static const struct ds_configuration ds_cfg_netburst = { - .name = "netburst", + .name = "Netburst", .ctl[dsf_bts] = (1 << 2) | (1 << 3), .ctl[dsf_bts_kernel] = (1 << 5), .ctl[dsf_bts_user] = (1 << 6), @@ -904,7 +904,7 @@ static const struct ds_configuration ds_cfg_netburst = { #endif }; static const struct ds_configuration ds_cfg_pentium_m = { - .name = "pentium m", + .name = "Pentium M", .ctl[dsf_bts] = (1 << 6) | (1 << 7), .sizeof_field = sizeof(long), @@ -915,8 +915,8 @@ static const struct ds_configuration ds_cfg_pentium_m = { .sizeof_rec[ds_pebs] = sizeof(long) * 18, #endif }; -static const struct ds_configuration ds_cfg_core2 = { - .name = "core 2", +static const struct ds_configuration ds_cfg_core2_atom = { + .name = "Core 2/Atom", .ctl[dsf_bts] = (1 << 6) | (1 << 7), .ctl[dsf_bts_kernel] = (1 << 9), .ctl[dsf_bts_user] = (1 << 10), @@ -949,19 +949,22 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c) switch (c->x86) { case 0x6: switch (c->x86_model) { - case 0 ... 0xC: - /* sorry, don't know about them */ - break; - case 0xD: - case 0xE: /* Pentium M */ + case 0x9: + case 0xd: /* Pentium M */ ds_configure(&ds_cfg_pentium_m); break; - default: /* Core2, Atom, ... */ - ds_configure(&ds_cfg_core2); + case 0xf: + case 0x17: /* Core2 */ + case 0x1c: /* Atom */ + ds_configure(&ds_cfg_core2_atom); + break; + case 0x1a: /* i7 */ + default: + /* sorry, don't know about them */ break; } break; - case 0xF: + case 0xf: switch (c->x86_model) { case 0x0: case 0x1: -- cgit v1.2.3 From 42ef73fe134732b2e91c0326df5fd568da17c4b2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 23 Jan 2009 17:37:49 +0100 Subject: x86, mm: fix pte_free() On -rt we were seeing spurious bad page states like: Bad page state in process 'firefox' page:c1bc2380 flags:0x40000000 mapping:c1bc2390 mapcount:0 count:0 Trying to fix it up, but a reboot is needed Backtrace: Pid: 503, comm: firefox Not tainted 2.6.26.8-rt13 #3 [] ? printk+0x14/0x19 [] bad_page+0x4e/0x79 [] free_hot_cold_page+0x5b/0x1d3 [] free_hot_page+0xf/0x11 [] __free_pages+0x20/0x2b [] __pte_alloc+0x87/0x91 [] handle_mm_fault+0xe4/0x733 [] ? rt_mutex_down_read_trylock+0x57/0x63 [] ? rt_mutex_down_read_trylock+0x57/0x63 [] do_page_fault+0x36f/0x88a This is the case where a concurrent fault already installed the PTE and we get to free the newly allocated one. This is due to pgtable_page_ctor() doing the spin_lock_init(&page->ptl) which is overlaid with the {private, mapping} struct. union { struct { unsigned long private; struct address_space *mapping; }; spinlock_t ptl; struct kmem_cache *slab; struct page *first_page; }; Normally the spinlock is small enough to not stomp on page->mapping, but PREEMPT_RT=y has huge 'spin'locks. But lockdep kernels should also be able to trigger this splat, as the lock tracking code grows the spinlock to cover page->mapping. The obvious fix is calling pgtable_page_dtor() like the regular pte free path __pte_free_tlb() does. It seems all architectures except x86 and nm10300 already do this, and nm10300 doesn't seem to use pgtable_page_ctor(), which suggests it doesn't do SMP or simply doesnt do MMU at all or something. 
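The one-line fix below restores the ctor/dtor pairing. A rough sketch of what the two helpers do, from memory of the mm headers of this era (exact bodies may differ):

    static inline void pgtable_page_ctor(struct page *page)
    {
            pte_lock_init(page);        /* may overlay page->ptl on the   */
                                        /* {private, mapping} union       */
            inc_zone_page_state(page, NR_PAGETABLE);
    }

    static inline void pgtable_page_dtor(struct page *page)
    {
            pte_lock_deinit(page);      /* clears page->mapping again     */
            dec_zone_page_state(page, NR_PAGETABLE);
    }

Freeing a pte page without the dtor leaves the lock bits sitting in page->mapping, which the page allocator then reports as a bad page state.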
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar Cc: --- arch/x86/include/asm/pgalloc.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index cb7c151a8bff..dd14c54ac718 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -42,6 +42,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) static inline void pte_free(struct mm_struct *mm, struct page *pte) { + pgtable_page_dtor(pte); __free_page(pte); } -- cgit v1.2.3 From e1b4d1143651fb3838be1117785b6e0386fa151f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 25 Jan 2009 16:57:00 +0100 Subject: x86: use standard PIT frequency The RDC and ELAN platforms use slightly different PIT clocks, resulting in a timex.h hack that changes PIT_TICK_RATE at build time. But if a tester enables any of these platform support .config options, the PIT will be miscalibrated on standard PC platforms. So use one frequency - in a subsequent patch we'll add a quirk to allow x86 platforms to define different PIT frequencies. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/timex.h | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h index 1287dc1347d6..b5c9d45c981f 100644 --- a/arch/x86/include/asm/timex.h +++ b/arch/x86/include/asm/timex.h @@ -1,18 +1,13 @@ -/* x86 architecture timex specifications */ #ifndef _ASM_X86_TIMEX_H #define _ASM_X86_TIMEX_H #include #include -#ifdef CONFIG_X86_ELAN -# define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */ -#elif defined(CONFIG_X86_RDC321X) -# define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */ -#else -# define PIT_TICK_RATE 1193182 /* Underlying HZ */ -#endif -#define CLOCK_TICK_RATE PIT_TICK_RATE +/* The PIT ticks at this frequency (in HZ): */ +#define PIT_TICK_RATE 1193182 + +#define CLOCK_TICK_RATE PIT_TICK_RATE #define ARCH_HAS_READ_CURRENT_TIMER -- cgit v1.2.3 From ef5fa0ab24b87646c7bc98645acbb4b51fc2acd4 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 23 Jan 2009 14:14:21 -0800 Subject: x86: work around PAGE_KERNEL_WC not getting WC in iomap_atomic_prot_pfn. In the absence of PAT, PAGE_KERNEL_WC ends up mapping to a memory type that gets UC behavior even in the presence of a WC MTRR covering the area in question. By swapping to PAGE_KERNEL_UC_MINUS, we can get the actual behavior the caller wanted (WC if you can manage it, UC otherwise). This recovers the 40% performance improvement of using WC in the DRM to upload vertex data. Signed-off-by: Eric Anholt Signed-off-by: H. Peter Anvin --- arch/x86/mm/iomap_32.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index d0151d8ce452..ca53224fc56c 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -17,6 +17,7 @@ */ #include +#include #include /* Map 'pfn' using fixed map 'type' and protections 'prot' @@ -29,6 +30,15 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) pagefault_disable(); + /* + * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS. + * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the + * MTRR is UC or WC. UC_MINUS gets the real intention, of the + * user, which is "WC if the MTRR is WC, UC if you can't do that."
+ */ + if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC)) + prot = PAGE_KERNEL_UC_MINUS; + idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); set_pte(kmap_pte-idx, pfn_pte(pfn, prot)); -- cgit v1.2.3 From 99fb4d349db7e7dacb2099c5cc320a9e2d31c1ef Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 26 Jan 2009 04:30:41 +0100 Subject: x86: unmask CPUID levels on Intel CPUs, fix Impact: fix boot hang on pre-model-15 Intel CPUs rdmsrl_safe() does not work in very early bootup code yet, because we don't have the pagefault handler installed yet, so the exception section does not get parsed. rdmsr_safe() will just crash and hang the bootup. So limit the MSR_IA32_MISC_ENABLE MSR read to those CPU types that support it. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 43c1dcf0bec7..549f2ada55f5 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -29,14 +29,17 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { - u64 misc_enable; - - /* Unmask CPUID levels if masked */ - if (!rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_enable) && - (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)) { - misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; - wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); - c->cpuid_level = cpuid_eax(0); + /* Unmask CPUID levels if masked: */ + if (c->x86 == 6 && c->x86_model >= 15) { + u64 misc_enable; + + rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + + if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { + misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; + wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + c->cpuid_level = cpuid_eax(0); + } } if ((c->x86 == 0xf && c->x86_model >= 0x03) || -- cgit v1.2.3 From 659d2618b38f8a4d91bdb19cfc5c7fb330a4c55a Mon Sep 17 00:00:00 2001 From: Rakib Mullick Date: Sat, 24 Jan 2009 01:46:03 +0600 Subject: x86: fix section mismatch warning Here the function vmi_activate() calls the init functions vmi_time_init(), vmi_time_bsp_init() and vmi_time_ap_init(), which causes the following section mismatch warnings: LD arch/x86/kernel/built-in.o WARNING: arch/x86/kernel/built-in.o(.text+0x13ba9): Section mismatch in reference from the function vmi_activate() to the function .init.text:vmi_time_init() The function vmi_activate() references the function __init vmi_time_init(). This is often because vmi_activate lacks a __init annotation or the annotation of vmi_time_init is wrong. WARNING: arch/x86/kernel/built-in.o(.text+0x13bd1): Section mismatch in reference from the function vmi_activate() to the function .devinit.text:vmi_time_bsp_init() The function vmi_activate() references the function __devinit vmi_time_bsp_init(). This is often because vmi_activate lacks a __devinit annotation or the annotation of vmi_time_bsp_init is wrong. WARNING: arch/x86/kernel/built-in.o(.text+0x13bdb): Section mismatch in reference from the function vmi_activate() to the function .devinit.text:vmi_time_ap_init() The function vmi_activate() references the function __devinit vmi_time_ap_init(). This is often because vmi_activate lacks a __devinit annotation or the annotation of vmi_time_ap_init is wrong. Fix it by marking vmi_activate() as __init too.
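The rule modpost enforces here is that code which can outlive boot must not reference functions placed in .init.text (or .devinit.text), because those sections may be discarded. Below is a minimal, self-contained C sketch of the before/after shape of such a fix; the function names are made up, and __init is reduced to a no-op macro so the sketch builds as an ordinary program (in a real kernel build the annotation places the function in .init.text):

#include <stdio.h>

/* In the kernel, __init expands to a section attribute that puts the
 * function into .init.text, which is freed once boot completes. For this
 * stand-alone sketch it is a no-op. */
#define __init

static void __init one_time_setup(void)
{
	puts("one-time setup; in a real kernel this memory is reclaimed after boot");
}

/* Before the fix: a plain (non-__init) function referencing an __init one.
 * modpost flags this, because the caller may run after .init.text is gone. */
static void activate_before_fix(void)
{
	one_time_setup();
}

/* After the fix: the caller is __init too, so caller and callee are
 * discarded together and the reference is safe. */
static void __init activate_after_fix(void)
{
	one_time_setup();
}

int main(void)
{
	activate_before_fix();
	activate_after_fix();
	return 0;
}

The warnings quoted above name __devinit callees for the same reason, and marking vmi_activate() __init, as the one-line diff below does, is what the changelog relies on to silence all three of them.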
Signed-off-by: Rakib Mullick Signed-off-by: Ingo Molnar --- arch/x86/kernel/vmi_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 23206ba16874..1d3302cc2ddf 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c @@ -858,7 +858,7 @@ void __init vmi_init(void) #endif } -void vmi_activate(void) +void __init vmi_activate(void) { unsigned long flags; -- cgit v1.2.3 From 30a0fb947a68ad3ab8a7184e3b3d79dce10e3688 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Mon, 26 Jan 2009 09:40:58 -0800 Subject: x86: correct the CPUID pattern for MSR_IA32_MISC_ENABLE availability Impact: re-enable CPUID unmasking on affected processors As far as I am capable of discerning from the documentation, MSR_IA32_MISC_ENABLE should be available for all family 0xf CPUs, as well as family 6 for model >= 0xd (newer Pentium M). The documentation on this isn't ideal, so we still need to be on the lookout for errors. Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/intel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 549f2ada55f5..430e5c38a544 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -30,7 +30,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { /* Unmask CPUID levels if masked: */ - if (c->x86 == 6 && c->x86_model >= 15) { + if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { u64 misc_enable; rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); -- cgit v1.2.3 From dd7f8dbe2b3c0611ba969cd867c10cb63d163e25 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 26 Jan 2009 21:19:57 +0100 Subject: eeprom: More consistent symbol names Now that all EEPROM drivers live in the same place, let's harmonize their symbol names. Also fix eeprom's dependencies: it definitely needs sysfs, and it is no longer experimental after many years in the kernel tree.
Signed-off-by: Jean Delvare Acked-by: Wolfram Sang Cc: David Brownell --- arch/arm/configs/afeb9260_defconfig | 6 +++--- arch/arm/configs/ams_delta_defconfig | 2 +- arch/arm/configs/at91cap9adk_defconfig | 4 ++-- arch/arm/configs/at91rm9200dk_defconfig | 2 +- arch/arm/configs/at91rm9200ek_defconfig | 2 +- arch/arm/configs/at91sam9260ek_defconfig | 2 +- arch/arm/configs/at91sam9261ek_defconfig | 4 ++-- arch/arm/configs/at91sam9263ek_defconfig | 4 ++-- arch/arm/configs/at91sam9g20ek_defconfig | 2 +- arch/arm/configs/at91sam9rlek_defconfig | 4 ++-- arch/arm/configs/ateb9200_defconfig | 2 +- arch/arm/configs/badge4_defconfig | 2 +- arch/arm/configs/cam60_defconfig | 4 ++-- arch/arm/configs/cm_x300_defconfig | 4 ++-- arch/arm/configs/colibri_defconfig | 2 +- arch/arm/configs/corgi_defconfig | 6 +++--- arch/arm/configs/csb337_defconfig | 2 +- arch/arm/configs/csb637_defconfig | 2 +- arch/arm/configs/ecbat91_defconfig | 4 ++-- arch/arm/configs/ep93xx_defconfig | 2 +- arch/arm/configs/ezx_defconfig | 4 ++-- arch/arm/configs/footbridge_defconfig | 2 +- arch/arm/configs/iop13xx_defconfig | 2 +- arch/arm/configs/iop32x_defconfig | 2 +- arch/arm/configs/iop33x_defconfig | 2 +- arch/arm/configs/ixp2000_defconfig | 2 +- arch/arm/configs/ixp23xx_defconfig | 2 +- arch/arm/configs/ixp4xx_defconfig | 2 +- arch/arm/configs/kafa_defconfig | 2 +- arch/arm/configs/kirkwood_defconfig | 6 +++--- arch/arm/configs/loki_defconfig | 4 ++-- arch/arm/configs/magician_defconfig | 2 +- arch/arm/configs/msm_defconfig | 2 +- arch/arm/configs/mv78xx0_defconfig | 2 +- arch/arm/configs/n770_defconfig | 4 ++-- arch/arm/configs/neocore926_defconfig | 6 +++--- arch/arm/configs/neponset_defconfig | 2 +- arch/arm/configs/omap3_beagle_defconfig | 4 ++-- arch/arm/configs/omap3_pandora_defconfig | 6 +++--- arch/arm/configs/omap_2430sdp_defconfig | 4 ++-- arch/arm/configs/omap_apollon_2420_defconfig | 2 +- arch/arm/configs/omap_generic_1510_defconfig | 2 +- arch/arm/configs/omap_generic_1610_defconfig | 2 +- arch/arm/configs/omap_h2_1610_defconfig | 4 ++-- arch/arm/configs/omap_h4_2420_defconfig | 2 +- arch/arm/configs/omap_innovator_1510_defconfig | 2 +- arch/arm/configs/omap_ldp_defconfig | 6 +++--- arch/arm/configs/omap_osk_5912_defconfig | 2 +- arch/arm/configs/onearm_defconfig | 2 +- arch/arm/configs/orion5x_defconfig | 4 ++-- arch/arm/configs/overo_defconfig | 6 +++--- arch/arm/configs/palmz71_defconfig | 2 +- arch/arm/configs/palmz72_defconfig | 6 +++--- arch/arm/configs/pcm027_defconfig | 2 +- arch/arm/configs/pcm038_defconfig | 4 ++-- arch/arm/configs/picotux200_defconfig | 2 +- arch/arm/configs/pnx4008_defconfig | 2 +- arch/arm/configs/qil-a9260_defconfig | 4 ++-- arch/arm/configs/rpc_defconfig | 2 +- arch/arm/configs/s3c2410_defconfig | 4 ++-- arch/arm/configs/s3c6400_defconfig | 4 ++-- arch/arm/configs/spitz_defconfig | 6 +++--- arch/arm/configs/sx1_defconfig | 2 +- arch/arm/configs/trizeps4_defconfig | 2 +- arch/arm/configs/usb-a9260_defconfig | 2 +- arch/arm/configs/usb-a9263_defconfig | 2 +- arch/arm/configs/versatile_defconfig | 2 +- arch/arm/configs/viper_defconfig | 4 ++-- arch/arm/configs/xm_x2xx_defconfig | 4 ++-- arch/arm/configs/yl9200_defconfig | 2 +- arch/avr32/configs/atngw100_defconfig | 6 +++--- arch/avr32/configs/atngw100_evklcd100_defconfig | 4 ++-- arch/avr32/configs/atngw100_evklcd101_defconfig | 4 ++-- arch/avr32/configs/atstk1002_defconfig | 6 +++--- arch/avr32/configs/atstk1003_defconfig | 6 +++--- arch/avr32/configs/atstk1004_defconfig | 2 +- arch/avr32/configs/atstk1006_defconfig | 6 +++--- 
arch/avr32/configs/favr-32_defconfig | 6 +++--- arch/avr32/configs/hammerhead_defconfig | 6 +++--- arch/avr32/configs/mimc200_defconfig | 6 +++--- arch/blackfin/configs/BF518F-EZBRD_defconfig | 4 ++-- arch/blackfin/configs/BF526-EZBRD_defconfig | 6 +++--- arch/blackfin/configs/BF527-EZKIT_defconfig | 6 +++--- arch/blackfin/configs/BF533-EZKIT_defconfig | 2 +- arch/blackfin/configs/BF533-STAMP_defconfig | 6 +++--- arch/blackfin/configs/BF537-STAMP_defconfig | 6 +++--- arch/blackfin/configs/BF538-EZKIT_defconfig | 6 +++--- arch/blackfin/configs/BF548-EZKIT_defconfig | 6 +++--- arch/blackfin/configs/BF561-EZKIT_defconfig | 2 +- arch/blackfin/configs/BlackStamp_defconfig | 4 ++-- arch/blackfin/configs/CM-BF527_defconfig | 4 ++-- arch/blackfin/configs/CM-BF548_defconfig | 4 ++-- arch/blackfin/configs/H8606_defconfig | 2 +- arch/blackfin/configs/IP0X_defconfig | 2 +- arch/blackfin/configs/PNAV-10_defconfig | 6 +++--- arch/blackfin/configs/SRV1_defconfig | 4 ++-- arch/blackfin/configs/TCM-BF537_defconfig | 2 +- arch/ia64/configs/bigsur_defconfig | 2 +- arch/ia64/configs/zx1_defconfig | 2 +- arch/m32r/configs/m32104ut_defconfig | 2 +- arch/mips/configs/bigsur_defconfig | 2 +- arch/mips/configs/emma2rh_defconfig | 2 +- arch/mips/configs/fulong_defconfig | 4 ++-- arch/mips/configs/msp71xx_defconfig | 2 +- arch/mips/configs/mtx1_defconfig | 4 ++-- arch/mips/configs/pnx8335-stb225_defconfig | 4 ++-- arch/mips/configs/rbtx49xx_defconfig | 2 +- arch/powerpc/configs/44x/sam440ep_defconfig | 4 ++-- arch/powerpc/configs/44x/warp_defconfig | 4 ++-- arch/powerpc/configs/52xx/cm5200_defconfig | 4 ++-- arch/powerpc/configs/52xx/lite5200b_defconfig | 4 ++-- arch/powerpc/configs/52xx/motionpro_defconfig | 4 ++-- arch/powerpc/configs/52xx/pcm030_defconfig | 4 ++-- arch/powerpc/configs/52xx/tqm5200_defconfig | 4 ++-- arch/powerpc/configs/83xx/asp8347_defconfig | 4 ++-- arch/powerpc/configs/83xx/mpc8313_rdb_defconfig | 6 +++--- arch/powerpc/configs/83xx/mpc8315_rdb_defconfig | 6 +++--- arch/powerpc/configs/83xx/mpc832x_mds_defconfig | 4 ++-- arch/powerpc/configs/83xx/mpc832x_rdb_defconfig | 6 +++--- arch/powerpc/configs/83xx/mpc834x_itx_defconfig | 6 +++--- arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig | 6 +++--- arch/powerpc/configs/83xx/mpc834x_mds_defconfig | 4 ++-- arch/powerpc/configs/83xx/mpc836x_mds_defconfig | 4 ++-- arch/powerpc/configs/83xx/mpc836x_rdk_defconfig | 6 +++--- arch/powerpc/configs/83xx/mpc837x_mds_defconfig | 4 ++-- arch/powerpc/configs/83xx/mpc837x_rdb_defconfig | 4 ++-- arch/powerpc/configs/83xx/sbc834x_defconfig | 4 ++-- arch/powerpc/configs/85xx/mpc8536_ds_defconfig | 4 ++-- arch/powerpc/configs/85xx/mpc8544_ds_defconfig | 4 ++-- arch/powerpc/configs/85xx/mpc8568mds_defconfig | 4 ++-- arch/powerpc/configs/85xx/mpc8572_ds_defconfig | 4 ++-- arch/powerpc/configs/85xx/stx_gp3_defconfig | 4 ++-- arch/powerpc/configs/85xx/tqm8540_defconfig | 4 ++-- arch/powerpc/configs/85xx/tqm8541_defconfig | 4 ++-- arch/powerpc/configs/85xx/tqm8548_defconfig | 4 ++-- arch/powerpc/configs/85xx/tqm8555_defconfig | 4 ++-- arch/powerpc/configs/85xx/tqm8560_defconfig | 4 ++-- arch/powerpc/configs/86xx/gef_sbc610_defconfig | 4 ++-- arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig | 4 ++-- arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig | 4 ++-- arch/powerpc/configs/86xx/sbc8641d_defconfig | 4 ++-- arch/powerpc/configs/c2k_defconfig | 4 ++-- arch/powerpc/configs/cell_defconfig | 2 +- arch/powerpc/configs/celleb_defconfig | 2 +- arch/powerpc/configs/chrp32_defconfig | 4 ++-- arch/powerpc/configs/g5_defconfig | 
4 ++-- arch/powerpc/configs/linkstation_defconfig | 4 ++-- arch/powerpc/configs/maple_defconfig | 4 ++-- arch/powerpc/configs/mpc5200_defconfig | 4 ++-- arch/powerpc/configs/mpc83xx_defconfig | 4 ++-- arch/powerpc/configs/mpc85xx_defconfig | 4 ++-- arch/powerpc/configs/mpc86xx_defconfig | 4 ++-- arch/powerpc/configs/pasemi_defconfig | 2 +- arch/powerpc/configs/pmac32_defconfig | 4 ++-- arch/powerpc/configs/ppc40x_defconfig | 4 ++-- arch/powerpc/configs/ppc44x_defconfig | 4 ++-- arch/powerpc/configs/ppc64_defconfig | 4 ++-- arch/powerpc/configs/ppc6xx_defconfig | 4 ++-- arch/powerpc/configs/prpmc2800_defconfig | 4 ++-- arch/powerpc/configs/pseries_defconfig | 4 ++-- arch/powerpc/configs/storcenter_defconfig | 4 ++-- arch/sh/configs/cayman_defconfig | 4 ++-- arch/sh/configs/edosk7760_defconfig | 4 ++-- arch/sh/configs/migor_defconfig | 4 ++-- arch/sh/configs/r7780mp_defconfig | 4 ++-- arch/sh/configs/r7785rp_defconfig | 4 ++-- arch/sh/configs/rts7751r2d1_defconfig | 2 +- arch/sh/configs/rts7751r2dplus_defconfig | 2 +- arch/sh/configs/se7343_defconfig | 4 ++-- arch/sh/configs/sh7785lcr_defconfig | 4 ++-- arch/sh/configs/shx3_defconfig | 6 +++--- arch/sparc/configs/sparc64_defconfig | 4 ++-- arch/x86/configs/i386_defconfig | 4 ++-- arch/x86/configs/x86_64_defconfig | 4 ++-- drivers/misc/eeprom/Kconfig | 8 ++++---- drivers/misc/eeprom/Makefile | 6 +++--- 176 files changed, 326 insertions(+), 326 deletions(-) (limited to 'arch/x86') diff --git a/arch/arm/configs/afeb9260_defconfig b/arch/arm/configs/afeb9260_defconfig index ce909586a34f..f7a272cb3da0 100644 --- a/arch/arm/configs/afeb9260_defconfig +++ b/arch/arm/configs/afeb9260_defconfig @@ -719,8 +719,8 @@ CONFIG_I2C_GPIO=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_AT24=y -# CONFIG_SENSORS_EEPROM is not set +CONFIG_EEPROM_AT24=y +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -744,7 +744,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=y # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/ams_delta_defconfig b/arch/arm/configs/ams_delta_defconfig index 2c4aa11f0b0d..764732529ea3 100644 --- a/arch/arm/configs/ams_delta_defconfig +++ b/arch/arm/configs/ams_delta_defconfig @@ -767,7 +767,7 @@ CONFIG_I2C_OMAP=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/at91cap9adk_defconfig b/arch/arm/configs/at91cap9adk_defconfig index bf97801a1068..bc6bd9f6174d 100644 --- a/arch/arm/configs/at91cap9adk_defconfig +++ b/arch/arm/configs/at91cap9adk_defconfig @@ -676,7 +676,7 @@ CONFIG_I2C_CHARDEV=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -703,7 +703,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/at91rm9200dk_defconfig b/arch/arm/configs/at91rm9200dk_defconfig index 
868fb7b9530b..238b218394e3 100644 --- a/arch/arm/configs/at91rm9200dk_defconfig +++ b/arch/arm/configs/at91rm9200dk_defconfig @@ -636,7 +636,7 @@ CONFIG_I2C_GPIO=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/at91rm9200ek_defconfig b/arch/arm/configs/at91rm9200ek_defconfig index de43fc675616..9f7a99ace514 100644 --- a/arch/arm/configs/at91rm9200ek_defconfig +++ b/arch/arm/configs/at91rm9200ek_defconfig @@ -610,7 +610,7 @@ CONFIG_I2C_GPIO=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/at91sam9260ek_defconfig b/arch/arm/configs/at91sam9260ek_defconfig index 38e6a0abeb4e..e0ee7060f9aa 100644 --- a/arch/arm/configs/at91sam9260ek_defconfig +++ b/arch/arm/configs/at91sam9260ek_defconfig @@ -582,7 +582,7 @@ CONFIG_I2C_GPIO=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/at91sam9261ek_defconfig b/arch/arm/configs/at91sam9261ek_defconfig index 93b779f94b41..01d1ef97d8be 100644 --- a/arch/arm/configs/at91sam9261ek_defconfig +++ b/arch/arm/configs/at91sam9261ek_defconfig @@ -660,7 +660,7 @@ CONFIG_I2C_GPIO=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -687,7 +687,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/at91sam9263ek_defconfig b/arch/arm/configs/at91sam9263ek_defconfig index a7ddd94363ca..036a126725c1 100644 --- a/arch/arm/configs/at91sam9263ek_defconfig +++ b/arch/arm/configs/at91sam9263ek_defconfig @@ -670,7 +670,7 @@ CONFIG_I2C_GPIO=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -697,7 +697,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/at91sam9g20ek_defconfig b/arch/arm/configs/at91sam9g20ek_defconfig index df0d6ee672b3..7e018a04c31b 100644 --- a/arch/arm/configs/at91sam9g20ek_defconfig +++ b/arch/arm/configs/at91sam9g20ek_defconfig @@ -665,7 +665,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=y # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/at91sam9rlek_defconfig b/arch/arm/configs/at91sam9rlek_defconfig index 
811bebbdc784..237a2a6a8517 100644 --- a/arch/arm/configs/at91sam9rlek_defconfig +++ b/arch/arm/configs/at91sam9rlek_defconfig @@ -566,7 +566,7 @@ CONFIG_I2C_GPIO=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -593,7 +593,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/ateb9200_defconfig b/arch/arm/configs/ateb9200_defconfig index 85c80f723d8e..a19e824cf7f8 100644 --- a/arch/arm/configs/ateb9200_defconfig +++ b/arch/arm/configs/ateb9200_defconfig @@ -723,7 +723,7 @@ CONFIG_I2C_GPIO=m # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig index b2bbf217c707..80222feb7dad 100644 --- a/arch/arm/configs/badge4_defconfig +++ b/arch/arm/configs/badge4_defconfig @@ -750,7 +750,7 @@ CONFIG_I2C_ELEKTOR=m # Other I2C Chip support # # CONFIG_SENSORS_DS1337 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCF8591 is not set # CONFIG_SENSORS_RTC8564 is not set diff --git a/arch/arm/configs/cam60_defconfig b/arch/arm/configs/cam60_defconfig index f945105d6cd6..8448108347cf 100644 --- a/arch/arm/configs/cam60_defconfig +++ b/arch/arm/configs/cam60_defconfig @@ -722,7 +722,7 @@ CONFIG_I2C_ALGOBIT=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -749,7 +749,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/cm_x300_defconfig b/arch/arm/configs/cm_x300_defconfig index 46f1c9dc350c..227da0843ead 100644 --- a/arch/arm/configs/cm_x300_defconfig +++ b/arch/arm/configs/cm_x300_defconfig @@ -763,8 +763,8 @@ CONFIG_I2C_PXA=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/colibri_defconfig b/arch/arm/configs/colibri_defconfig index c3e3418ed4fe..744086fff414 100644 --- a/arch/arm/configs/colibri_defconfig +++ b/arch/arm/configs/colibri_defconfig @@ -801,7 +801,7 @@ CONFIG_I2C_CHARDEV=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig index 98765438048d..d6cd165e9310 100644 
--- a/arch/arm/configs/corgi_defconfig +++ b/arch/arm/configs/corgi_defconfig @@ -982,8 +982,8 @@ CONFIG_I2C_PXA=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -1008,7 +1008,7 @@ CONFIG_SPI_PXA2XX=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/arm/configs/csb337_defconfig b/arch/arm/configs/csb337_defconfig index 67e65e4f0cdc..29f68c2effe6 100644 --- a/arch/arm/configs/csb337_defconfig +++ b/arch/arm/configs/csb337_defconfig @@ -679,7 +679,7 @@ CONFIG_I2C_GPIO=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/csb637_defconfig b/arch/arm/configs/csb637_defconfig index 99702146c9fc..f7b60ceed6c7 100644 --- a/arch/arm/configs/csb637_defconfig +++ b/arch/arm/configs/csb637_defconfig @@ -704,7 +704,7 @@ CONFIG_I2C_CHARDEV=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/ecbat91_defconfig b/arch/arm/configs/ecbat91_defconfig index cfeb817ad21a..ca520733bdb0 100644 --- a/arch/arm/configs/ecbat91_defconfig +++ b/arch/arm/configs/ecbat91_defconfig @@ -721,7 +721,7 @@ CONFIG_I2C_GPIO=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -747,7 +747,7 @@ CONFIG_SPI_AT91=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig index 21aa013793c6..3f89d5f25bce 100644 --- a/arch/arm/configs/ep93xx_defconfig +++ b/arch/arm/configs/ep93xx_defconfig @@ -681,7 +681,7 @@ CONFIG_I2C_ALGOBIT=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -CONFIG_SENSORS_EEPROM=y +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig index 2a84d557adc2..d5ee16e6abf3 100644 --- a/arch/arm/configs/ezx_defconfig +++ b/arch/arm/configs/ezx_defconfig @@ -877,7 +877,7 @@ CONFIG_I2C_PXA=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -900,7 +900,7 @@ CONFIG_SPI_PXA2XX=m # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_HAVE_GPIO_LIB=y diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig index 
299dc22294a0..6ace512fa101 100644 --- a/arch/arm/configs/footbridge_defconfig +++ b/arch/arm/configs/footbridge_defconfig @@ -801,7 +801,7 @@ CONFIG_I2C=m # # Other I2C Chip support # -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCF8591 is not set # CONFIG_SENSORS_RTC8564 is not set diff --git a/arch/arm/configs/iop13xx_defconfig b/arch/arm/configs/iop13xx_defconfig index 482e57061053..89c17761726b 100644 --- a/arch/arm/configs/iop13xx_defconfig +++ b/arch/arm/configs/iop13xx_defconfig @@ -744,7 +744,7 @@ CONFIG_I2C_IOP3XX=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/iop32x_defconfig b/arch/arm/configs/iop32x_defconfig index 8612f58e1056..d70177b38f5f 100644 --- a/arch/arm/configs/iop32x_defconfig +++ b/arch/arm/configs/iop32x_defconfig @@ -847,7 +847,7 @@ CONFIG_I2C_IOP3XX=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/iop33x_defconfig b/arch/arm/configs/iop33x_defconfig index 8b0098d19d08..eec488298267 100644 --- a/arch/arm/configs/iop33x_defconfig +++ b/arch/arm/configs/iop33x_defconfig @@ -746,7 +746,7 @@ CONFIG_I2C_IOP3XX=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/ixp2000_defconfig b/arch/arm/configs/ixp2000_defconfig index 84680db6c615..57526c15e854 100644 --- a/arch/arm/configs/ixp2000_defconfig +++ b/arch/arm/configs/ixp2000_defconfig @@ -768,7 +768,7 @@ CONFIG_I2C_IXP2000=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -CONFIG_SENSORS_EEPROM=y +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/ixp23xx_defconfig b/arch/arm/configs/ixp23xx_defconfig index 4a2f7b2372db..ef97561ed75b 100644 --- a/arch/arm/configs/ixp23xx_defconfig +++ b/arch/arm/configs/ixp23xx_defconfig @@ -900,7 +900,7 @@ CONFIG_I2C_ALGOBIT=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -CONFIG_SENSORS_EEPROM=y +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig index fc14932e3abd..95cd8dfb5f1e 100644 --- a/arch/arm/configs/ixp4xx_defconfig +++ b/arch/arm/configs/ixp4xx_defconfig @@ -1083,7 +1083,7 @@ CONFIG_I2C_IXP4XX=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -CONFIG_SENSORS_EEPROM=y +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/kafa_defconfig b/arch/arm/configs/kafa_defconfig index 6dd95a2c8d5d..9f92fc527f59 100644 --- a/arch/arm/configs/kafa_defconfig +++ 
b/arch/arm/configs/kafa_defconfig @@ -603,7 +603,7 @@ CONFIG_I2C_GPIO=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/kirkwood_defconfig b/arch/arm/configs/kirkwood_defconfig index ab8b1e0d0dac..4bc38078d580 100644 --- a/arch/arm/configs/kirkwood_defconfig +++ b/arch/arm/configs/kirkwood_defconfig @@ -905,8 +905,8 @@ CONFIG_I2C_MV64XXX=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -930,7 +930,7 @@ CONFIG_SPI_ORION=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/loki_defconfig b/arch/arm/configs/loki_defconfig index 17da7c3b3d53..b720fcffbcd4 100644 --- a/arch/arm/configs/loki_defconfig +++ b/arch/arm/configs/loki_defconfig @@ -654,7 +654,7 @@ CONFIG_I2C_MV64XXX=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -675,7 +675,7 @@ CONFIG_SPI_MASTER=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig index 4d11678584db..73ba62b71063 100644 --- a/arch/arm/configs/magician_defconfig +++ b/arch/arm/configs/magician_defconfig @@ -678,7 +678,7 @@ CONFIG_I2C_PXA=m # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/msm_defconfig b/arch/arm/configs/msm_defconfig index 3b4ecf2a90dd..cc3b06ee24f9 100644 --- a/arch/arm/configs/msm_defconfig +++ b/arch/arm/configs/msm_defconfig @@ -580,7 +580,7 @@ CONFIG_I2C_MSM=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set CONFIG_SENSORS_PCA9633=y diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig index d38ebf8721a4..83c817f31bcc 100644 --- a/arch/arm/configs/mv78xx0_defconfig +++ b/arch/arm/configs/mv78xx0_defconfig @@ -832,7 +832,7 @@ CONFIG_I2C_MV64XXX=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/n770_defconfig b/arch/arm/configs/n770_defconfig index 568ef1770d5f..672f6db06a52 100644 --- a/arch/arm/configs/n770_defconfig +++ b/arch/arm/configs/n770_defconfig @@ -767,7 +767,7 @@ CONFIG_I2C_OMAP=y # CONFIG_SENSORS_DS1337 is not set # 
CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -798,7 +798,7 @@ CONFIG_SPI_OMAP_UWIRE=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_TSC2101 is not set # CONFIG_SPI_TSC2102 is not set # CONFIG_SPI_TSC210X is not set diff --git a/arch/arm/configs/neocore926_defconfig b/arch/arm/configs/neocore926_defconfig index 325f1e105f69..e0e4e98b5aa2 100644 --- a/arch/arm/configs/neocore926_defconfig +++ b/arch/arm/configs/neocore926_defconfig @@ -774,8 +774,8 @@ CONFIG_I2C_CHARDEV=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -798,7 +798,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/neponset_defconfig b/arch/arm/configs/neponset_defconfig index 92ccdc6492f7..d81ea219c934 100644 --- a/arch/arm/configs/neponset_defconfig +++ b/arch/arm/configs/neponset_defconfig @@ -737,7 +737,7 @@ CONFIG_I2C_ALGOBIT=y # # Other I2C Chip support # -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCF8591 is not set # CONFIG_SENSORS_RTC8564 is not set diff --git a/arch/arm/configs/omap3_beagle_defconfig b/arch/arm/configs/omap3_beagle_defconfig index e042d27eae16..4c6fb7e959df 100644 --- a/arch/arm/configs/omap3_beagle_defconfig +++ b/arch/arm/configs/omap3_beagle_defconfig @@ -687,8 +687,8 @@ CONFIG_I2C_OMAP=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/arm/configs/omap3_pandora_defconfig b/arch/arm/configs/omap3_pandora_defconfig index 09543f4de5bc..b54ad2e2da36 100644 --- a/arch/arm/configs/omap3_pandora_defconfig +++ b/arch/arm/configs/omap3_pandora_defconfig @@ -713,8 +713,8 @@ CONFIG_I2C_OMAP=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -740,7 +740,7 @@ CONFIG_SPI_OMAP24XX=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/arm/configs/omap_2430sdp_defconfig b/arch/arm/configs/omap_2430sdp_defconfig index b0617c0da2a1..640e9afc4630 100644 --- a/arch/arm/configs/omap_2430sdp_defconfig +++ b/arch/arm/configs/omap_2430sdp_defconfig @@ -710,7 +710,7 @@ CONFIG_I2C_OMAP=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # 
CONFIG_SENSORS_PCF8591 is not set @@ -743,7 +743,7 @@ CONFIG_SPI_MASTER=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_TSC2101 is not set # CONFIG_SPI_TSC2102 is not set # CONFIG_SPI_TSC210X is not set diff --git a/arch/arm/configs/omap_apollon_2420_defconfig b/arch/arm/configs/omap_apollon_2420_defconfig index bb39dfc72d69..ac7adf34c54a 100644 --- a/arch/arm/configs/omap_apollon_2420_defconfig +++ b/arch/arm/configs/omap_apollon_2420_defconfig @@ -612,7 +612,7 @@ CONFIG_SPI_OMAP24XX=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_TSC2101 is not set # CONFIG_SPI_TSC2102 is not set # CONFIG_SPI_TSC210X is not set diff --git a/arch/arm/configs/omap_generic_1510_defconfig b/arch/arm/configs/omap_generic_1510_defconfig index 4b1c252f2091..ccdc661b5856 100644 --- a/arch/arm/configs/omap_generic_1510_defconfig +++ b/arch/arm/configs/omap_generic_1510_defconfig @@ -637,7 +637,7 @@ CONFIG_I2C_ALGOBIT=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/omap_generic_1610_defconfig b/arch/arm/configs/omap_generic_1610_defconfig index fc66f019d56c..0c42c8955047 100644 --- a/arch/arm/configs/omap_generic_1610_defconfig +++ b/arch/arm/configs/omap_generic_1610_defconfig @@ -641,7 +641,7 @@ CONFIG_I2C_ALGOBIT=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/omap_h2_1610_defconfig b/arch/arm/configs/omap_h2_1610_defconfig index c03507202f3c..74dbdc644d32 100644 --- a/arch/arm/configs/omap_h2_1610_defconfig +++ b/arch/arm/configs/omap_h2_1610_defconfig @@ -700,7 +700,7 @@ CONFIG_I2C_OMAP=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -731,7 +731,7 @@ CONFIG_SPI_OMAP_UWIRE=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_TSC2101=y # CONFIG_SPI_TSC2102 is not set # CONFIG_SPI_TSC210X is not set diff --git a/arch/arm/configs/omap_h4_2420_defconfig b/arch/arm/configs/omap_h4_2420_defconfig index 5bc89185a64f..a4aab8e4c29b 100644 --- a/arch/arm/configs/omap_h4_2420_defconfig +++ b/arch/arm/configs/omap_h4_2420_defconfig @@ -681,7 +681,7 @@ CONFIG_I2C_OMAP=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/omap_innovator_1510_defconfig b/arch/arm/configs/omap_innovator_1510_defconfig index 55b2611bd90a..0cfe363e3365 100644 --- a/arch/arm/configs/omap_innovator_1510_defconfig +++ b/arch/arm/configs/omap_innovator_1510_defconfig @@ -631,7 +631,7 @@ CONFIG_I2C_BOARDINFO=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# 
CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/omap_ldp_defconfig b/arch/arm/configs/omap_ldp_defconfig index b77d054169ee..aa9d34feddc6 100644 --- a/arch/arm/configs/omap_ldp_defconfig +++ b/arch/arm/configs/omap_ldp_defconfig @@ -629,8 +629,8 @@ CONFIG_I2C_OMAP=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -656,7 +656,7 @@ CONFIG_SPI_OMAP24XX=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/arm/configs/omap_osk_5912_defconfig b/arch/arm/configs/omap_osk_5912_defconfig index b68e0144cab5..6b3b5c610da0 100644 --- a/arch/arm/configs/omap_osk_5912_defconfig +++ b/arch/arm/configs/omap_osk_5912_defconfig @@ -711,7 +711,7 @@ CONFIG_I2C_OMAP=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/onearm_defconfig b/arch/arm/configs/onearm_defconfig index 418ca2febbe3..f8701fadb600 100644 --- a/arch/arm/configs/onearm_defconfig +++ b/arch/arm/configs/onearm_defconfig @@ -698,7 +698,7 @@ CONFIG_I2C_CHARDEV=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig index b2456ca544c9..a8ee6984a09e 100644 --- a/arch/arm/configs/orion5x_defconfig +++ b/arch/arm/configs/orion5x_defconfig @@ -886,8 +886,8 @@ CONFIG_I2C_MV64XXX=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/arm/configs/overo_defconfig b/arch/arm/configs/overo_defconfig index 49200967a153..a57f9e4124fa 100644 --- a/arch/arm/configs/overo_defconfig +++ b/arch/arm/configs/overo_defconfig @@ -858,8 +858,8 @@ CONFIG_I2C_OMAP=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=y +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -885,7 +885,7 @@ CONFIG_SPI_OMAP24XX=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/arm/configs/palmz71_defconfig b/arch/arm/configs/palmz71_defconfig index 6361922e71c1..08e14068fff7 100644 --- a/arch/arm/configs/palmz71_defconfig +++ b/arch/arm/configs/palmz71_defconfig @@ -554,7 +554,7 @@ CONFIG_SPI_OMAP_UWIRE=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not 
set # CONFIG_SPI_TSC2101 is not set # CONFIG_SPI_TSC2102 is not set # CONFIG_SPI_TSC210X is not set diff --git a/arch/arm/configs/palmz72_defconfig b/arch/arm/configs/palmz72_defconfig index 3245f8f33e0a..a0dc37c05dea 100644 --- a/arch/arm/configs/palmz72_defconfig +++ b/arch/arm/configs/palmz72_defconfig @@ -527,8 +527,8 @@ CONFIG_I2C_PXA=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -552,7 +552,7 @@ CONFIG_SPI_MASTER=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=y # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/arm/configs/pcm027_defconfig b/arch/arm/configs/pcm027_defconfig index 17b9b2469570..05ad96a43b1d 100644 --- a/arch/arm/configs/pcm027_defconfig +++ b/arch/arm/configs/pcm027_defconfig @@ -606,7 +606,7 @@ CONFIG_I2C_PXA=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -CONFIG_SENSORS_EEPROM=y +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/pcm038_defconfig b/arch/arm/configs/pcm038_defconfig index 6b798c215ca8..41429a00f58c 100644 --- a/arch/arm/configs/pcm038_defconfig +++ b/arch/arm/configs/pcm038_defconfig @@ -604,7 +604,7 @@ CONFIG_I2C_BOARDINFO=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -626,7 +626,7 @@ CONFIG_SPI_BITBANG=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_HAVE_GPIO_LIB=y diff --git a/arch/arm/configs/picotux200_defconfig b/arch/arm/configs/picotux200_defconfig index 59e4463c2da2..9018f0f298aa 100644 --- a/arch/arm/configs/picotux200_defconfig +++ b/arch/arm/configs/picotux200_defconfig @@ -744,7 +744,7 @@ CONFIG_I2C_GPIO=m # CONFIG_SENSORS_DS1337=m CONFIG_SENSORS_DS1374=m -CONFIG_SENSORS_EEPROM=m +CONFIG_EEPROM_LEGACY=m CONFIG_SENSORS_PCF8574=m CONFIG_SENSORS_PCA9539=m CONFIG_SENSORS_PCF8591=m diff --git a/arch/arm/configs/pnx4008_defconfig b/arch/arm/configs/pnx4008_defconfig index 811b8f60d19d..67b5f1e15f4a 100644 --- a/arch/arm/configs/pnx4008_defconfig +++ b/arch/arm/configs/pnx4008_defconfig @@ -915,7 +915,7 @@ CONFIG_I2C_ALGOPCA=m # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -CONFIG_SENSORS_EEPROM=m +CONFIG_EEPROM_LEGACY=m CONFIG_SENSORS_PCF8574=m # CONFIG_SENSORS_PCA9539 is not set CONFIG_SENSORS_PCF8591=m diff --git a/arch/arm/configs/qil-a9260_defconfig b/arch/arm/configs/qil-a9260_defconfig index 5cbd81589647..cd1d717903ac 100644 --- a/arch/arm/configs/qil-a9260_defconfig +++ b/arch/arm/configs/qil-a9260_defconfig @@ -687,7 +687,7 @@ CONFIG_I2C_CHARDEV=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -714,7 +714,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# 
CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig index f62d1817d2c6..a29d61fe4c6a 100644 --- a/arch/arm/configs/rpc_defconfig +++ b/arch/arm/configs/rpc_defconfig @@ -590,7 +590,7 @@ CONFIG_I2C_ACORN=y # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig index 35faaea8623e..65a583ee5df8 100644 --- a/arch/arm/configs/s3c2410_defconfig +++ b/arch/arm/configs/s3c2410_defconfig @@ -923,7 +923,7 @@ CONFIG_I2C_SIMTEC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_SENSORS_EEPROM=m +CONFIG_EEPROM_LEGACY=m # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -950,7 +950,7 @@ CONFIG_SPI_S3C24XX_GPIO=m # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_HAVE_GPIO_LIB=y diff --git a/arch/arm/configs/s3c6400_defconfig b/arch/arm/configs/s3c6400_defconfig index cf3c1b5d7048..2e8fa50e9a09 100644 --- a/arch/arm/configs/s3c6400_defconfig +++ b/arch/arm/configs/s3c6400_defconfig @@ -465,8 +465,8 @@ CONFIG_I2C_S3C2410=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_AT24=y -# CONFIG_SENSORS_EEPROM is not set +CONFIG_EEPROM_AT24=y +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig index 4df5b4db2aa0..745c68ffb885 100644 --- a/arch/arm/configs/spitz_defconfig +++ b/arch/arm/configs/spitz_defconfig @@ -977,8 +977,8 @@ CONFIG_I2C_PXA=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -1003,7 +1003,7 @@ CONFIG_SPI_PXA2XX=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/arm/configs/sx1_defconfig b/arch/arm/configs/sx1_defconfig index 853dcdd9f2e0..25b007ff8bab 100644 --- a/arch/arm/configs/sx1_defconfig +++ b/arch/arm/configs/sx1_defconfig @@ -610,7 +610,7 @@ CONFIG_I2C_OMAP=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig index 9033d147f052..b6f838197816 100644 --- a/arch/arm/configs/trizeps4_defconfig +++ b/arch/arm/configs/trizeps4_defconfig @@ -948,7 +948,7 @@ CONFIG_I2C_PXA_SLAVE=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not 
set diff --git a/arch/arm/configs/usb-a9260_defconfig b/arch/arm/configs/usb-a9260_defconfig index fcb4aaabd439..fd7774033d64 100644 --- a/arch/arm/configs/usb-a9260_defconfig +++ b/arch/arm/configs/usb-a9260_defconfig @@ -676,7 +676,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/usb-a9263_defconfig b/arch/arm/configs/usb-a9263_defconfig index b786e0407e8e..e7c19dd92557 100644 --- a/arch/arm/configs/usb-a9263_defconfig +++ b/arch/arm/configs/usb-a9263_defconfig @@ -668,7 +668,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig index 8355f88f7292..b11c5da3996c 100644 --- a/arch/arm/configs/versatile_defconfig +++ b/arch/arm/configs/versatile_defconfig @@ -611,7 +611,7 @@ CONFIG_I2C_ALGOBIT=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -CONFIG_SENSORS_EEPROM=m +CONFIG_EEPROM_LEGACY=m # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig index d01fecb8673e..30f463d2fa8a 100644 --- a/arch/arm/configs/viper_defconfig +++ b/arch/arm/configs/viper_defconfig @@ -860,8 +860,8 @@ CONFIG_I2C_PXA=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/arm/configs/xm_x2xx_defconfig b/arch/arm/configs/xm_x2xx_defconfig index f891364deceb..1039f366bf8d 100644 --- a/arch/arm/configs/xm_x2xx_defconfig +++ b/arch/arm/configs/xm_x2xx_defconfig @@ -1009,8 +1009,8 @@ CONFIG_I2C_PXA=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/arm/configs/yl9200_defconfig b/arch/arm/configs/yl9200_defconfig index a9f41c24c9dc..9192e5977674 100644 --- a/arch/arm/configs/yl9200_defconfig +++ b/arch/arm/configs/yl9200_defconfig @@ -682,7 +682,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig index 164e2814ae78..574aca975334 100644 --- a/arch/avr32/configs/atngw100_defconfig +++ b/arch/avr32/configs/atngw100_defconfig @@ -644,8 +644,8 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_AT24=m -# CONFIG_SENSORS_EEPROM is not set +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -670,7 +670,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set 
CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig index b0572d213826..86a45b5c9d0d 100644 --- a/arch/avr32/configs/atngw100_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100_evklcd100_defconfig @@ -671,7 +671,7 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -699,7 +699,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set CONFIG_HAVE_GPIO_LIB=y diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig index c5b898d21075..a96b68ea5e83 100644 --- a/arch/avr32/configs/atngw100_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100_evklcd101_defconfig @@ -671,7 +671,7 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -699,7 +699,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set CONFIG_HAVE_GPIO_LIB=y diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig index c9dc64832a19..0abe90adb1a4 100644 --- a/arch/avr32/configs/atstk1002_defconfig +++ b/arch/avr32/configs/atstk1002_defconfig @@ -663,8 +663,8 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_AT24=m -# CONFIG_SENSORS_EEPROM is not set +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -689,7 +689,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig index 29ea1327b498..101972f40f07 100644 --- a/arch/avr32/configs/atstk1003_defconfig +++ b/arch/avr32/configs/atstk1003_defconfig @@ -611,8 +611,8 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_AT24=m -# CONFIG_SENSORS_EEPROM is not set +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -637,7 +637,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig index 69e6c0d08ce8..518f7898df75 100644 --- a/arch/avr32/configs/atstk1004_defconfig +++ b/arch/avr32/configs/atstk1004_defconfig @@ -394,7 +394,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig index 361c31c2af10..c1603c4860e0 
100644 --- a/arch/avr32/configs/atstk1006_defconfig +++ b/arch/avr32/configs/atstk1006_defconfig @@ -684,8 +684,8 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -710,7 +710,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig index e2bd9982e2af..4c3ac0643e4a 100644 --- a/arch/avr32/configs/favr-32_defconfig +++ b/arch/avr32/configs/favr-32_defconfig @@ -633,8 +633,8 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -659,7 +659,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig index 0d3d2982c8f5..3970fcc4d76e 100644 --- a/arch/avr32/configs/hammerhead_defconfig +++ b/arch/avr32/configs/hammerhead_defconfig @@ -716,8 +716,8 @@ CONFIG_I2C_GPIO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -742,7 +742,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig index 981e4f8b8aeb..1a58ffbc752d 100644 --- a/arch/avr32/configs/mimc200_defconfig +++ b/arch/avr32/configs/mimc200_defconfig @@ -565,8 +565,8 @@ CONFIG_I2C_GPIO=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_AT24=y -# CONFIG_SENSORS_EEPROM is not set +CONFIG_EEPROM_AT24=y +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -592,7 +592,7 @@ CONFIG_SPI_ATMEL=y # # SPI Protocol Masters # -CONFIG_SPI_AT25=y +CONFIG_EEPROM_AT25=y # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig index e0b3f242b555..defb9785c65b 100644 --- a/arch/blackfin/configs/BF518F-EZBRD_defconfig +++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig @@ -746,9 +746,9 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set +# CONFIG_EEPROM_AT24 is not set # CONFIG_SENSORS_AD5252 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git 
a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig index 69f66c35b2a5..992424ff3153 100644 --- a/arch/blackfin/configs/BF526-EZBRD_defconfig +++ b/arch/blackfin/configs/BF526-EZBRD_defconfig @@ -793,9 +793,9 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set +# CONFIG_EEPROM_AT24 is not set # CONFIG_SENSORS_AD5252 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -820,7 +820,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig index f92668af00b0..21e3c1af55ba 100644 --- a/arch/blackfin/configs/BF527-EZKIT_defconfig +++ b/arch/blackfin/configs/BF527-EZKIT_defconfig @@ -837,9 +837,9 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set +# CONFIG_EEPROM_AT24 is not set # CONFIG_SENSORS_AD5252 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -864,7 +864,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig index 92afd988449b..0bdf20a1af61 100644 --- a/arch/blackfin/configs/BF533-EZKIT_defconfig +++ b/arch/blackfin/configs/BF533-EZKIT_defconfig @@ -719,7 +719,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig index 49eabb41e9e5..2f747d6e97e2 100644 --- a/arch/blackfin/configs/BF533-STAMP_defconfig +++ b/arch/blackfin/configs/BF533-STAMP_defconfig @@ -743,9 +743,9 @@ CONFIG_I2C_HELPER_AUTO=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set +# CONFIG_EEPROM_AT24 is not set # CONFIG_SENSORS_AD5252 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -770,7 +770,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig index 332142f7f9b4..8b0a81294e65 100644 --- a/arch/blackfin/configs/BF537-STAMP_defconfig +++ b/arch/blackfin/configs/BF537-STAMP_defconfig @@ -794,9 +794,9 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set +# CONFIG_EEPROM_AT24 is not set CONFIG_SENSORS_AD5252=m -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not 
set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -821,7 +821,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig index ed15934c67c2..a1f766bf7d9b 100644 --- a/arch/blackfin/configs/BF538-EZKIT_defconfig +++ b/arch/blackfin/configs/BF538-EZKIT_defconfig @@ -796,9 +796,9 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set +# CONFIG_EEPROM_AT24 is not set # CONFIG_SENSORS_AD5252 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -823,7 +823,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig index d4ed9ce1f62f..cd2da6b7692c 100644 --- a/arch/blackfin/configs/BF548-EZKIT_defconfig +++ b/arch/blackfin/configs/BF548-EZKIT_defconfig @@ -928,9 +928,9 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set +# CONFIG_EEPROM_AT24 is not set # CONFIG_SENSORS_AD5252 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -955,7 +955,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig index 1ecb7a38c905..b398ca202dbd 100644 --- a/arch/blackfin/configs/BF561-EZKIT_defconfig +++ b/arch/blackfin/configs/BF561-EZKIT_defconfig @@ -756,7 +756,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig index 9683b2e13097..3a20e281d23c 100644 --- a/arch/blackfin/configs/BlackStamp_defconfig +++ b/arch/blackfin/configs/BlackStamp_defconfig @@ -691,7 +691,7 @@ CONFIG_I2C_GPIO=m # # CONFIG_DS1682 is not set # CONFIG_SENSORS_AD5252 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set @@ -713,7 +713,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -CONFIG_SPI_AT25=y +CONFIG_EEPROM_AT25=y CONFIG_SPI_SPIDEV=m # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig index a041e7eba770..95146948166f 100644 --- a/arch/blackfin/configs/CM-BF527_defconfig +++ b/arch/blackfin/configs/CM-BF527_defconfig @@ -715,7 +715,7 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set # CONFIG_SENSORS_AD5252 is not set -# 
CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -742,7 +742,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig index efd68bc78f35..efe9741b1f14 100644 --- a/arch/blackfin/configs/CM-BF548_defconfig +++ b/arch/blackfin/configs/CM-BF548_defconfig @@ -798,7 +798,7 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set # CONFIG_SENSORS_AD5252 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCF8575 is not set # CONFIG_SENSORS_PCA9543 is not set @@ -826,7 +826,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig index 5d3901d23fd1..bd553da15db8 100644 --- a/arch/blackfin/configs/H8606_defconfig +++ b/arch/blackfin/configs/H8606_defconfig @@ -750,7 +750,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -CONFIG_SPI_AT25=y +CONFIG_EEPROM_AT25=y CONFIG_SPI_SPIDEV=y # diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig index e66f5daaa828..eae83b5de92f 100644 --- a/arch/blackfin/configs/IP0X_defconfig +++ b/arch/blackfin/configs/IP0X_defconfig @@ -803,7 +803,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig index ce5dde9de9db..ad096702ac16 100644 --- a/arch/blackfin/configs/PNAV-10_defconfig +++ b/arch/blackfin/configs/PNAV-10_defconfig @@ -755,9 +755,9 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set +# CONFIG_EEPROM_AT24 is not set # CONFIG_SENSORS_AD5252 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set CONFIG_SENSORS_PCF8574=m # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -781,7 +781,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig index 7c8250d6fa66..fa580affc9d6 100644 --- a/arch/blackfin/configs/SRV1_defconfig +++ b/arch/blackfin/configs/SRV1_defconfig @@ -798,7 +798,7 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_SENSORS_AD5252 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCF8575 is not set # CONFIG_SENSORS_PCA9543 is not set @@ -826,7 +826,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -CONFIG_SPI_AT25=m +CONFIG_EEPROM_AT25=m # CONFIG_SPI_SPIDEV is not set # diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig index 9af522c7dadf..97a1f1d20dcf 100644 --- 
a/arch/blackfin/configs/TCM-BF537_defconfig +++ b/arch/blackfin/configs/TCM-BF537_defconfig @@ -533,7 +533,7 @@ CONFIG_SPI_BFIN=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig index 6dd8655664f3..ace41096b47b 100644 --- a/arch/ia64/configs/bigsur_defconfig +++ b/arch/ia64/configs/bigsur_defconfig @@ -752,7 +752,7 @@ CONFIG_I2C_ALGOBIT=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig index 0a06b1333c95..514f0635dafe 100644 --- a/arch/ia64/configs/zx1_defconfig +++ b/arch/ia64/configs/zx1_defconfig @@ -841,7 +841,7 @@ CONFIG_I2C_ALGOPCF=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/m32r/configs/m32104ut_defconfig b/arch/m32r/configs/m32104ut_defconfig index 9b5af6cd2e0b..6f54b00b3838 100644 --- a/arch/m32r/configs/m32104ut_defconfig +++ b/arch/m32r/configs/m32104ut_defconfig @@ -621,7 +621,7 @@ CONFIG_I2C_ELEKTOR=m # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index a3bbbf067a3b..783da855a2e3 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -959,7 +959,7 @@ CONFIG_I2C_SIBYTE=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_SENSORS_EEPROM=y +CONFIG_EEPROM_LEGACY=y CONFIG_SENSORS_PCF8574=y # CONFIG_PCF8575 is not set CONFIG_SENSORS_PCF8591=y diff --git a/arch/mips/configs/emma2rh_defconfig b/arch/mips/configs/emma2rh_defconfig index 634bb4eaf132..fea9bc9865a3 100644 --- a/arch/mips/configs/emma2rh_defconfig +++ b/arch/mips/configs/emma2rh_defconfig @@ -996,7 +996,7 @@ CONFIG_I2C_CHARDEV=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/mips/configs/fulong_defconfig b/arch/mips/configs/fulong_defconfig index b6698a232ae9..786a9bc9a696 100644 --- a/arch/mips/configs/fulong_defconfig +++ b/arch/mips/configs/fulong_defconfig @@ -1006,8 +1006,8 @@ CONFIG_I2C_VIAPRO=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/mips/configs/msp71xx_defconfig b/arch/mips/configs/msp71xx_defconfig index dd13db4d0fb9..84d6491b3d41 100644 --- a/arch/mips/configs/msp71xx_defconfig +++ b/arch/mips/configs/msp71xx_defconfig @@ -929,7 +929,7 @@ 
CONFIG_I2C_PMCMSP=y # # CONFIG_SENSORS_DS1337 is not set # CONFIG_SENSORS_DS1374 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set CONFIG_PMCTWILED=y # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index db9272677aa2..8426d3b9501c 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -1845,7 +1845,7 @@ CONFIG_I2C_VOODOO3=m CONFIG_SENSORS_DS1337=m CONFIG_SENSORS_DS1374=m # CONFIG_DS1682 is not set -CONFIG_SENSORS_EEPROM=m +CONFIG_EEPROM_LEGACY=m CONFIG_SENSORS_PCF8574=m CONFIG_SENSORS_PCA9539=m CONFIG_SENSORS_PCF8591=m @@ -1872,7 +1872,7 @@ CONFIG_SPI_BUTTERFLY=m # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_W1=m diff --git a/arch/mips/configs/pnx8335-stb225_defconfig b/arch/mips/configs/pnx8335-stb225_defconfig index d9536522cff5..2728caa6c2fb 100644 --- a/arch/mips/configs/pnx8335-stb225_defconfig +++ b/arch/mips/configs/pnx8335-stb225_defconfig @@ -640,8 +640,8 @@ CONFIG_I2C_CHARDEV=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig index c7c0864b8ce9..83d5c58662c8 100644 --- a/arch/mips/configs/rbtx49xx_defconfig +++ b/arch/mips/configs/rbtx49xx_defconfig @@ -522,7 +522,7 @@ CONFIG_SPI_TXX9=y # # SPI Protocol Masters # -CONFIG_SPI_AT25=y +CONFIG_EEPROM_AT25=y # CONFIG_SPI_TLE62X0 is not set CONFIG_HAVE_GPIO_LIB=y diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig index 15f48e03ec2e..ce28cd6f0d4f 100644 --- a/arch/powerpc/configs/44x/sam440ep_defconfig +++ b/arch/powerpc/configs/44x/sam440ep_defconfig @@ -730,8 +730,8 @@ CONFIG_I2C_IBM_IIC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig index 59cbd2761ed7..031ac6d4212f 100644 --- a/arch/powerpc/configs/44x/warp_defconfig +++ b/arch/powerpc/configs/44x/warp_defconfig @@ -660,8 +660,8 @@ CONFIG_I2C_HELPER_AUTO=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=y +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig index 3df627494b65..99c495ad9c75 100644 --- a/arch/powerpc/configs/52xx/cm5200_defconfig +++ b/arch/powerpc/configs/52xx/cm5200_defconfig @@ -629,8 +629,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git 
a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig index 5b969f9c925e..9796220032fd 100644 --- a/arch/powerpc/configs/52xx/lite5200b_defconfig +++ b/arch/powerpc/configs/52xx/lite5200b_defconfig @@ -762,8 +762,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig index 3c0d4e561726..d081e0031057 100644 --- a/arch/powerpc/configs/52xx/motionpro_defconfig +++ b/arch/powerpc/configs/52xx/motionpro_defconfig @@ -629,8 +629,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=y +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig index 9d0207783d60..b21b8e8c3a78 100644 --- a/arch/powerpc/configs/52xx/pcm030_defconfig +++ b/arch/powerpc/configs/52xx/pcm030_defconfig @@ -740,8 +740,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=m +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=m # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig index bc190051e8d5..79954579f5ec 100644 --- a/arch/powerpc/configs/52xx/tqm5200_defconfig +++ b/arch/powerpc/configs/52xx/tqm5200_defconfig @@ -645,8 +645,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig index cbecaf3d7906..b3d9d1a8dfaf 100644 --- a/arch/powerpc/configs/83xx/asp8347_defconfig +++ b/arch/powerpc/configs/83xx/asp8347_defconfig @@ -732,8 +732,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig index bfc32ea265a7..6293bb99b52e 100644 --- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig @@ -858,8 +858,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -884,7 +884,7 @@ CONFIG_SPI_MPC83xx=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # 
CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig index aad0e1a98c55..c7cceb4536d8 100644 --- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig @@ -919,8 +919,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -945,7 +945,7 @@ CONFIG_SPI_MPC83xx=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig index 9cb8c8b956e4..5e3cd49a2e9c 100644 --- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig @@ -730,8 +730,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig index 9cc976f010c9..90c482606880 100644 --- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig @@ -745,8 +745,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -770,7 +770,7 @@ CONFIG_SPI_MPC83xx=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig index 07a674f5344e..42b7f3403fb0 100644 --- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig @@ -898,8 +898,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set CONFIG_SENSORS_PCF8574=y # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -923,7 +923,7 @@ CONFIG_SPI_MPC83xx=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig index 426232cb0097..17c903b36576 100644 --- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig @@ -778,8 +778,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not 
set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set CONFIG_SENSORS_PCF8574=y # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -803,7 +803,7 @@ CONFIG_SPI_MPC83xx=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig index 36e2e93a1c53..728fc95d5870 100644 --- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig @@ -673,8 +673,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig index 80eb6c9a05c4..f7546e7cb762 100644 --- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig @@ -805,8 +805,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig index b9b236806e9f..b94972f52241 100644 --- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig +++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig @@ -723,8 +723,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -749,7 +749,7 @@ CONFIG_SPI_MPC83xx=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_SPI_SPIDEV=y # CONFIG_SPI_TLE62X0 is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig index f6350d7e1688..ad97649bdc73 100644 --- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig @@ -787,8 +787,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig index f447de16f75d..2e526beeeb90 100644 --- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig @@ -789,8 +789,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff 
--git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig index 8d2d7eeab5f5..54bb24a5e02a 100644 --- a/arch/powerpc/configs/83xx/sbc834x_defconfig +++ b/arch/powerpc/configs/83xx/sbc834x_defconfig @@ -640,8 +640,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/85xx/mpc8536_ds_defconfig b/arch/powerpc/configs/85xx/mpc8536_ds_defconfig index e243e14a6708..256055cb6d2f 100644 --- a/arch/powerpc/configs/85xx/mpc8536_ds_defconfig +++ b/arch/powerpc/configs/85xx/mpc8536_ds_defconfig @@ -849,8 +849,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=y +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/85xx/mpc8544_ds_defconfig b/arch/powerpc/configs/85xx/mpc8544_ds_defconfig index f6cb01495ea6..5ac7e51e9f14 100644 --- a/arch/powerpc/configs/85xx/mpc8544_ds_defconfig +++ b/arch/powerpc/configs/85xx/mpc8544_ds_defconfig @@ -848,8 +848,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=y +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/85xx/mpc8568mds_defconfig b/arch/powerpc/configs/85xx/mpc8568mds_defconfig index 597be8491812..ab5b0b58924c 100644 --- a/arch/powerpc/configs/85xx/mpc8568mds_defconfig +++ b/arch/powerpc/configs/85xx/mpc8568mds_defconfig @@ -723,8 +723,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/85xx/mpc8572_ds_defconfig b/arch/powerpc/configs/85xx/mpc8572_ds_defconfig index 32aeb79216f7..65ef823e08c6 100644 --- a/arch/powerpc/configs/85xx/mpc8572_ds_defconfig +++ b/arch/powerpc/configs/85xx/mpc8572_ds_defconfig @@ -861,8 +861,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=y +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig index 5a0cf58d2b8c..f1288a0c3040 100644 --- a/arch/powerpc/configs/85xx/stx_gp3_defconfig +++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig @@ -877,8 +877,8 @@ CONFIG_I2C_HELPER_AUTO=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig index f3e4f3481fda..b3b1de6a6a85 
100644 --- a/arch/powerpc/configs/85xx/tqm8540_defconfig +++ b/arch/powerpc/configs/85xx/tqm8540_defconfig @@ -787,8 +787,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig index c62489394535..a92a639cdf8a 100644 --- a/arch/powerpc/configs/85xx/tqm8541_defconfig +++ b/arch/powerpc/configs/85xx/tqm8541_defconfig @@ -796,8 +796,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig index eef45b97dc3e..397d37fbe7e3 100644 --- a/arch/powerpc/configs/85xx/tqm8548_defconfig +++ b/arch/powerpc/configs/85xx/tqm8548_defconfig @@ -806,8 +806,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig index 11b637e99a54..856ca6ab37b7 100644 --- a/arch/powerpc/configs/85xx/tqm8555_defconfig +++ b/arch/powerpc/configs/85xx/tqm8555_defconfig @@ -796,8 +796,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig index 2519169b6d4b..56ed4213609c 100644 --- a/arch/powerpc/configs/85xx/tqm8560_defconfig +++ b/arch/powerpc/configs/85xx/tqm8560_defconfig @@ -796,8 +796,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig index 391874c7b436..840b09a07282 100644 --- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig @@ -1081,8 +1081,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # CONFIG_DS1682=y -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig index 72854a10dfa1..25bf4e95acb8 100644 --- a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig +++ 
b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig @@ -905,8 +905,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig index 41220ece603d..8e0f0e5180a4 100644 --- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig +++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig @@ -851,8 +851,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=y +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/86xx/sbc8641d_defconfig b/arch/powerpc/configs/86xx/sbc8641d_defconfig index a4342862f6ef..37d9f4ed80f5 100644 --- a/arch/powerpc/configs/86xx/sbc8641d_defconfig +++ b/arch/powerpc/configs/86xx/sbc8641d_defconfig @@ -958,8 +958,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig index 5078594cd1f5..74c6feabdd77 100644 --- a/arch/powerpc/configs/c2k_defconfig +++ b/arch/powerpc/configs/c2k_defconfig @@ -1124,8 +1124,8 @@ CONFIG_I2C_MV64XXX=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=m +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=m CONFIG_SENSORS_PCF8574=m # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index c420e47426f8..c6d2baa7aaeb 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig @@ -1012,7 +1012,7 @@ CONFIG_I2C_ALGOBIT=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig index 9ba3c6fc2fef..d2123779512a 100644 --- a/arch/powerpc/configs/celleb_defconfig +++ b/arch/powerpc/configs/celleb_defconfig @@ -834,7 +834,7 @@ CONFIG_I2C_ALGOBIT=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig index 63b3c2372ce8..5094a65a4493 100644 --- a/arch/powerpc/configs/chrp32_defconfig +++ b/arch/powerpc/configs/chrp32_defconfig @@ -941,8 +941,8 @@ CONFIG_I2C_ALGOBIT=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # 
CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index f85e71ccb989..fc905924c022 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -994,8 +994,8 @@ CONFIG_I2C_POWERMAC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig index 54fa62481373..3a0ffd73b65c 100644 --- a/arch/powerpc/configs/linkstation_defconfig +++ b/arch/powerpc/configs/linkstation_defconfig @@ -1015,8 +1015,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=m +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=m # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 045f1b008ce5..8b244003b9e1 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig @@ -866,8 +866,8 @@ CONFIG_I2C_AMD8111=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig index 15c5604d0b26..5ff3de205d6a 100644 --- a/arch/powerpc/configs/mpc5200_defconfig +++ b/arch/powerpc/configs/mpc5200_defconfig @@ -844,8 +844,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig index d582014b0a38..3e1272cfdd91 100644 --- a/arch/powerpc/configs/mpc83xx_defconfig +++ b/arch/powerpc/configs/mpc83xx_defconfig @@ -902,8 +902,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig index c87b53abc617..55e5ebd91cd0 100644 --- a/arch/powerpc/configs/mpc85xx_defconfig +++ b/arch/powerpc/configs/mpc85xx_defconfig @@ -860,8 +860,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=y +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig index 1736bbc281ec..21e0ccbd3f5c 100644 --- a/arch/powerpc/configs/mpc86xx_defconfig +++ b/arch/powerpc/configs/mpc86xx_defconfig @@ -852,8 +852,8 @@ 
CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -CONFIG_SENSORS_EEPROM=y +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 199e5f59d7a6..4f8681cc8d77 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -984,7 +984,7 @@ CONFIG_I2C_PASEMI=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_SENSORS_EEPROM=y +CONFIG_EEPROM_LEGACY=y # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCF8591 is not set diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index de9b121820a6..5339bb44cce9 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -1232,8 +1232,8 @@ CONFIG_I2C_POWERMAC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig index 4256e2c4534b..2d72ee7a8d60 100644 --- a/arch/powerpc/configs/ppc40x_defconfig +++ b/arch/powerpc/configs/ppc40x_defconfig @@ -745,8 +745,8 @@ CONFIG_I2C_IBM_IIC=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig index 034a1fbdc887..358b85f9270e 100644 --- a/arch/powerpc/configs/ppc44x_defconfig +++ b/arch/powerpc/configs/ppc44x_defconfig @@ -794,8 +794,8 @@ CONFIG_I2C_IBM_IIC=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index d4685d1c0be8..88c6295b76c1 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -1261,8 +1261,8 @@ CONFIG_I2C_PASEMI=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 01f05ec5abf3..7d044dfd9236 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -1801,8 +1801,8 @@ CONFIG_I2C_STUB=m # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -CONFIG_AT24=m -CONFIG_SENSORS_EEPROM=m +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m CONFIG_SENSORS_PCF8574=m CONFIG_PCF8575=m CONFIG_SENSORS_PCA9539=m diff --git a/arch/powerpc/configs/prpmc2800_defconfig b/arch/powerpc/configs/prpmc2800_defconfig index 
6046dc0cbd82..a257da608081 100644 --- a/arch/powerpc/configs/prpmc2800_defconfig +++ b/arch/powerpc/configs/prpmc2800_defconfig @@ -943,8 +943,8 @@ CONFIG_I2C_MV64XXX=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 1e520ab65118..61b100849715 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -1055,8 +1055,8 @@ CONFIG_I2C_ALGOBIT=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig index b3f5671972a9..7eb16ab7b71b 100644 --- a/arch/powerpc/configs/storcenter_defconfig +++ b/arch/powerpc/configs/storcenter_defconfig @@ -795,8 +795,8 @@ CONFIG_I2C_MPC=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/sh/configs/cayman_defconfig b/arch/sh/configs/cayman_defconfig index e21c0e8e22d9..92895013813d 100644 --- a/arch/sh/configs/cayman_defconfig +++ b/arch/sh/configs/cayman_defconfig @@ -700,8 +700,8 @@ CONFIG_I2C_HELPER_AUTO=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig index 158006847ad6..e93d39621658 100644 --- a/arch/sh/configs/edosk7760_defconfig +++ b/arch/sh/configs/edosk7760_defconfig @@ -632,8 +632,8 @@ CONFIG_I2C_SH7760=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig index 30cac42f25e7..8958629d7f3d 100644 --- a/arch/sh/configs/migor_defconfig +++ b/arch/sh/configs/migor_defconfig @@ -684,8 +684,8 @@ CONFIG_I2C_SH_MOBILE=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig index 2e65149e9502..60cb1b8a8742 100644 --- a/arch/sh/configs/r7780mp_defconfig +++ b/arch/sh/configs/r7780mp_defconfig @@ -836,8 +836,8 @@ CONFIG_I2C_HIGHLANDER=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is 
not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig index 043a8a509e09..de9ff781a63b 100644 --- a/arch/sh/configs/r7785rp_defconfig +++ b/arch/sh/configs/r7785rp_defconfig @@ -838,8 +838,8 @@ CONFIG_I2C_HIGHLANDER=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig index 7d2a9e88838b..5d7d7ce8a80b 100644 --- a/arch/sh/configs/rts7751r2d1_defconfig +++ b/arch/sh/configs/rts7751r2d1_defconfig @@ -777,7 +777,7 @@ CONFIG_SPI_SH_SCI=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig index f680d3eecdfb..801abeb1419d 100644 --- a/arch/sh/configs/rts7751r2dplus_defconfig +++ b/arch/sh/configs/rts7751r2dplus_defconfig @@ -777,7 +777,7 @@ CONFIG_SPI_SH_SCI=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig index be246f381507..bb87dfbda841 100644 --- a/arch/sh/configs/se7343_defconfig +++ b/arch/sh/configs/se7343_defconfig @@ -661,8 +661,8 @@ CONFIG_I2C_SH_MOBILE=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/sh/configs/sh7785lcr_defconfig b/arch/sh/configs/sh7785lcr_defconfig index 07e33c285b93..45079dabf317 100644 --- a/arch/sh/configs/sh7785lcr_defconfig +++ b/arch/sh/configs/sh7785lcr_defconfig @@ -823,8 +823,8 @@ CONFIG_I2C_PCA_PLATFORM=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig index ae5cbe237fff..a687b9f02ed6 100644 --- a/arch/sh/configs/shx3_defconfig +++ b/arch/sh/configs/shx3_defconfig @@ -653,8 +653,8 @@ CONFIG_I2C_HELPER_AUTO=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set @@ -678,7 +678,7 @@ CONFIG_SPI_MASTER=y # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set +# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_W1 is not set diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index cde19ae78f5a..ade49941def2 100644 
--- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -867,8 +867,8 @@ CONFIG_I2C_ALGOBIT=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index b30a08ed8eb4..edba00d98ac3 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -1331,8 +1331,8 @@ CONFIG_I2C_I801=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 0e7dbc0a3e46..322dd2748fc9 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -1311,8 +1311,8 @@ CONFIG_I2C_I801=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_AT24 is not set -# CONFIG_SENSORS_EEPROM is not set +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set # CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index 62aae334ee68..c76df8cda5ef 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig @@ -1,6 +1,6 @@ menu "EEPROM support" -config AT24 +config EEPROM_AT24 tristate "I2C EEPROMs from most vendors" depends on I2C && SYSFS && EXPERIMENTAL help @@ -26,7 +26,7 @@ config AT24 This driver can also be built as a module. If so, the module will be called at24. -config SPI_AT25 +config EEPROM_AT25 tristate "SPI EEPROMs from most vendors" depends on SPI && SYSFS help @@ -37,9 +37,9 @@ config SPI_AT25 This driver can also be built as a module. If so, the module will be called at25. -config SENSORS_EEPROM +config EEPROM_LEGACY tristate "Old I2C EEPROM reader" - depends on I2C && EXPERIMENTAL + depends on I2C && SYSFS help If you say yes here you get read-only access to the EEPROM data available on modern memory DIMMs and Sony Vaio laptops via I2C. Such diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile index 3b7af6df79a7..539dd8f88128 100644 --- a/drivers/misc/eeprom/Makefile +++ b/drivers/misc/eeprom/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_AT24) += at24.o -obj-$(CONFIG_SPI_AT25) += at25.o -obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o +obj-$(CONFIG_EEPROM_AT24) += at24.o +obj-$(CONFIG_EEPROM_AT25) += at25.o +obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o -- cgit v1.2.3 From 57064d213d2e44654d4f13c66df135b5e7389a26 Mon Sep 17 00:00:00 2001 From: Seth Heasley Date: Fri, 23 Jan 2009 12:43:38 -0800 Subject: PCI: irq and pci_ids patch for Intel Tigerpoint DeviceIDs This patch adds the Intel Tigerpoint LPC Controller DeviceIDs. 
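For context on the irq.c hunk further below: intel_router_probe() keys off the southbridge LPC bridge's PCI device ID, and this patch simply adds the new Tigerpoint ID as one more case in that switch. A minimal sketch of the pattern follows; only the function signature, the ICH7 cases and PCI_DEVICE_ID_INTEL_TGP_LPC (0x27bc) are taken from the diff itself, while the router-ops body is a from-memory approximation and should be read as illustrative, not as the exact in-tree code.

	/* Sketch of the intel_router_probe() pattern this patch extends.
	 * The switch matches the LPC bridge's PCI device ID; the Tigerpoint
	 * ID (0x27bc) becomes one more case alongside the ICH parts.
	 * The ops assigned below are illustrative, not quoted from the tree. */
	static __init int sketch_intel_router_probe(struct irq_router *r,
						    struct pci_dev *router,
						    u16 device)
	{
		switch (device) {
		case PCI_DEVICE_ID_INTEL_ICH7_0:
		case PCI_DEVICE_ID_INTEL_ICH7_1:
		case PCI_DEVICE_ID_INTEL_TGP_LPC:	/* new: Tigerpoint LPC, 0x27bc */
			r->name = "PIIX/ICH";		/* illustrative ops setup */
			r->get = pirq_piix_get;
			r->set = pirq_piix_set;
			return 1;
		}
		return 0;			/* not one of ours */
	}

Adding a device ID is therefore a one-line change in the probe switch plus the matching #define in pci_ids.h, as the two hunks below show.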
Signed-off-by: Seth Heasley Signed-off-by: Jesse Barnes --- arch/x86/pci/irq.c | 1 + include/linux/pci_ids.h | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 4064345cf144..fecbce6e7d7c 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -572,6 +572,7 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route case PCI_DEVICE_ID_INTEL_ICH7_1: case PCI_DEVICE_ID_INTEL_ICH7_30: case PCI_DEVICE_ID_INTEL_ICH7_31: + case PCI_DEVICE_ID_INTEL_TGP_LPC: case PCI_DEVICE_ID_INTEL_ESB2_0: case PCI_DEVICE_ID_INTEL_ICH8_0: case PCI_DEVICE_ID_INTEL_ICH8_1: diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d56ad9c21c09..6f260b50253b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2421,6 +2421,7 @@ #define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8 #define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9 #define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0 +#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc #define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd #define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da #define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd -- cgit v1.2.3 From bf3647c44bc76c43c4b2ebb4c37a559e899ac70e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 29 Jan 2009 11:45:35 +0100 Subject: x86: tone down mtrr_trim_uncached_memory() warning kerneloops.org is reporting a lot of these warnings that come due to vmware not setting up any MTRRs for emulated CPUs: | Reported 709 times (14696 total reports) | BIOS bug (often in VMWare) where the MTRR's are set up incorrectly | or not at all | | This warning was last seen in version 2.6.29-rc2-git1, and first | seen in 2.6.24. | | More info: | http://www.kerneloops.org/searchweek.php?search=mtrr_trim_uncached_memory Keep a one-liner KERN_INFO about it - so that we have so notice if empty MTRRs are caused by native hardware/BIOS weirdness. Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mtrr/main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index d259e5d2e054..236a401b8259 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -1594,8 +1594,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) /* kvm/qemu doesn't have mtrr set right, don't trim them all */ if (!highest_pfn) { - WARN(!kvm_para_available(), KERN_WARNING - "WARNING: strange, CPU MTRRs all blank?\n"); + printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); return 0; } -- cgit v1.2.3 From 72410af921cbc9018da388ca1ddf75880a033ac1 Mon Sep 17 00:00:00 2001 From: Atsushi SAKAI Date: Fri, 16 Jan 2009 20:39:14 +0900 Subject: lguest: typos fix 3 points lguest_asm.S => i386_head.S LHCALL_BREAK => LHREQ_BREAK perferred => preferred Signed-off-by: Atsushi SAKAI Signed-off-by: Rusty Russell --- arch/x86/lguest/boot.c | 4 ++-- drivers/lguest/core.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index a7ed208f81e3..92f1c6f3e19d 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -931,7 +931,7 @@ static void lguest_restart(char *reason) * that we can fit comfortably. * * First we need assembly templates of each of the patchable Guest operations, - * and these are in lguest_asm.S. */ + * and these are in i386_head.S. 
*/ /*G:060 We construct a table from the assembler templates: */ static const struct lguest_insns @@ -1093,7 +1093,7 @@ __init void lguest_init(void) acpi_ht = 0; #endif - /* We set the perferred console to "hvc". This is the "hypervisor + /* We set the preferred console to "hvc". This is the "hypervisor * virtual console" driver written by the PowerPC people, which we also * adapted for lguest's use. */ add_preferred_console("hvc", 0, NULL); diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 90663e01a56e..60156dfdc608 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -224,7 +224,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) break; /* If the Guest asked to be stopped, we sleep. The Guest's - * clock timer or LHCALL_BREAK from the Waker will wake us. */ + * clock timer or LHREQ_BREAK from the Waker will wake us. */ if (cpu->halted) { set_current_state(TASK_INTERRUPTIBLE); schedule(); -- cgit v1.2.3 From 5872fb94f85d2e4fdef94657bd14e1a492df9825 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 29 Jan 2009 16:28:02 -0800 Subject: Documentation: move DMA-mapping.txt to Doc/PCI/ Move DMA-mapping.txt to Documentation/PCI/. DMA-mapping.txt was supposed to be moved from Documentation/ to Documentation/PCI/. The 00-INDEX files in those two directories were updated, along with a few other text files, but the file itself somehow escaped being moved, so move it and update more text files and source files with its new location. Signed-off-by: Randy Dunlap Acked-by: Greg Kroah-Hartman cc: Jesse Barnes Signed-off-by: Linus Torvalds --- Documentation/DMA-API.txt | 2 +- Documentation/IO-mapping.txt | 4 ++-- Documentation/block/biodoc.txt | 5 +++-- Documentation/usb/dma.txt | 11 ++++++----- arch/ia64/hp/common/sba_iommu.c | 12 ++++++------ arch/parisc/include/asm/dma-mapping.h | 2 +- arch/parisc/kernel/pci-dma.c | 2 +- arch/x86/include/asm/dma-mapping.h | 4 ++-- arch/x86/kernel/pci-gart_64.c | 2 +- drivers/parisc/sba_iommu.c | 18 +++++++++--------- drivers/staging/altpciechdma/altpciechdma.c | 4 ++-- include/media/videobuf-dma-sg.h | 2 +- 12 files changed, 35 insertions(+), 33 deletions(-) (limited to 'arch/x86') diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 52441694fe03..2a3fcc55e981 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -5,7 +5,7 @@ This document describes the DMA API. For a more gentle introduction phrased in terms of the pci_ equivalents (and actual examples) see -DMA-mapping.txt +Documentation/PCI/PCI-DMA-mapping.txt. This API is split into two pieces. Part I describes the API and the corresponding pci_ API. Part II describes the extensions to the API diff --git a/Documentation/IO-mapping.txt b/Documentation/IO-mapping.txt index 86edb61bdee6..78a440695e11 100644 --- a/Documentation/IO-mapping.txt +++ b/Documentation/IO-mapping.txt @@ -1,6 +1,6 @@ [ NOTE: The virt_to_bus() and bus_to_virt() functions have been - superseded by the functionality provided by the PCI DMA - interface (see Documentation/DMA-mapping.txt). They continue + superseded by the functionality provided by the PCI DMA interface + (see Documentation/PCI/PCI-DMA-mapping.txt). They continue to be documented below for historical purposes, but new code must not use them. 
--davidm 00/12/12 ] diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index 3c5434c83daf..5d2480d33b43 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -186,8 +186,9 @@ a virtual address mapping (unlike the earlier scheme of virtual address do not have a corresponding kernel virtual address space mapping) and low-memory pages. -Note: Please refer to DMA-mapping.txt for a discussion on PCI high mem DMA -aspects and mapping of scatter gather lists, and support for 64 bit PCI. +Note: Please refer to Documentation/PCI/PCI-DMA-mapping.txt for a discussion +on PCI high mem DMA aspects and mapping of scatter gather lists, and support +for 64 bit PCI. Special handling is required only for cases where i/o needs to happen on pages at physical memory addresses beyond what the device can support. In these diff --git a/Documentation/usb/dma.txt b/Documentation/usb/dma.txt index e8b50b7de9d9..cfdcd16e3abf 100644 --- a/Documentation/usb/dma.txt +++ b/Documentation/usb/dma.txt @@ -6,8 +6,9 @@ in the kernel usb programming guide (kerneldoc, from the source code). API OVERVIEW The big picture is that USB drivers can continue to ignore most DMA issues, -though they still must provide DMA-ready buffers (see DMA-mapping.txt). -That's how they've worked through the 2.4 (and earlier) kernels. +though they still must provide DMA-ready buffers (see +Documentation/PCI/PCI-DMA-mapping.txt). That's how they've worked through +the 2.4 (and earlier) kernels. OR: they can now be DMA-aware. @@ -62,8 +63,8 @@ and effects like cache-trashing can impose subtle penalties. force a consistent memory access ordering by using memory barriers. It's not using a streaming DMA mapping, so it's good for small transfers on systems where the I/O would otherwise thrash an IOMMU mapping. (See - Documentation/DMA-mapping.txt for definitions of "coherent" and "streaming" - DMA mappings.) + Documentation/PCI/PCI-DMA-mapping.txt for definitions of "coherent" and + "streaming" DMA mappings.) Asking for 1/Nth of a page (as well as asking for N pages) is reasonably space-efficient. @@ -93,7 +94,7 @@ WORKING WITH EXISTING BUFFERS Existing buffers aren't usable for DMA without first being mapped into the DMA address space of the device. However, most buffers passed to your driver can safely be used with such DMA mapping. (See the first section -of DMA-mapping.txt, titled "What memory is DMA-able?") +of Documentation/PCI/PCI-DMA-mapping.txt, titled "What memory is DMA-able?") - When you're using scatterlists, you can map everything at once. On some systems, this kicks in an IOMMU and turns the scatterlists into single diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index d98f0f4ff83f..6d5e6c5630e3 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -906,7 +906,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir, @@ -1024,7 +1024,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) * @dir: R/W or both. 
* @attrs: optional dma attributes * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, int dir, struct dma_attrs *attrs) @@ -1102,7 +1102,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs); * @size: number of bytes mapped in driver buffer. * @dma_handle: IOVA of new buffer. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ void * sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) @@ -1165,7 +1165,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp * @vaddr: virtual address IOVA of "consistent" buffer. * @dma_handler: IO virtual address of "consistent" buffer. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { @@ -1420,7 +1420,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, int dir, struct dma_attrs *attrs) @@ -1512,7 +1512,7 @@ EXPORT_SYMBOL(sba_map_sg_attrs); * @dir: R/W or both. * @attrs: optional dma attributes * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, int dir, struct dma_attrs *attrs) diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 53af696f23d2..da6943380908 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -5,7 +5,7 @@ #include #include -/* See Documentation/DMA-mapping.txt */ +/* See Documentation/PCI/PCI-DMA-mapping.txt */ struct hppa_dma_ops { int (*dma_supported)(struct device *dev, u64 mask); void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag); diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index ccd61b9567a6..df47895db828 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -2,7 +2,7 @@ ** PARISC 1.1 Dynamic DMA mapping support. ** This implementation is for PA-RISC platforms that do not support ** I/O TLBs (aka DMA address translation hardware). -** See Documentation/DMA-mapping.txt for interface definitions. +** See Documentation/PCI/PCI-DMA-mapping.txt for interface definitions. ** ** (c) Copyright 1999,2000 Hewlett-Packard Company ** (c) Copyright 2000 Grant Grundler diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 4035357f5b9d..132a134d12f2 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -2,8 +2,8 @@ #define _ASM_X86_DMA_MAPPING_H /* - * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for - * documentation. + * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and + * Documentation/DMA-API.txt for documentation. */ #include diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 00c2bcd41463..d5768b1af080 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -5,7 +5,7 @@ * This allows to use PCI devices that only support 32bit addresses on systems * with more than 4GB. 
* - * See Documentation/DMA-mapping.txt for the interface specification. + * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification. * * Copyright 2002 Andi Kleen, SuSE Labs. * Subject to the GNU General Public License v2 only. diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 3fac8f81d59d..a70cf16ee1ad 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -668,7 +668,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * @dev: instance of PCI owned by the driver that's asking * @mask: number of address bits this PCI device can handle * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static int sba_dma_supported( struct device *dev, u64 mask) { @@ -680,8 +680,8 @@ static int sba_dma_supported( struct device *dev, u64 mask) return(0); } - /* Documentation/DMA-mapping.txt tells drivers to try 64-bit first, - * then fall back to 32-bit if that fails. + /* Documentation/PCI/PCI-DMA-mapping.txt tells drivers to try 64-bit + * first, then fall back to 32-bit if that fails. * We are just "encouraging" 32-bit DMA masks here since we can * never allow IOMMU bypass unless we add special support for ZX1. */ @@ -706,7 +706,7 @@ static int sba_dma_supported( struct device *dev, u64 mask) * @size: number of bytes to map in driver buffer. * @direction: R/W or both. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static dma_addr_t sba_map_single(struct device *dev, void *addr, size_t size, @@ -785,7 +785,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, * @size: number of bytes mapped in driver buffer. * @direction: R/W or both. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, @@ -861,7 +861,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, * @size: number of bytes mapped in driver buffer. * @dma_handle: IOVA of new buffer. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static void *sba_alloc_consistent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) @@ -892,7 +892,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size, * @vaddr: virtual address IOVA of "consistent" buffer. * @dma_handler: IO virtual address of "consistent" buffer. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static void sba_free_consistent(struct device *hwdev, size_t size, void *vaddr, @@ -927,7 +927,7 @@ int dump_run_sg = 0; * @nents: number of entries in list * @direction: R/W or both. * - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, @@ -1011,7 +1011,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, * @nents: number of entries in list * @direction: R/W or both. 
* - * See Documentation/DMA-mapping.txt + * See Documentation/PCI/PCI-DMA-mapping.txt */ static void sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, diff --git a/drivers/staging/altpciechdma/altpciechdma.c b/drivers/staging/altpciechdma/altpciechdma.c index 8e2b4ca0651d..f516140ca976 100644 --- a/drivers/staging/altpciechdma/altpciechdma.c +++ b/drivers/staging/altpciechdma/altpciechdma.c @@ -531,7 +531,7 @@ static int __devinit dma_test(struct ape_dev *ape, struct pci_dev *dev) goto fail; /* allocate and map coherently-cached memory for a DMA-able buffer */ - /* @see 2.6.26.2/Documentation/DMA-mapping.txt line 318 */ + /* @see Documentation/PCI/PCI-DMA-mapping.txt, near line 318 */ buffer_virt = (u8 *)pci_alloc_consistent(dev, PAGE_SIZE * 4, &buffer_bus); if (!buffer_virt) { printk(KERN_DEBUG "Could not allocate coherent DMA buffer.\n"); @@ -846,7 +846,7 @@ static int __devinit probe(struct pci_dev *dev, const struct pci_device_id *id) #if 1 // @todo For now, disable 64-bit, because I do not understand the implications (DAC!) /* query for DMA transfer */ - /* @see Documentation/DMA-mapping.txt */ + /* @see Documentation/PCI/PCI-DMA-mapping.txt */ if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)) { pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK); /* use 64-bit DMA */ diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h index 90edd22d343c..dda47f0082e9 100644 --- a/include/media/videobuf-dma-sg.h +++ b/include/media/videobuf-dma-sg.h @@ -49,7 +49,7 @@ struct scatterlist* videobuf_pages_to_sg(struct page **pages, int nr_pages, * does memory allocation too using vmalloc_32(). * * videobuf_dma_*() - * see Documentation/DMA-mapping.txt, these functions to + * see Documentation/PCI/PCI-DMA-mapping.txt, these functions to * basically the same. The map function does also build a * scatterlist for the buffer (and unmap frees it ...) * -- cgit v1.2.3 From 999721ca6d0c2540341acb73ac9048cbd6b05d3a Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 30 Jan 2009 22:44:58 +0530 Subject: headers_check fix: x86, e820.h fix the following 'make headers_check' warning: usr/include/asm/e820.h:44: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/x86/include/asm/e820.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 3d8ceddbd407..00d41ce4c844 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -49,6 +49,7 @@ #define E820_RESERVED_KERN 128 #ifndef __ASSEMBLY__ +#include struct e820entry { __u64 addr; /* start of memory segment */ __u64 size; /* size of memory segment */ -- cgit v1.2.3 From cef3767852a9b1a7ff4a8dfe0969e2d32eb728df Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 30 Jan 2009 22:46:08 +0530 Subject: headers_check fix: x86, kvm.h fix the following 'make headers_check' warnings: usr/include/asm/kvm.h:9: include of is preferred over usr/include/asm/kvm.h:16: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/x86/include/asm/kvm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index b95162af0bf6..d2e3bf3608af 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -6,7 +6,7 @@ * */ -#include +#include #include /* Architectural interrupt line count. 
*/ -- cgit v1.2.3 From 999b697b9d8b15756e65da72c816ef4363a945a5 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 30 Jan 2009 22:47:27 +0530 Subject: headers_check fix: x86, mce.h fix the following 'make headers_check' warnings: usr/include/asm/mce.h:7: include of is preferred over usr/include/asm/mce.h:29: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/x86/include/asm/mce.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 1d6e17c2f23a..32c6e17b960b 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -3,8 +3,8 @@ #ifdef __x86_64__ +#include #include -#include /* * Machine Check support for x86 @@ -115,8 +115,6 @@ extern int mce_notify_user(void); #endif /* !CONFIG_X86_32 */ - - #ifdef CONFIG_X86_MCE extern void mcheck_init(struct cpuinfo_x86 *c); #else @@ -126,5 +124,4 @@ extern void stop_mce(void); extern void restart_mce(void); #endif /* __KERNEL__ */ - #endif /* _ASM_X86_MCE_H */ -- cgit v1.2.3 From d122072cc079d299e5191c9cbb6162ba8791624c Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 30 Jan 2009 22:48:17 +0530 Subject: headers_check fix: x86, mtrr.h fix the following 'make headers_check' warning: usr/include/asm/mtrr.h:61: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/x86/include/asm/mtrr.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h index 14080d22edb3..a51ada8467de 100644 --- a/arch/x86/include/asm/mtrr.h +++ b/arch/x86/include/asm/mtrr.h @@ -23,6 +23,7 @@ #ifndef _ASM_X86_MTRR_H #define _ASM_X86_MTRR_H +#include #include #include -- cgit v1.2.3 From 420ab35eef206d147973d26db14b5618868726be Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 30 Jan 2009 22:52:16 +0530 Subject: headers_check fix: x86, ptrace-abi.h fix the following 'make headers_check' warnings: usr/include/asm/ptrace-abi.h:86: include of is preferred over usr/include/asm/ptrace-abi.h:93: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/x86/include/asm/ptrace-abi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/ptrace-abi.h b/arch/x86/include/asm/ptrace-abi.h index 25f1bb8fc626..8e0f8d199e05 100644 --- a/arch/x86/include/asm/ptrace-abi.h +++ b/arch/x86/include/asm/ptrace-abi.h @@ -83,7 +83,7 @@ #ifdef CONFIG_X86_PTRACE_BTS #ifndef __ASSEMBLY__ -#include +#include /* configuration/status structure used in PTRACE_BTS_CONFIG and PTRACE_BTS_STATUS commands. 
-- cgit v1.2.3 From e59afe6a21dce7bb3c63ba4f894a3195ae3d5529 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 30 Jan 2009 22:53:49 +0530 Subject: headers_check fix: x86, sigcontext.h fix the following 'make headers_check' warnings: usr/include/asm/sigcontext.h:5: include of is preferred over usr/include/asm/sigcontext.h:24: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/x86/include/asm/sigcontext.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h index 0afcb5e58acc..ec666491aaa4 100644 --- a/arch/x86/include/asm/sigcontext.h +++ b/arch/x86/include/asm/sigcontext.h @@ -2,7 +2,7 @@ #define _ASM_X86_SIGCONTEXT_H #include -#include +#include #define FP_XSTATE_MAGIC1 0x46505853U #define FP_XSTATE_MAGIC2 0x46505845U -- cgit v1.2.3 From 2de548faa78c650bb20c4680ee3a225cca33a45d Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 30 Jan 2009 22:55:20 +0530 Subject: headers_check fix: x86, sigcontext32.h fix the following 'make headers_check' warning: usr/include/asm/sigcontext32.h:20: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/x86/include/asm/sigcontext32.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/sigcontext32.h b/arch/x86/include/asm/sigcontext32.h index 6126188cf3a9..ad1478c4ae12 100644 --- a/arch/x86/include/asm/sigcontext32.h +++ b/arch/x86/include/asm/sigcontext32.h @@ -1,6 +1,8 @@ #ifndef _ASM_X86_SIGCONTEXT32_H #define _ASM_X86_SIGCONTEXT32_H +#include + /* signal context for 32bit programs. */ #define X86_FXSR_MAGIC 0x0000 -- cgit v1.2.3 From 7cff3608d2553a045b676fa81b0cf54e4f2cc5ce Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Fri, 30 Jan 2009 22:57:38 +0530 Subject: headers_check fix: x86, swab.h fix the following 'make headers_check' warnings: usr/include/asm/swab.h:4: include of is preferred over usr/include/asm/swab.h:7: found __[us]{8,16,32,64} type without #include Signed-off-by: Jaswinder Singh Rajput --- arch/x86/include/asm/swab.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/swab.h b/arch/x86/include/asm/swab.h index 306d4178ffc9..557cd9f00661 100644 --- a/arch/x86/include/asm/swab.h +++ b/arch/x86/include/asm/swab.h @@ -1,7 +1,7 @@ #ifndef _ASM_X86_SWAB_H #define _ASM_X86_SWAB_H -#include +#include #include static inline __attribute_const__ __u32 __arch_swab32(__u32 val) -- cgit v1.2.3 From 7fc49f19813030f2e15ad2ccec5cb701f7f4a3ec Mon Sep 17 00:00:00 2001 From: Andreas Schwab Date: Tue, 27 Jan 2009 21:45:57 +0100 Subject: x86 setup: fix asm constraints in vesa_store_edid Impact: fix potential miscompile (currently believed non-manifest) As the comment explains, the VBE DDC call can clobber any register. Tell the compiler about that fact. Signed-off-by: Andreas Schwab Signed-off-by: H. Peter Anvin Signed-off-by: Ingo Molnar --- arch/x86/boot/video-vesa.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c index 75115849af33..4a58c8ce3f69 100644 --- a/arch/x86/boot/video-vesa.c +++ b/arch/x86/boot/video-vesa.c @@ -269,9 +269,8 @@ void vesa_store_edid(void) we genuinely have to assume all registers are destroyed here. 
*/ asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es" - : "+a" (ax), "+b" (bx) - : "c" (cx), "D" (di) - : "esi"); + : "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di) + : : "esi", "edx"); if (ax != 0x004f) return; /* No EDID */ @@ -285,9 +284,9 @@ void vesa_store_edid(void) dx = 0; /* EDID block number */ di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */ asm(INT10 - : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info) - : "c" (cx), "D" (di) - : "esi"); + : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info), + "+c" (cx), "+D" (di) + : : "esi"); #endif /* CONFIG_FIRMWARE_EDID */ } -- cgit v1.2.3 From 92ab78315c638515d0e81b0c70b2082f713582d9 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sat, 31 Jan 2009 17:24:43 +0100 Subject: x86/Voyager: make it build and boot [ mingo@elte.hu: these fixes are a subset of changes cherry-picked from: git://git.kernel.org:/pub/scm/linux/kernel/git/jejb/voyager-2.6.git They fix various problems that recent x86 changes caused in the Voyager subarchitecture: both APIC changes and cpumask changes and certain cleanups caused subarch assumptions to break. Most of these changes are obsolete as the subarch code has been removed from the x86 development tree - but we merge them upstream to make Voyager build and boot. ] Signed-off-by: James Bottomley Signed-off-by: Ingo Molnar --- arch/x86/kernel/irqinit_32.c | 12 ------------ arch/x86/mach-default/setup.c | 12 ++++++++++++ arch/x86/mach-voyager/setup.c | 12 +++++++++++- arch/x86/mach-voyager/voyager_smp.c | 25 ++++++++++++------------- 4 files changed, 35 insertions(+), 26 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 1507ad4e674d..10a09c2f1828 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -78,15 +78,6 @@ void __init init_ISA_irqs(void) } } -/* - * IRQ2 is cascade interrupt to second interrupt controller - */ -static struct irqaction irq2 = { - .handler = no_action, - .mask = CPU_MASK_NONE, - .name = "cascade", -}; - DEFINE_PER_CPU(vector_irq_t, vector_irq) = { [0 ... 
IRQ0_VECTOR - 1] = -1, [IRQ0_VECTOR] = 0, @@ -178,9 +169,6 @@ void __init native_init_IRQ(void) alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); #endif - if (!acpi_ioapic) - setup_irq(2, &irq2); - /* setup after call gates are initialised (usually add in * the architecture specific gates) */ diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c index df167f265622..a265a7c63190 100644 --- a/arch/x86/mach-default/setup.c +++ b/arch/x86/mach-default/setup.c @@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void) init_ISA_irqs(); } +/* + * IRQ2 is cascade interrupt to second interrupt controller + */ +static struct irqaction irq2 = { + .handler = no_action, + .mask = CPU_MASK_NONE, + .name = "cascade", +}; + /** * intr_init_hook - post gate setup interrupt initialisation * @@ -53,6 +62,9 @@ void __init intr_init_hook(void) if (x86_quirks->arch_intr_init()) return; } + if (!acpi_ioapic) + setup_irq(2, &irq2); + } /** diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c index a580b9562e76..d914a7996a66 100644 --- a/arch/x86/mach-voyager/setup.c +++ b/arch/x86/mach-voyager/setup.c @@ -33,13 +33,23 @@ void __init intr_init_hook(void) setup_irq(2, &irq2); } -void __init pre_setup_arch_hook(void) +static void voyager_disable_tsc(void) { /* Voyagers run their CPUs from independent clocks, so disable * the TSC code because we can't sync them */ setup_clear_cpu_cap(X86_FEATURE_TSC); } +void __init pre_setup_arch_hook(void) +{ + voyager_disable_tsc(); +} + +void __init pre_time_init_hook(void) +{ + voyager_disable_tsc(); +} + void __init trap_init_hook(void) { } diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 9840b7ec749a..7ffcdeec4631 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq); static void disable_local_vic_irq(unsigned int irq); static void before_handle_vic_irq(unsigned int irq); static void after_handle_vic_irq(unsigned int irq); -static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask); +static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask); static void ack_vic_irq(unsigned int irq); static void vic_enable_cpi(void); static void do_boot_cpu(__u8 cpuid); @@ -211,8 +211,6 @@ static __u32 cpu_booted_map; static cpumask_t smp_commenced_mask = CPU_MASK_NONE; /* This is for the new dynamic CPU boot code */ -cpumask_t cpu_callin_map = CPU_MASK_NONE; -cpumask_t cpu_callout_map = CPU_MASK_NONE; /* The per processor IRQ masks (these are usually kept in sync) */ static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; @@ -378,7 +376,7 @@ void __init find_smp_config(void) cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24; - cpu_possible_map = phys_cpu_present_map; + init_cpu_possible(&phys_cpu_present_map); printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]); /* Here we set up the VIC to enable SMP */ @@ -1599,16 +1597,16 @@ static void after_handle_vic_irq(unsigned int irq) * change the mask and then do an interrupt enable CPI to re-enable on * the selected processors */ -void set_vic_irq_affinity(unsigned int irq, cpumask_t mask) +void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask) { /* Only extended processors handle interrupts */ unsigned long real_mask; unsigned long irq_mask = 1 << irq; int cpu; - real_mask = cpus_addr(mask)[0] & 
voyager_extended_vic_processors; + real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors; - if (cpus_addr(mask)[0] == 0) + if (cpus_addr(*mask)[0] == 0) /* can't have no CPUs to accept the interrupt -- extremely * bad things will happen */ return; @@ -1750,10 +1748,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void) init_gdt(smp_processor_id()); switch_to_new_gdt(); - cpu_set(smp_processor_id(), cpu_online_map); - cpu_set(smp_processor_id(), cpu_callout_map); - cpu_set(smp_processor_id(), cpu_possible_map); - cpu_set(smp_processor_id(), cpu_present_map); + cpu_online_map = cpumask_of_cpu(smp_processor_id()); + cpu_callout_map = cpumask_of_cpu(smp_processor_id()); + cpu_callin_map = CPU_MASK_NONE; + cpu_present_map = cpumask_of_cpu(smp_processor_id()); + } static int __cpuinit voyager_cpu_up(unsigned int cpu) @@ -1783,9 +1782,9 @@ void __init smp_setup_processor_id(void) x86_write_percpu(cpu_number, hard_smp_processor_id()); } -static void voyager_send_call_func(cpumask_t callmask) +static void voyager_send_call_func(const struct cpumask *callmask) { - __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id()); + __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id()); send_CPI(mask, VIC_CALL_FUNCTION_CPI); } -- cgit v1.2.3 From 9a8ecae87a2b698964b1db9ea504ba1099f479fc Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Sat, 31 Jan 2009 20:12:14 -0500 Subject: x86: add cache descriptors for Intel Core i7 Signed-off-by: Dave Jones Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel_cacheinfo.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 48533d77be78..da299eb85fc0 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata = { { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ + { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */ { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ + { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ + { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */ @@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata = { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */ { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */ + { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */ + { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */ + { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */ + { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */ + { 0xd7, LVL_3, 2038 }, /* 8-way set assoc, 64 byte line size */ + { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */ + { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */ + { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */ + { 0xde, LVL_3, 8192 }, /* 12-way set assoc, 64 byte line size */ + { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 
byte line size */ + { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */ + { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */ { 0x00, 0, 0} }; -- cgit v1.2.3 From 10b888d6cec2688e65e9e128b14bf98ecd199da2 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 31 Jan 2009 14:50:07 -0800 Subject: irq, x86: fix lock status with numa_migrate_irq_desc Eric Paris reported: > I have an hp dl785g5 which is unable to successfully run > 2.6.29-0.66.rc3.fc11.x86_64 or 2.6.29-rc2-next-20090126. During bootup > (early in userspace daemons starting) I get the below BUG, which quickly > renders the machine dead. I assume it is because sparse_irq_lock never > gets released when the BUG kills that task. Adjust lock sequence when migrating a descriptor with CONFIG_NUMA_MIGRATE_IRQ_DESC enabled. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/io_apic.c | 5 +++-- kernel/irq/numa_migrate.c | 7 ++++++- 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 1c4a1302536c..9b0c480c383b 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -2528,14 +2528,15 @@ static void irq_complete_move(struct irq_desc **descp) vector = ~get_irq_regs()->orig_ax; me = smp_processor_id(); + + if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) { #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC *descp = desc = move_irq_desc(desc, me); /* get the new one */ cfg = desc->chip_data; #endif - - if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) send_cleanup_vector(cfg); + } } #else static inline void irq_complete_move(struct irq_desc **descp) {} diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index ecf765c6a77a..acd88356ac76 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c @@ -71,7 +71,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, desc = irq_desc_ptrs[irq]; if (desc && old_desc != desc) - goto out_unlock; + goto out_unlock; node = cpu_to_node(cpu); desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); @@ -84,10 +84,15 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, init_copy_one_irq_desc(irq, old_desc, desc, cpu); irq_desc_ptrs[irq] = desc; + spin_unlock_irqrestore(&sparse_irq_lock, flags); /* free the old one */ free_one_irq_desc(old_desc, desc); + spin_unlock(&old_desc->lock); kfree(old_desc); + spin_lock(&desc->lock); + + return desc; out_unlock: spin_unlock_irqrestore(&sparse_irq_lock, flags); -- cgit v1.2.3 From a67798cd7bb130bf37f5ffb28f3260f4c10232db Mon Sep 17 00:00:00 2001 From: Martin Hicks Date: Fri, 30 Jan 2009 10:50:54 -0600 Subject: x86: push old stack address on irqstack for unwinder Impact: Fixes dumpstack and KDB on 64 bits This re-adds the old stack pointer to the top of the irqstack to help with unwinding. It was removed in commit d99015b1abbad743aa049b439c1e1dede6d0fa49 as part of the save_args out-of-line work. Both dumpstack and KDB require this information. Signed-off-by: Martin Hicks Signed-off-by: H. Peter Anvin --- arch/x86/kernel/entry_64.S | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e28c7a987793..a1346217e43c 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -346,6 +346,7 @@ ENTRY(save_args) popq_cfi %rax /* move return address... 
*/ mov %gs:pda_irqstackptr,%rsp EMPTY_FRAME 0 + pushq_cfi %rbp /* backlink for unwinder */ pushq_cfi %rax /* ... to the new stack */ /* * We entered an interrupt context - irqs are off: -- cgit v1.2.3 From 06fc732c33a7ff5e4c91bcf4a6ca86b5e335ad9a Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Tue, 3 Feb 2009 16:01:46 -0800 Subject: xen: disable interrupts before saving in percpu Impact: Fix race condition xen_mc_batch has a small preempt race where it takes the address of a percpu variable immediately before disabling interrupts, thereby leaving a small window in which we may migrate to another cpu and save the flags in the wrong percpu variable. Disable interrupts before saving the old flags in a percpu. Signed-off-by: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/x86/xen/multicalls.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h index 858938241616..fa3e10725d98 100644 --- a/arch/x86/xen/multicalls.h +++ b/arch/x86/xen/multicalls.h @@ -19,8 +19,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags); paired with xen_mc_issue() */ static inline void xen_mc_batch(void) { + unsigned long flags; /* need to disable interrupts until this entry is complete */ - local_irq_save(__get_cpu_var(xen_mc_irq_flags)); + local_irq_save(flags); + __get_cpu_var(xen_mc_irq_flags) = flags; } static inline struct multicall_space xen_mc_entry(size_t args) -- cgit v1.2.3 From 858770619debfb9269add63e4ba8b7c6b5538dd1 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 3 Feb 2009 16:24:22 +0100 Subject: x86: APIC: enable workaround on AMD Fam10h CPUs Impact: fix to enable APIC for AMD Fam10h on chipsets with a missing/b0rked ACPI MP table (MADT) Booting a 32bit kernel on an AMD Fam10h CPU running on chipsets with missing/b0rked MP table leads to a hang pretty early in the boot process due to the APIC not being initialized. Fix that by falling back to the default APIC base address in 32bit code, as it is done in the 64bit codepath. Signed-off-by: Borislav Petkov Signed-off-by: H. Peter Anvin --- arch/x86/kernel/apic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index 4b6df2469fe3..115449f869ee 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c @@ -1436,7 +1436,7 @@ static int __init detect_init_APIC(void) switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) || - (boot_cpu_data.x86 == 15)) + (boot_cpu_data.x86 >= 15)) break; goto no_apic; case X86_VENDOR_INTEL: -- cgit v1.2.3
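As a footnote to the xen_mc_batch() fix above: the race it closes is the classic "per-CPU address taken too early" pattern. A minimal sketch, using only the symbols visible in the hunk (xen_mc_irq_flags, local_irq_save(), __get_cpu_var()); the function names here are hypothetical and stand in for the single in-tree xen_mc_batch():

	/* Racy variant: __get_cpu_var() is evaluated while preemption is
	 * still enabled, so the task may migrate to another CPU between
	 * computing the per-CPU address and disabling interrupts -- the
	 * saved flags then land in the wrong CPU's slot. */
	static inline void mc_batch_racy(void)
	{
		local_irq_save(__get_cpu_var(xen_mc_irq_flags));
	}

	/* Fixed variant (what the patch does): disable interrupts first,
	 * which also prevents migration, then store into the per-CPU slot
	 * of the CPU we are now pinned to. */
	static inline void mc_batch_fixed(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		__get_cpu_var(xen_mc_irq_flags) = flags;
	}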