| author | Venki Pallipadi <venkatesh.pallipadi@intel.com> | 2008-01-30 13:32:01 +0100 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 13:32:01 +0100 |
| commit | bde6f5f59c2b2b48a7a849c129d5b48838fe77ee (patch) | |
| tree | 4fa3befdfa227db56770a0dc85b8fc18be232f70 /include | |
| parent | x86: add some pirq debugging (diff) | |
| download | linux-bde6f5f59c2b2b48a7a849c129d5b48838fe77ee.tar.xz, linux-bde6f5f59c2b2b48a7a849c129d5b48838fe77ee.zip | |
x86: voluntary leave_mm before entering ACPI C3
Avoid TLB flush IPIs during C3 states by voluntarily calling leave_mm()
before entering C3.

The performance impact of flushing the TLB on C3 entry should not be
significant compared with the C3 wakeup latency. Also, CPUs tend to flush the
TLB in hardware while in C3 anyway.
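The call site itself is outside this diffstat (which is limited to include/);
the real caller lives in the ACPI idle code. The sketch below only illustrates
the intended use of the new acpi_unlazy_tlb() hook before C3 entry; the
function names c3_idle_sketch() and enter_c3() are placeholders, not kernel
functions, and only acpi_unlazy_tlb() and smp_processor_id() come from the
kernel itself.

```c
#include <linux/smp.h>		/* smp_processor_id() */
#include <asm/acpi.h>		/* acpi_unlazy_tlb(), added by this patch */

/* Placeholder for whatever actually enters C3 (P_LVL3 read, MWAIT, ...);
 * not a real kernel function. */
static void enter_c3(void) { }

static void c3_idle_sketch(void)
{
	int cpu = smp_processor_id();

	/*
	 * Leave lazy TLB mode now: drop this CPU from the mm's CPU mask
	 * and switch to the kernel page tables, so other CPUs changing
	 * page tables no longer need to wake this CPU out of C3 with a
	 * TLB-flush IPI.  On x86 this expands to leave_mm(cpu); on ia64
	 * it expands to nothing.
	 */
	acpi_unlazy_tlb(cpu);

	enter_c3();
}
```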
On an 8-logical-CPU system running make -j2, the number of TLB flush IPIs drops
from 40 per second to ~0. The total number of interrupts during the run of this
workload was ~1200 per second, which makes this a ~3% saving in wakeups.
There was no measurable performance or power impact, however.
[ akpm@linux-foundation.org: symbol export fixes. ]
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-ia64/acpi.h | 2
-rw-r--r-- | include/asm-x86/acpi.h | 3
-rw-r--r-- | include/asm-x86/mmu.h | 8
-rw-r--r-- | include/asm-x86/mmu_context_32.h | 2

4 files changed, 13 insertions, 2 deletions
```diff
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 81bcd5e51789..cd1cc39b5599 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -127,6 +127,8 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
 extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 #endif
 
+#define acpi_unlazy_tlb(x)
+
 #endif /*__KERNEL__*/
 
 #endif /*_ASM_ACPI_H*/
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 2feb0c494be7..98a9ca266531 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -27,6 +27,7 @@
 
 #include <asm/numa.h>
 #include <asm/processor.h>
+#include <asm/mmu.h>
 
 #define COMPILER_DEPENDENT_INT64 long long
 #define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -167,4 +168,6 @@ static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
 }
 #endif
 
+#define acpi_unlazy_tlb(x) leave_mm(x)
+
 #endif /*__X86_ASM_ACPI_H*/
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index 3f922c8e1c88..efa962c38897 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -20,4 +20,12 @@ typedef struct {
 	void *vdso;
 } mm_context_t;
 
+#ifdef CONFIG_SMP
+void leave_mm(int cpu);
+#else
+static inline void leave_mm(int cpu)
+{
+}
+#endif
+
 #endif /* _ASM_X86_MMU_H */
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
index 7eb0b0b1fb3c..8198d1cca1f3 100644
--- a/include/asm-x86/mmu_context_32.h
+++ b/include/asm-x86/mmu_context_32.h
@@ -32,8 +32,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #endif
 }
 
-void leave_mm(unsigned long cpu);
-
 static inline void switch_mm(struct mm_struct *prev,
 			     struct mm_struct *next,
 			     struct task_struct *tsk)
```
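For context, the body of leave_mm() is not part of this diff; the sketch below
is a simplified approximation of what the 32-bit x86 implementation of that era
(arch/x86/kernel/smp_32.c) does, reproduced from memory and meant only as an
illustration of why dropping lazy TLB mode stops the IPIs.

```c
#include <asm/tlbflush.h>	/* cpu_tlbstate, TLBSTATE_OK */
#include <asm/pgtable.h>	/* swapper_pg_dir */
#include <asm/processor.h>	/* load_cr3() */

/* Approximation of the x86 leave_mm() that this patch exports. */
void leave_mm(int cpu)
{
	/* Must only be called while in lazy TLB mode. */
	BUG_ON(per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK);

	/* Stop being a target of TLB-flush IPIs for this mm ... */
	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);

	/* ... and run on the kernel's own page tables instead. */
	load_cr3(swapper_pg_dir);
}
```

The empty !CONFIG_SMP stub added to asm-x86/mmu.h lets the acpi_unlazy_tlb()
call site compile unchanged on uniprocessor builds, where TLB-flush IPIs do not
exist in the first place.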