author		Vineet Gupta <Vineet.Gupta1@synopsys.com>	2013-11-13 00:08:45 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 04:09:13 +0100
commit		616c05d110bb4ef8203f49c9d2476874077c2f6a (patch)
tree		eb5bc88a430509fcdb8afe8ee9f344c903d1894d /arch
parent		jump_label: unlikely(x) > 0 (diff)
sh: move fpu_counter into ARCH specific thread_struct
Only a couple of arches (sh/x86) use fpu_counter in task_struct, so it can
be moved out into the arch-specific thread_struct, reducing the size of
task_struct for all other arches.
Compile-tested with sh defconfig + sh4-linux-gcc (4.6.3)
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Paul Mundt <paul.mundt@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
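In outline, the patch relocates a single field. A minimal before/after sketch (struct bodies elided; only the moved member is shown, not the full kernel definitions):

	/* Before: the counter lived in the generic task_struct. */
	struct task_struct {
		/* ... generic scheduler state ... */
		unsigned char fpu_counter;	/* used only by sh and x86 */
	};

	/* After: it lives in the SH thread_struct
	 * (processor_32.h / processor_64.h). */
	struct thread_struct {
		/* ... SH-specific register and FPU state ... */
		unsigned char fpu_counter;
	};

Accordingly, every user switches from tsk->fpu_counter to tsk->thread.fpu_counter, as the hunks below show.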
Diffstat (limited to 'arch')
 arch/sh/include/asm/fpu.h          |  2 +-
 arch/sh/include/asm/processor_32.h | 10 ++++++++++
 arch/sh/include/asm/processor_64.h | 10 ++++++++++
 arch/sh/kernel/cpu/fpu.c           |  2 +-
 arch/sh/kernel/process_32.c        |  6 +++---
 5 files changed, 25 insertions(+), 5 deletions(-)
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 06c4281aab65..09fc2bc8a790 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -46,7 +46,7 @@ static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		save_fpu(tsk);
 		release_fpu(regs);
 	} else
-		tsk->fpu_counter = 0;
+		tsk->thread.fpu_counter = 0;
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index e699a12cdcca..18e0377f72bb 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -111,6 +111,16 @@ struct thread_struct {
 
 	/* Extended processor state */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_THREAD {						\
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 1cc7d3197143..eedd4f625d07 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -126,6 +126,16 @@ struct thread_struct {
 
 	/* floating point info */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_MMAP \
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index f8f7af51c128..4e332244ea75 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -44,7 +44,7 @@ void __fpu_state_restore(void)
 	restore_fpu(tsk);
 
 	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
+	tsk->thread.fpu_counter++;
 }
 
 void fpu_state_restore(struct pt_regs *regs)
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index ebd3933005b4..2885fc9d9dcd 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif
 		ti->addr_limit = KERNEL_DS;
 		ti->status &= ~TS_USEDFPU;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		return 0;
 	}
 	*childregs = *current_pt_regs();
@@ -189,7 +189,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 		unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
@@ -207,7 +207,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		__fpu_state_restore();
 
 	return prev;
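The comment block added to both headers documents the heuristic that the process_32.c hunks implement. A condensed sketch of that logic, paraphrased from the __switch_to() hunks above (eager_restore_fpu() is a hypothetical helper standing in for __fpu_state_restore(); the threshold and wrap-around are as in the patch):

	/* Sketch of the lazy-FPU switch-in heuristic from this patch. */
	void sketch_switch_in(struct task_struct *next)
	{
		/*
		 * More than 5 consecutive FPU-using context switches:
		 * restore the FPU state eagerly to avoid taking the
		 * "FPU disabled" trap. Because fpu_counter is an
		 * unsigned char, it wraps after 256 such switches and
		 * the task falls back to lazy restore, which handles
		 * bursty apps that use the FPU only briefly.
		 */
		if (next->thread.fpu_counter > 5)
			eager_restore_fpu(next);	/* hypothetical helper */
		/* else: stay lazy; the first FPU insn traps and restores */
	}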