Diffstat (limited to 'arch/sh')
 arch/sh/include/asm/fpu.h          |  2 +-
 arch/sh/include/asm/processor_32.h | 10 ++++++++++
 arch/sh/include/asm/processor_64.h | 10 ++++++++++
 arch/sh/kernel/cpu/fpu.c           |  2 +-
 arch/sh/kernel/process_32.c        |  6 +++---
 arch/sh/kernel/process_64.c        |  4 ++--
 arch/sh/mm/init.c                  |  2 +-
 7 files changed, 28 insertions(+), 8 deletions(-)
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 06c4281aab65..09fc2bc8a790 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -46,7 +46,7 @@ static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		save_fpu(tsk);
 		release_fpu(regs);
 	} else
-		tsk->fpu_counter = 0;
+		tsk->thread.fpu_counter = 0;
 }
 
 static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
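
The fpu.h hunk above is part of moving fpu_counter out of the generic task_struct and into the SH-specific thread_struct, so every access now goes through tsk->thread. __unlazy_fpu() is the switch-out half of the lazy-FPU heuristic: if the task never touched the FPU during its time slice, its streak counter is reset. A minimal userspace model of that reset, with a hypothetical struct task standing in for the real task_struct/thread_struct pair:

/* Hypothetical model, not kernel code: just the two fields the
 * heuristic needs. */
struct task {
	unsigned char fpu_counter;	/* consecutive slices that used the FPU */
	int used_fpu;			/* did this slice touch the FPU? */
};

/* Shape of __unlazy_fpu(): save state if the FPU was used,
 * otherwise break the streak. */
static void unlazy(struct task *t)
{
	if (t->used_fpu)
		t->used_fpu = 0;	/* kernel calls save_fpu()/release_fpu() here */
	else
		t->fpu_counter = 0;	/* back to fully lazy saving */
}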
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index e699a12cdcca..18e0377f72bb 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -111,6 +111,16 @@ struct thread_struct {
 
 	/* Extended processor state */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * in which the FPU is used. If this is over a threshold, the lazy
+	 * FPU saving becomes unlazy to save the trap. This is an unsigned
+	 * char so that after 256 times the counter wraps and the behavior
+	 * turns lazy again; this is to deal with bursty apps that only use
+	 * the FPU for a short time.
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_THREAD { \
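
The comment added above hinges on fpu_counter being an unsigned char: a streak that runs long enough wraps back through zero, which drops the task below the eager-restore threshold again. A standalone C sketch of the wrap (assumes the threshold of 5 used later in __switch_to()):

#include <stdio.h>

int main(void)
{
	unsigned char fpu_counter = 250;

	for (int i = 0; i < 10; i++) {
		fpu_counter++;	/* 251 ... 255, then wraps to 0 */
		printf("counter=%3u eager=%s\n",
		       fpu_counter, fpu_counter > 5 ? "yes" : "no");
	}
	return 0;	/* after the wrap, restore is lazy again */
}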
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 1cc7d3197143..eedd4f625d07 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -126,6 +126,16 @@ struct thread_struct {
 
 	/* floating point info */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * in which the FPU is used. If this is over a threshold, the lazy
+	 * FPU saving becomes unlazy to save the trap. This is an unsigned
+	 * char so that after 256 times the counter wraps and the behavior
+	 * turns lazy again; this is to deal with bursty apps that only use
+	 * the FPU for a short time.
+	 */
+	unsigned char fpu_counter;
 };
 
 #define INIT_MMAP \
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
index f8f7af51c128..4e332244ea75 100644
--- a/arch/sh/kernel/cpu/fpu.c
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -44,7 +44,7 @@ void __fpu_state_restore(void)
 
 	restore_fpu(tsk);
 	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
+	tsk->thread.fpu_counter++;
 }
 
 void fpu_state_restore(struct pt_regs *regs)
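
This is the trap-side half of the heuristic: every lazy-FPU fault that lands in __fpu_state_restore() extends the streak. Continuing the hypothetical model sketched above:

/* Shape of __fpu_state_restore(): the task faulted on its first FPU
 * instruction this slice, so mark the FPU used and extend the streak. */
static void fpu_trap(struct task *t)
{
	t->used_fpu = 1;
	t->fpu_counter++;	/* unsigned char: wraps after 256 */
}

After six consecutive FPU-using slices the counter exceeds the threshold of 5 that __switch_to() checks below.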
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index ebd3933005b4..2885fc9d9dcd 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif
 		ti->addr_limit = KERNEL_DS;
 		ti->status &= ~TS_USEDFPU;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		return 0;
 	}
 	*childregs = *current_pt_regs();
@@ -189,7 +189,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		prefetch(next_t->xstate);
 
 #ifdef CONFIG_MMU
@@ -207,7 +207,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		__fpu_state_restore();
 
 	return prev;
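
These two sites are where the streak pays off: once the counter passes 5, __switch_to() prefetches the xstate save area early and then restores the FPU before returning to the task, so no trap is taken at all. A condensed model of that decision, continuing the sketch above (not the kernel's actual control flow):

/* Switch-in fast path: eager restore for FPU-hot tasks. */
static void switch_in(struct task *next)
{
	if (next->fpu_counter > 5)
		fpu_trap(next);	/* restore now; kernel also prefetches xstate */
	/* otherwise leave the FPU disabled and let the first FPU
	 * instruction trap lazily into fpu_state_restore() */
}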
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 174d124b419e..e2062e643341 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -374,7 +374,7 @@ asmlinkage void ret_from_kernel_thread(void);
 int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long arg, struct task_struct *p)
 {
-	struct pt_regs *childregs, *regs = current_pt_regs();
+	struct pt_regs *childregs;
 
 #ifdef CONFIG_SH_FPU
 	/* can't happen for a kernel thread */
@@ -393,7 +393,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->regs[2] = (unsigned long)arg;
-		childregs->regs[3] = (unsigned long)fn;
+		childregs->regs[3] = (unsigned long)usp;
 		childregs->sr = (1 << 30); /* not user_mode */
 		childregs->sr |= SR_FD; /* Invalidate FPU flag */
 		p->thread.pc = (unsigned long) ret_from_kernel_thread;
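
The regs[3] hunk fixes a leftover from the generic kernel_thread() rework: this copy_thread() signature has no 'fn' parameter, so the stale reference no longer compiled. For PF_KTHREAD children the thread function arrives through usp and its argument through arg, because the generic caller packs them into those slots. Simplified from kernel/fork.c of this era (details elided):

pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       (unsigned long)fn,	/* copy_thread() sees this as usp */
		       (unsigned long)arg,	/* copy_thread() sees this as arg */
		       NULL, NULL);
}

ret_from_kernel_thread then picks the function out of regs[3] and its argument out of regs[2].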
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 33890fd267cb..2d089fe2cba9 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -231,7 +231,7 @@ static void __init bootmem_init_one_node(unsigned int nid)
 	if (!p->node_spanned_pages)
 		return;
 
-	end_pfn = p->node_start_pfn + p->node_spanned_pages;
+	end_pfn = pgdat_end_pfn(p);
 
 	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
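
The mm/init.c change substitutes the generic helper for an open-coded expression; pgdat_end_pfn() in include/linux/mmzone.h expands to exactly the arithmetic it replaces:

/* include/linux/mmzone.h: one past the last pfn the node spans. */
static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}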