author		Suresh Siddha <suresh.b.siddha@intel.com>	2008-03-10 23:28:04 +0100
committer	Ingo Molnar <mingo@elte.hu>			2008-04-19 19:19:55 +0200
commit		61c4628b538608c1a85211ed8438136adfeb9a95 (patch)
tree		290a695299a363153bc692e6d705ac680d64359e	/arch/x86/kernel/process.c
parent		x86: rename find_max_pfn() to propagate_e820_map() (diff)
x86, fpu: split FPU state from task struct - v5
Split the FPU save area from the task struct. This allows easy migration of FPU context, and it's generally cleaner. It also allows the following two optimizations:

1) Only allocate the FPU state when the application actually uses the FPU, i.e. at the first lazy FPU trap. This can save memory for applications that never use the FPU. The next patch does this lazy allocation.

2) Allocate the right size for the actual CPU rather than always 512 bytes. Patches enabling xsave/xrstor support (coming shortly) will take advantage of this.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
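For orientation, the data-structure side of the split (in <asm/processor.h>, not shown in this diffstat) has roughly the following shape. This is a sketch inferred from the commit message and the code below; the i387_* member names are assumed from the pre-existing i387 layout and are illustrative only:

/*
 * Sketch only: the FPU/extended state is no longer embedded in the task,
 * only a pointer to an xstate_size-byte buffer allocated from
 * task_xstate_cachep (see arch_dup_task_struct() below).
 */
union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};

struct thread_struct {
	/* ... other fields unchanged ... */
	union thread_xstate		*xstate;	/* was: embedded 512-byte i387 area */
};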
Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--	arch/x86/kernel/process.c	35
1 files changed, 35 insertions, 0 deletions
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
new file mode 100644
index 000000000000..ead24efbcba0
--- /dev/null
+++ b/arch/x86/kernel/process.c
@@ -0,0 +1,35 @@
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+static struct kmem_cache *task_xstate_cachep;
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	*dst = *src;
+	dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+	if (!dst->thread.xstate)
+		return -ENOMEM;
+	WARN_ON((unsigned long)dst->thread.xstate & 15);
+	memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	return 0;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+	kmem_cache_free(task_xstate_cachep, ti->task->thread.xstate);
+	ti->task->thread.xstate = NULL;
+
+	free_pages((unsigned long)(ti), get_order(THREAD_SIZE));
+}
+
+void arch_task_cache_init(void)
+{
+	task_xstate_cachep =
+		kmem_cache_create("task_xstate", xstate_size,
+				  __alignof__(union thread_xstate),
+				  SLAB_PANIC, NULL);
+}
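The generic fork path is what consumes these hooks; below is a minimal sketch of the kernel/fork.c side, assuming the weak-default pattern this series introduces (illustrative, not part of this file's diff):

#include <linux/sched.h>

/*
 * Default implementations: a plain structure copy and a no-op cache init.
 * x86 overrides both (see above) so that the slab-allocated FPU state is
 * duplicated on fork and the "task_xstate" cache exists at boot.
 */
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void __attribute__((weak)) arch_task_cache_init(void)
{
}

/*
 * dup_task_struct() then does, roughly:
 *
 *	err = arch_dup_task_struct(tsk, orig);
 *	if (err)
 *		goto out;
 *
 * in place of a direct "*tsk = *orig;" assignment, and fork_init()
 * calls arch_task_cache_init() once during boot.
 */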