Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/um/Makefile                      |   1
-rw-r--r--  arch/x86/um/asm/barrier.h                 |  11
-rw-r--r--  arch/x86/um/asm/elf.h                     |   2
-rw-r--r--  arch/x86/um/ldt.c                         | 227
-rw-r--r--  arch/x86/um/shared/sysdep/faultinfo_32.h  |   3
-rw-r--r--  arch/x86/um/shared/sysdep/faultinfo_64.h  |   3
-rw-r--r--  arch/x86/um/shared/sysdep/skas_ptrace.h   |  22
7 files changed, 49 insertions, 220 deletions
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index eafa324eb7a5..acb384d24669 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_BINFMT_ELF) += elfcore.o
 
 subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
 subarch-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += ../lib/rwsem.o
-subarch-$(CONFIG_HIGHMEM) += ../mm/highmem_32.o
 
 else
 
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 8ffd2146fa6a..7e8a1a650435 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -36,22 +36,11 @@
 #endif /* CONFIG_X86_PPRO_FENCE */
 #define dma_wmb() barrier()
 
-#ifdef CONFIG_SMP
-
-#define smp_mb() mb()
-#define smp_rmb() dma_rmb()
-#define smp_wmb() barrier()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-
-#else /* CONFIG_SMP */
-
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 
-#endif /* CONFIG_SMP */
-
 #define read_barrier_depends() do { } while (0)
 #define smp_read_barrier_depends() do { } while (0)
 
diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h
index 25a1022dd793..0a656b727b1a 100644
--- a/arch/x86/um/asm/elf.h
+++ b/arch/x86/um/asm/elf.h
@@ -210,7 +210,7 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
 
 #define ELF_EXEC_PAGESIZE 4096
 
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
 
 extern long elf_aux_hwcap;
 #define ELF_HWCAP (elf_aux_hwcap)
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
index 8e08176f0bcb..5c0b711d2433 100644
--- a/arch/x86/um/ldt.c
+++ b/arch/x86/um/ldt.c
@@ -8,9 +8,7 @@
 #include <linux/slab.h>
 #include <asm/unistd.h>
 #include <os.h>
-#include <proc_mm.h>
 #include <skas.h>
-#include <skas_ptrace.h>
 #include <sysdep/tls.h>
 
 extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
@@ -19,105 +17,20 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func,
              struct user_desc *desc, void **addr, int done)
 {
         long res;
-
-        if (proc_mm) {
-                /*
-                 * This is a special handling for the case, that the mm to
-                 * modify isn't current->active_mm.
-                 * If this is called directly by modify_ldt,
-                 *   (current->active_mm->context.skas.u == mm_idp)
-                 * will be true. So no call to __switch_mm(mm_idp) is done.
-                 * If this is called in case of init_new_ldt or PTRACE_LDT,
-                 * mm_idp won't belong to current->active_mm, but child->mm.
-                 * So we need to switch child's mm into our userspace, then
-                 * later switch back.
-                 *
-                 * Note: I'm unsure: should interrupts be disabled here?
-                 */
-                if (!current->active_mm || current->active_mm == &init_mm ||
-                    mm_idp != &current->active_mm->context.id)
-                        __switch_mm(mm_idp);
-        }
-
-        if (ptrace_ldt) {
-                struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
-                        .func = func,
-                        .ptr = desc,
-                        .bytecount = sizeof(*desc)};
-                u32 cpu;
-                int pid;
-
-                if (!proc_mm)
-                        pid = mm_idp->u.pid;
-                else {
-                        cpu = get_cpu();
-                        pid = userspace_pid[cpu];
-                }
-
-                res = os_ptrace_ldt(pid, 0, (unsigned long) &ldt_op);
-
-                if (proc_mm)
-                        put_cpu();
-        }
-        else {
-                void *stub_addr;
-                res = syscall_stub_data(mm_idp, (unsigned long *)desc,
-                                        (sizeof(*desc) + sizeof(long) - 1) &
-                                            ~(sizeof(long) - 1),
-                                        addr, &stub_addr);
-                if (!res) {
-                        unsigned long args[] = { func,
-                                                 (unsigned long)stub_addr,
-                                                 sizeof(*desc),
-                                                 0, 0, 0 };
-                        res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
-                                               0, addr, done);
-                }
+        void *stub_addr;
+        res = syscall_stub_data(mm_idp, (unsigned long *)desc,
+                                (sizeof(*desc) + sizeof(long) - 1) &
+                                    ~(sizeof(long) - 1),
+                                addr, &stub_addr);
+        if (!res) {
+                unsigned long args[] = { func,
+                                         (unsigned long)stub_addr,
+                                         sizeof(*desc),
+                                         0, 0, 0 };
+                res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
+                                       0, addr, done);
         }
 
-        if (proc_mm) {
-                /*
-                 * This is the second part of special handling, that makes
-                 * PTRACE_LDT possible to implement.
-                 */
-                if (current->active_mm && current->active_mm != &init_mm &&
-                    mm_idp != &current->active_mm->context.id)
-                        __switch_mm(&current->active_mm->context.id);
-        }
-
-        return res;
-}
-
-static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
-{
-        int res, n;
-        struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
-                        .func = 0,
-                        .bytecount = bytecount,
-                        .ptr = kmalloc(bytecount, GFP_KERNEL)};
-        u32 cpu;
-
-        if (ptrace_ldt.ptr == NULL)
-                return -ENOMEM;
-
-        /*
-         * This is called from sys_modify_ldt only, so userspace_pid gives
-         * us the right number
-         */
-
-        cpu = get_cpu();
-        res = os_ptrace_ldt(userspace_pid[cpu], 0, (unsigned long) &ptrace_ldt);
-        put_cpu();
-        if (res < 0)
-                goto out;
-
-        n = copy_to_user(ptr, ptrace_ldt.ptr, res);
-        if (n != 0)
-                res = -EFAULT;
-
-  out:
-        kfree(ptrace_ldt.ptr);
         return res;
 }
 
@@ -145,9 +58,6 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
                 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
         err = bytecount;
 
-        if (ptrace_ldt)
-                return read_ldt_from_host(ptr, bytecount);
-
         mutex_lock(&ldt->lock);
         if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
                 size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
@@ -229,17 +139,11 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
                         goto out;
         }
 
-        if (!ptrace_ldt)
-                mutex_lock(&ldt->lock);
+        mutex_lock(&ldt->lock);
 
         err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
         if (err)
                 goto out_unlock;
-        else if (ptrace_ldt) {
-                /* With PTRACE_LDT available, this is used as a flag only */
-                ldt->entry_count = 1;
-                goto out;
-        }
 
         if (ldt_info.entry_number >= ldt->entry_count &&
             ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
@@ -393,91 +297,56 @@ long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
         int i;
         long page, err=0;
         void *addr = NULL;
-        struct proc_mm_op copy;
 
-        if (!ptrace_ldt)
-                mutex_init(&new_mm->arch.ldt.lock);
+        mutex_init(&new_mm->arch.ldt.lock);
 
         if (!from_mm) {
                 memset(&desc, 0, sizeof(desc));
                 /*
-                 * We have to initialize a clean ldt.
+                 * Now we try to retrieve info about the ldt, we
+                 * inherited from the host. All ldt-entries found
+                 * will be reset in the following loop
                  */
-                if (proc_mm) {
-                        /*
-                         * If the new mm was created using proc_mm, host's
-                         * default-ldt currently is assigned, which normally
-                         * contains the call-gates for lcall7 and lcall27.
-                         * To remove these gates, we simply write an empty
-                         * entry as number 0 to the host.
-                         */
-                        err = write_ldt_entry(&new_mm->id, 1, &desc, &addr, 1);
-                }
-                else{
-                        /*
-                         * Now we try to retrieve info about the ldt, we
-                         * inherited from the host. All ldt-entries found
-                         * will be reset in the following loop
-                         */
-                        ldt_get_host_info();
-                        for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
-                                desc.entry_number = *num_p;
-                                err = write_ldt_entry(&new_mm->id, 1, &desc,
-                                                      &addr, *(num_p + 1) == -1);
-                                if (err)
-                                        break;
-                        }
+                ldt_get_host_info();
+                for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
+                        desc.entry_number = *num_p;
+                        err = write_ldt_entry(&new_mm->id, 1, &desc,
+                                              &addr, *(num_p + 1) == -1);
+                        if (err)
+                                break;
                 }
                 new_mm->arch.ldt.entry_count = 0;
 
                 goto out;
         }
 
-        if (proc_mm) {
-                /*
-                 * We have a valid from_mm, so we now have to copy the LDT of
-                 * from_mm to new_mm, because using proc_mm an new mm with
-                 * an empty/default LDT was created in new_mm()
-                 */
-                copy = ((struct proc_mm_op) { .op = MM_COPY_SEGMENTS,
-                                              .u =
-                                              { .copy_segments =
-                                                        from_mm->id.u.mm_fd } } );
-                i = os_write_file(new_mm->id.u.mm_fd, &copy, sizeof(copy));
-                if (i != sizeof(copy))
-                        printk(KERN_ERR "new_mm : /proc/mm copy_segments "
-                               "failed, err = %d\n", -i);
-        }
-
-        if (!ptrace_ldt) {
-                /*
-                 * Our local LDT is used to supply the data for
-                 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
-                 * i.e., we have to use the stub for modify_ldt, which
-                 * can't handle the big read buffer of up to 64kB.
-                 */
-                mutex_lock(&from_mm->arch.ldt.lock);
-                if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
-                        memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
-                               sizeof(new_mm->arch.ldt.u.entries));
-                else {
-                        i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
-                        while (i-->0) {
-                                page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
-                                if (!page) {
-                                        err = -ENOMEM;
-                                        break;
-                                }
-                                new_mm->arch.ldt.u.pages[i] =
-                                        (struct ldt_entry *) page;
-                                memcpy(new_mm->arch.ldt.u.pages[i],
-                                       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
+        /*
+         * Our local LDT is used to supply the data for
+         * modify_ldt(READLDT), if PTRACE_LDT isn't available,
+         * i.e., we have to use the stub for modify_ldt, which
+         * can't handle the big read buffer of up to 64kB.
+         */
+        mutex_lock(&from_mm->arch.ldt.lock);
+        if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
+                memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
+                       sizeof(new_mm->arch.ldt.u.entries));
+        else {
+                i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
+                while (i-->0) {
+                        page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+                        if (!page) {
+                                err = -ENOMEM;
+                                break;
                         }
+                        new_mm->arch.ldt.u.pages[i] =
+                                (struct ldt_entry *) page;
+                        memcpy(new_mm->arch.ldt.u.pages[i],
+                               from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
                 }
-                new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
-                mutex_unlock(&from_mm->arch.ldt.lock);
         }
+        new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
+        mutex_unlock(&from_mm->arch.ldt.lock);
 
  out:
         return err;
@@ -488,7 +357,7 @@ void free_ldt(struct mm_context *mm)
 {
         int i;
 
-        if (!ptrace_ldt && mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
+        if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
                 i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
                 while (i-- > 0)
                         free_page((long) mm->arch.ldt.u.pages[i]);
diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h b/arch/x86/um/shared/sysdep/faultinfo_32.h
index a26086b8a800..b6f2437ec29c 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_32.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_32.h
@@ -27,9 +27,6 @@ struct faultinfo {
 /* This is Page Fault */
 #define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
 
-/* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */
-#define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo)
-
 #define PTRACE_FULL_FAULTINFO 0
 
 #endif
diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h b/arch/x86/um/shared/sysdep/faultinfo_64.h
index f811cbe15d62..ee88f88974ea 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_64.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_64.h
@@ -27,9 +27,6 @@ struct faultinfo {
 /* This is Page Fault */
 #define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14)
 
-/* No broken SKAS API, which doesn't pass trap_no, here. */
-#define SEGV_MAYBE_FIXABLE(fi) 0
-
 #define PTRACE_FULL_FAULTINFO 1
 
 #endif
diff --git a/arch/x86/um/shared/sysdep/skas_ptrace.h b/arch/x86/um/shared/sysdep/skas_ptrace.h
deleted file mode 100644
index 453febe98993..000000000000
--- a/arch/x86/um/shared/sysdep/skas_ptrace.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SYSDEP_X86_SKAS_PTRACE_H
-#define __SYSDEP_X86_SKAS_PTRACE_H
-
-struct ptrace_faultinfo {
-        int is_write;
-        unsigned long addr;
-};
-
-struct ptrace_ldt {
-        int func;
-        void *ptr;
-        unsigned long bytecount;
-};
-
-#define PTRACE_LDT 54
-
-#endif