Diffstat (limited to 'arch/um/kernel')
-rw-r--r--  arch/um/kernel/physmem.c  | 228
-rw-r--r--  arch/um/kernel/skas/tlb.c |  21
-rw-r--r--  arch/um/kernel/tlb.c      |  42
3 files changed, 26 insertions, 265 deletions
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index df1ad3ba130c..3ba6e4c841da 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -21,229 +21,8 @@
 #include "kern.h"
 #include "init.h"
 
-struct phys_desc {
-        struct rb_node rb;
-        int fd;
-        __u64 offset;
-        void *virt;
-        unsigned long phys;
-        struct list_head list;
-};
-
-static struct rb_root phys_mappings = RB_ROOT;
-
-static struct rb_node **find_rb(void *virt)
-{
-        struct rb_node **n = &phys_mappings.rb_node;
-        struct phys_desc *d;
-
-        while(*n != NULL){
-                d = rb_entry(*n, struct phys_desc, rb);
-                if(d->virt == virt)
-                        return n;
-
-                if(d->virt > virt)
-                        n = &(*n)->rb_left;
-                else
-                        n = &(*n)->rb_right;
-        }
-
-        return n;
-}
-
-static struct phys_desc *find_phys_mapping(void *virt)
-{
-        struct rb_node **n = find_rb(virt);
-
-        if(*n == NULL)
-                return NULL;
-
-        return rb_entry(*n, struct phys_desc, rb);
-}
-
-static void insert_phys_mapping(struct phys_desc *desc)
-{
-        struct rb_node **n = find_rb(desc->virt);
-
-        if(*n != NULL)
-                panic("Physical remapping for %p already present",
-                      desc->virt);
-
-        rb_link_node(&desc->rb, rb_parent(*n), n);
-        rb_insert_color(&desc->rb, &phys_mappings);
-}
-
-LIST_HEAD(descriptor_mappings);
-
-struct desc_mapping {
-        int fd;
-        struct list_head list;
-        struct list_head pages;
-};
-
-static struct desc_mapping *find_mapping(int fd)
-{
-        struct desc_mapping *desc;
-        struct list_head *ele;
-
-        list_for_each(ele, &descriptor_mappings){
-                desc = list_entry(ele, struct desc_mapping, list);
-                if(desc->fd == fd)
-                        return desc;
-        }
-
-        return NULL;
-}
-
-static struct desc_mapping *descriptor_mapping(int fd)
-{
-        struct desc_mapping *desc;
-
-        desc = find_mapping(fd);
-        if(desc != NULL)
-                return desc;
-
-        desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
-        if(desc == NULL)
-                return NULL;
-
-        *desc = ((struct desc_mapping)
-                { .fd =    fd,
-                  .list =  LIST_HEAD_INIT(desc->list),
-                  .pages = LIST_HEAD_INIT(desc->pages) });
-        list_add(&desc->list, &descriptor_mappings);
-
-        return desc;
-}
-
-int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
-{
-        struct desc_mapping *fd_maps;
-        struct phys_desc *desc;
-        unsigned long phys;
-        int err;
-
-        fd_maps = descriptor_mapping(fd);
-        if(fd_maps == NULL)
-                return -ENOMEM;
-
-        phys = __pa(virt);
-        desc = find_phys_mapping(virt);
-        if(desc != NULL)
-                panic("Address 0x%p is already substituted\n", virt);
-
-        err = -ENOMEM;
-        desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
-        if(desc == NULL)
-                goto out;
-
-        *desc = ((struct phys_desc)
-                { .fd =     fd,
-                  .offset = offset,
-                  .virt =   virt,
-                  .phys =   __pa(virt),
-                  .list =   LIST_HEAD_INIT(desc->list) });
-        insert_phys_mapping(desc);
-
-        list_add(&desc->list, &fd_maps->pages);
-
-        virt = (void *) ((unsigned long) virt & PAGE_MASK);
-        err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
-        if(!err)
-                goto out;
-
-        rb_erase(&desc->rb, &phys_mappings);
-        kfree(desc);
- out:
-        return err;
-}
-
 static int physmem_fd = -1;
 
-static void remove_mapping(struct phys_desc *desc)
-{
-        void *virt = desc->virt;
-        int err;
-
-        rb_erase(&desc->rb, &phys_mappings);
-        list_del(&desc->list);
-        kfree(desc);
-
-        err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
-        if(err)
-                panic("Failed to unmap block device page from physical memory, "
-                      "errno = %d", -err);
-}
-
-int physmem_remove_mapping(void *virt)
-{
-        struct phys_desc *desc;
-
-        virt = (void *) ((unsigned long) virt & PAGE_MASK);
-        desc = find_phys_mapping(virt);
-        if(desc == NULL)
-                return 0;
-
-        remove_mapping(desc);
-        return 1;
-}
-
-void physmem_forget_descriptor(int fd)
-{
-        struct desc_mapping *desc;
-        struct phys_desc *page;
-        struct list_head *ele, *next;
-        __u64 offset;
-        void *addr;
-        int err;
-
-        desc = find_mapping(fd);
-        if(desc == NULL)
-                return;
-
-        list_for_each_safe(ele, next, &desc->pages){
-                page = list_entry(ele, struct phys_desc, list);
-                offset = page->offset;
-                addr = page->virt;
-                remove_mapping(page);
-                err = os_seek_file(fd, offset);
-                if(err)
-                        panic("physmem_forget_descriptor - failed to seek "
-                              "to %lld in fd %d, error = %d\n",
-                              offset, fd, -err);
-                err = os_read_file(fd, addr, PAGE_SIZE);
-                if(err < 0)
-                        panic("physmem_forget_descriptor - failed to read "
-                              "from fd %d to 0x%p, error = %d\n",
-                              fd, addr, -err);
-        }
-
-        list_del(&desc->list);
-        kfree(desc);
-}
-
-EXPORT_SYMBOL(physmem_forget_descriptor);
-EXPORT_SYMBOL(physmem_remove_mapping);
-EXPORT_SYMBOL(physmem_subst_mapping);
-
-void arch_free_page(struct page *page, int order)
-{
-        void *virt;
-        int i;
-
-        for(i = 0; i < (1 << order); i++){
-                virt = __va(page_to_phys(page + i));
-                physmem_remove_mapping(virt);
-        }
-}
-
-int is_remapped(void *virt)
-{
-        struct phys_desc *desc = find_phys_mapping(virt);
-
-        return desc != NULL;
-}
-
 /* Changed during early boot */
 unsigned long high_physmem;
 
@@ -350,14 +129,9 @@ void setup_physmem(unsigned long start, unsigned long reserve_end,
 
 int phys_mapping(unsigned long phys, __u64 *offset_out)
 {
-        struct phys_desc *desc = find_phys_mapping(__va(phys & PAGE_MASK));
         int fd = -1;
 
-        if(desc != NULL){
-                fd = desc->fd;
-                *offset_out = desc->offset;
-        }
-        else if(phys < physmem_size){
+        if(phys < physmem_size){
                 fd = physmem_fd;
                 *offset_out = phys;
         }
diff --git a/arch/um/kernel/skas/tlb.c b/arch/um/kernel/skas/tlb.c
index c43901aa9368..b3d722ddde31 100644
--- a/arch/um/kernel/skas/tlb.c
+++ b/arch/um/kernel/skas/tlb.c
@@ -27,9 +27,9 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
         switch(op->type){
         case MMAP:
                 ret = map(&mmu->skas.id, op->u.mmap.addr,
-                          op->u.mmap.len, op->u.mmap.r, op->u.mmap.w,
-                          op->u.mmap.x, op->u.mmap.fd,
-                          op->u.mmap.offset, finished, flush);
+                          op->u.mmap.len, op->u.mmap.prot,
+                          op->u.mmap.fd, op->u.mmap.offset, finished,
+                          flush);
                 break;
         case MUNMAP:
                 ret = unmap(&mmu->skas.id, op->u.munmap.addr,
@@ -37,8 +37,7 @@ static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
                 break;
         case MPROTECT:
                 ret = protect(&mmu->skas.id, op->u.mprotect.addr,
-                              op->u.mprotect.len, op->u.mprotect.r,
-                              op->u.mprotect.w, op->u.mprotect.x,
+                              op->u.mprotect.len, op->u.mprotect.prot,
                               finished, flush);
                 break;
         default:
@@ -102,10 +101,10 @@ void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
         pte_t *pte;
         struct mm_struct *mm = vma->vm_mm;
         void *flush = NULL;
-        int r, w, x, err = 0;
+        int r, w, x, prot, err = 0;
         struct mm_id *mm_id;
 
-        pgd = pgd_offset(vma->vm_mm, address);
+        pgd = pgd_offset(mm, address);
         if(!pgd_present(*pgd))
                 goto kill;
 
@@ -130,19 +129,21 @@ void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
         }
 
         mm_id = &mm->context.skas.id;
+        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
+                (x ? UM_PROT_EXEC : 0));
         if(pte_newpage(*pte)){
                 if(pte_present(*pte)){
                         unsigned long long offset;
                         int fd;
 
                         fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
-                        err = map(mm_id, address, PAGE_SIZE, r, w, x, fd,
-                                  offset, 1, &flush);
+                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
+                                  1, &flush);
                 }
                 else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
         }
         else if(pte_newprot(*pte))
-                err = protect(mm_id, address, PAGE_SIZE, r, w, x, 1, &flush);
+                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);
 
         if(err)
                 goto kill;
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 4a39d50d2d62..8a8d52851443 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -17,7 +17,7 @@
 #include "os.h"
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
-                    int r, int w, int x, struct host_vm_op *ops, int *index,
+                    unsigned int prot, struct host_vm_op *ops, int *index,
                     int last_filled, union mm_context *mmu, void **flush,
                     int (*do_ops)(union mm_context *, struct host_vm_op *,
                                   int, int, void **))
@@ -31,8 +31,7 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                 last = &ops[*index];
                 if((last->type == MMAP) &&
                    (last->u.mmap.addr + last->u.mmap.len == virt) &&
-                   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
-                   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
+                   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
                    (last->u.mmap.offset + last->u.mmap.len == offset)){
                         last->u.mmap.len += len;
                         return 0;
@@ -48,9 +47,7 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                         .u = { .mmap = {
                                 .addr   = virt,
                                 .len    = len,
-                                .r      = r,
-                                .w      = w,
-                                .x      = x,
+                                .prot   = prot,
                                 .fd     = fd,
                                 .offset = offset }
                         } });
@@ -87,8 +84,8 @@ static int add_munmap(unsigned long addr, unsigned long len,
         return ret;
 }
 
-static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
-                        int x, struct host_vm_op *ops, int *index,
+static int add_mprotect(unsigned long addr, unsigned long len,
+                        unsigned int prot, struct host_vm_op *ops, int *index,
                         int last_filled, union mm_context *mmu, void **flush,
                         int (*do_ops)(union mm_context *, struct host_vm_op *,
                                       int, int, void **))
@@ -100,8 +97,7 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
                 last = &ops[*index];
                 if((last->type == MPROTECT) &&
                    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
-                   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
-                   (last->u.mprotect.x == x)){
+                   (last->u.mprotect.prot == prot)){
                         last->u.mprotect.len += len;
                         return 0;
                 }
@@ -116,9 +112,7 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
                         .u = { .mprotect = {
                                 .addr   = addr,
                                 .len    = len,
-                                .r      = r,
-                                .w      = w,
-                                .x      = x } } });
+                                .prot   = prot } } });
         return ret;
 }
 
@@ -133,7 +127,7 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                    void **))
 {
         pte_t *pte;
-        int r, w, x, ret = 0;
+        int r, w, x, prot, ret = 0;
 
         pte = pte_offset_kernel(pmd, addr);
         do {
@@ -146,19 +140,19 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                 } else if (!pte_dirty(*pte)) {
                         w = 0;
                 }
+                prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
+                        (x ? UM_PROT_EXEC : 0));
                 if(force || pte_newpage(*pte)){
                         if(pte_present(*pte))
                                 ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
-                                               PAGE_SIZE, r, w, x, ops,
-                                               op_index, last_op, mmu, flush,
-                                               do_ops);
+                                               PAGE_SIZE, prot, ops, op_index,
+                                               last_op, mmu, flush, do_ops);
                         else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
                                               last_op, mmu, flush, do_ops);
                 }
                 else if(pte_newprot(*pte))
-                        ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
-                                           op_index, last_op, mmu, flush,
-                                           do_ops);
+                        ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
                                            last_op, mmu, flush, do_ops);
                 *pte = pte_mkuptodate(*pte);
         } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
         return ret;
@@ -377,14 +371,6 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr)
         return(pte_offset_map(pmd, addr));
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
-{
-        address &= PAGE_MASK;
-
-        CHOOSE_MODE(flush_tlb_range(vma, address, address + PAGE_SIZE),
-                    flush_tlb_page_skas(vma, address));
-}
-
 void flush_tlb_all(void)
 {
         flush_tlb_mm(current->mm);
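
Note on the recurring pattern: both flush_tlb_page_skas() and update_pte_range() now fold the separate r/w/x booleans into a single prot bitmask before handing the operation to map()/protect(). A minimal sketch of that folding is below; it assumes UM_PROT_READ, UM_PROT_WRITE and UM_PROT_EXEC are distinct single-bit flags (their definitions live in the UML headers, not in this diff), and the helper name fold_prot() is purely illustrative, not part of the patch, which open-codes the expression at each call site.

/* Illustrative sketch only: assumed bit values and a made-up helper name. */
#define UM_PROT_READ  (1 << 0)
#define UM_PROT_WRITE (1 << 1)
#define UM_PROT_EXEC  (1 << 2)

static inline unsigned int fold_prot(int r, int w, int x)
{
        return (r ? UM_PROT_READ : 0) |
               (w ? UM_PROT_WRITE : 0) |
               (x ? UM_PROT_EXEC : 0);
}

Passing one integer instead of three is also what lets add_mmap() and add_mprotect() merge adjacent host-VM operations with a single equality test on prot, as seen in the tlb.c hunks above.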