author     Bernd Schmidt <bernds_cb1@t-online.de>            2009-09-22 02:03:57 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-09-22 16:17:43 +0200
commit     eb8cdec4a984fde123a91250dcc9e0bddf5eafdc
tree       9f97b5949e6e63ae947363149b62ed224dad5ab9 /mm/nommu.c
parent     pcmcia: cleanup/fixup patch for sa1100_jornada_pcmcia driver
nommu: add support for Memory Protection Units (MPU)
Some architectures (like the Blackfin arch) implement some of the
"simpler" features that one would expect out of an MMU, such as memory
protection.
In our case, we actually get read/write/exec protection down to the page
boundary, so processes can't stomp on each other, let alone the kernel.
There is, however, a performance decrease (which depends greatly on the
workload), as the hardware/software interaction was not optimized at
design time.
Signed-off-by: Bernd Schmidt <bernds_cb1@t-online.de>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Acked-by: David Howells <dhowells@redhat.com>
Acked-by: Greg Ungerer <gerg@snapgear.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
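
For context, the patch expects an architecture that selects CONFIG_MPU to export
two hooks from <asm/mmu_context.h>: protect_page(), which records the access
rights for one page, and update_protections(), which pushes the accumulated
rights into the MPU hardware. The sketch below only illustrates that contract;
the per-mm storage field and the hardware reload helper are invented names for
the example, and this is not the actual Blackfin implementation.

/*
 * Illustrative sketch only -- not the Blackfin code.  Assumes the arch
 * keeps a per-mm permission table in mm->context (the field and helper
 * names below are invented for this example).
 */
#ifdef CONFIG_MPU

/* Record the access rights for one page of 'mm'.  'flags' is the vma's
 * vm_flags when a mapping is added, or 0 when it is torn down, so only
 * the VM_READ/VM_WRITE/VM_EXEC bits are of interest. */
void protect_page(struct mm_struct *mm, unsigned long addr, unsigned long flags)
{
        unsigned long idx = addr >> PAGE_SHIFT;   /* simplified index scheme */

        mm->context.page_prot[idx] = flags & (VM_READ | VM_WRITE | VM_EXEC);
}

/* Reload the MPU from the updated table; this only needs to take effect
 * immediately if 'mm' is the address space currently on the CPU. */
void update_protections(struct mm_struct *mm)
{
        if (mm == current->mm)
                mpu_reload_tables(mm);            /* invented name for the hw reload */
}

#endif /* CONFIG_MPU */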
Diffstat (limited to 'mm/nommu.c')
-rw-r--r--   mm/nommu.c   21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 2d02ca17ce18..1a4473faac48 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -33,6 +33,7 @@
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 #include "internal.h"
 
 static inline __attribute__((format(printf, 1, 2)))
@@ -623,6 +624,22 @@ static void put_nommu_region(struct vm_region *region)
 }
 
 /*
+ * update protection on a vma
+ */
+static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
+{
+#ifdef CONFIG_MPU
+        struct mm_struct *mm = vma->vm_mm;
+        long start = vma->vm_start & PAGE_MASK;
+        while (start < vma->vm_end) {
+                protect_page(mm, start, flags);
+                start += PAGE_SIZE;
+        }
+        update_protections(mm);
+#endif
+}
+
+/*
  * add a VMA into a process's mm_struct in the appropriate place in the list
  * and tree and add to the address space's page tree also if not an anonymous
  * page
@@ -641,6 +658,8 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
         mm->map_count++;
         vma->vm_mm = mm;
 
+        protect_vma(vma, vma->vm_flags);
+
         /* add the VMA to the mapping */
         if (vma->vm_file) {
                 mapping = vma->vm_file->f_mapping;
@@ -703,6 +722,8 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
         kenter("%p", vma);
 
+        protect_vma(vma, 0);
+
         mm->map_count--;
         if (mm->mmap_cache == vma)
                 mm->mmap_cache = NULL;
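
For readability, the same additions shown as plain C rather than diff hunks: the
new protect_vma() helper walks the VMA a page at a time and compiles away when
CONFIG_MPU is not set; add_vma_to_mm() grants the VMA's own vm_flags, and
delete_vma_from_mm() passes 0 to revoke access before the VMA is dropped.

/* update protection on a vma */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
        struct mm_struct *mm = vma->vm_mm;
        long start = vma->vm_start & PAGE_MASK;

        while (start < vma->vm_end) {
                protect_page(mm, start, flags);
                start += PAGE_SIZE;
        }
        update_protections(mm);
#endif
}

        /* in add_vma_to_mm(): map the range with the VMA's own permissions */
        protect_vma(vma, vma->vm_flags);

        /* in delete_vma_from_mm(): revoke all access to the range */
        protect_vma(vma, 0);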