author      Hugh Dickins <hugh@veritas.com>             2005-10-30 02:16:27 +0100
committer   Linus Torvalds <torvalds@g5.osdl.org>       2005-10-30 05:40:40 +0100
commit      705e87c0c3c38424f7f30556c85bc20e808d2f59 (patch)
tree        7a237e6266f4801385e1226cc497b47e3a2458bd /mm
parent      [PATCH] mm: page fault handler locking (diff)
download    linux-705e87c0c3c38424f7f30556c85bc20e808d2f59.tar.xz
            linux-705e87c0c3c38424f7f30556c85bc20e808d2f59.zip
[PATCH] mm: pte_offset_map_lock loops
Convert those common loops using page_table_lock on the outside and
pte_offset_map within to use just pte_offset_map_lock within instead.
These all hold mmap_sem (some exclusively, some not), so at no level can a
page table be whipped away from beneath them. But whereas pte_alloc loops
tested with the "atomic" pmd_present, these loops are testing with pmd_none,
which on i386 PAE tests both lower and upper halves.
That's now unsafe, so add a cast into pmd_none to test only the vital lower
half: we lose a little sensitivity to a corrupt middle directory, but not
enough to worry about. It appears that i386 and UML were the only
architectures vulnerable in this way; pgd and pud are no problem.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
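
For orientation, every converted loop follows the same shape. Below is a
minimal sketch of that shape, not code from the patch itself:
example_pte_range is a hypothetical stand-in for check_pte_range,
change_pte_range, msync_pte_range and unuse_pte_range, and only the
locking calls actually used in the hunks below are assumed.

#include <linux/mm.h>
#include <linux/spinlock.h>

/* Hypothetical walker illustrating the converted locking pattern. */
static void example_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;	/* lock covering this page table */

	/* Map the page table and take whichever lock guards it. */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		/* examine or modify *pte with the lock held */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	/* Drop the lock and the mapping in one call. */
	pte_unmap_unlock(pte - 1, ptl);
}

Compared with the old spin_lock(&mm->page_table_lock) / pte_offset_map()
pairing, the caller no longer needs to know which spinlock protects the
page table: pte_offset_map_lock() hands it back through ptl, so a
different (finer-grained) lock can be substituted without changing these
loops again.
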
Diffstat (limited to 'mm')
-rw-r--r--   mm/mempolicy.c |  7
-rw-r--r--   mm/mprotect.c  |  7
-rw-r--r--   mm/msync.c     | 21
-rw-r--r--   mm/swapfile.c  | 20
4 files changed, 21 insertions, 34 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 11d824f282f1..902d4c9eccdc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -228,9 +228,9 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	pte_t *orig_pte;
 	pte_t *pte;
+	spinlock_t *ptl;
 
-	spin_lock(&vma->vm_mm->page_table_lock);
-	orig_pte = pte = pte_offset_map(pmd, addr);
+	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
 		unsigned long pfn;
 		unsigned int nid;
@@ -246,8 +246,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		if (!node_isset(nid, *nodes))
 			break;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(orig_pte);
-	spin_unlock(&vma->vm_mm->page_table_lock);
+	pte_unmap_unlock(orig_pte, ptl);
 	return addr != end;
 }
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 672a76fddd5e..17a2b52b753b 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -29,8 +29,9 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot)
 {
 	pte_t *pte;
+	spinlock_t *ptl;
 
-	pte = pte_offset_map(pmd, addr);
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	do {
 		if (pte_present(*pte)) {
 			pte_t ptent;
@@ -44,7 +45,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 			lazy_mmu_prot_update(ptent);
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(pte - 1);
+	pte_unmap_unlock(pte - 1, ptl);
 }
 
 static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -88,7 +89,6 @@ static void change_protection(struct vm_area_struct *vma,
 	BUG_ON(addr >= end);
 	pgd = pgd_offset(mm, addr);
 	flush_cache_range(vma, addr, end);
-	spin_lock(&mm->page_table_lock);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
@@ -96,7 +96,6 @@ static void change_protection(struct vm_area_struct *vma,
 		change_pud_range(mm, pgd, addr, next, newprot);
 	} while (pgd++, addr = next, addr != end);
 	flush_tlb_range(vma, start, end);
-	spin_unlock(&mm->page_table_lock);
 }
 
 static int
diff --git a/mm/msync.c b/mm/msync.c
index 860395486060..0e040e9c39d8 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -17,28 +17,22 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-/*
- * Called with mm->page_table_lock held to protect against other
- * threads/the swapper from ripping pte's out from under us.
- */
-
 static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte;
+	spinlock_t *ptl;
 	int progress = 0;
 
 again:
-	pte = pte_offset_map(pmd, addr);
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
 		unsigned long pfn;
 		struct page *page;
 
 		if (progress >= 64) {
 			progress = 0;
-			if (need_resched() ||
-					need_lockbreak(&mm->page_table_lock))
+			if (need_resched() || need_lockbreak(ptl))
 				break;
 		}
 		progress++;
@@ -58,8 +52,8 @@ again:
 		set_page_dirty(page);
 		progress += 3;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(pte - 1);
-	cond_resched_lock(&mm->page_table_lock);
+	pte_unmap_unlock(pte - 1, ptl);
+	cond_resched();
 	if (addr != end)
 		goto again;
 }
@@ -97,7 +91,6 @@ static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 static void msync_page_range(struct vm_area_struct *vma,
 				unsigned long addr, unsigned long end)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
 	unsigned long next;
 
@@ -110,16 +103,14 @@ static void msync_page_range(struct vm_area_struct *vma,
 		return;
 
 	BUG_ON(addr >= end);
-	pgd = pgd_offset(mm, addr);
+	pgd = pgd_offset(vma->vm_mm, addr);
 	flush_cache_range(vma, addr, end);
-	spin_lock(&mm->page_table_lock);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
 		msync_pud_range(vma, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
-	spin_unlock(&mm->page_table_lock);
 }
 
 /*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 296e0bbf7836..510f0039b000 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -401,8 +401,6 @@ void free_swap_and_cache(swp_entry_t entry)
  * No need to decide whether this PTE shares the swap entry with others,
  * just let do_wp_page work it out if a write is requested later - to
  * force COW, vm_page_prot omits write permission from any private vma.
- *
- * vma->vm_mm->page_table_lock is held.
  */
 static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
 		unsigned long addr, swp_entry_t entry, struct page *page)
@@ -424,23 +422,25 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end,
 				swp_entry_t entry, struct page *page)
 {
-	pte_t *pte;
 	pte_t swp_pte = swp_entry_to_pte(entry);
+	pte_t *pte;
+	spinlock_t *ptl;
+	int found = 0;
 
-	pte = pte_offset_map(pmd, addr);
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
 		/*
 		 * swapoff spends a _lot_ of time in this loop!
 		 * Test inline before going to call unuse_pte.
 		 */
 		if (unlikely(pte_same(*pte, swp_pte))) {
-			unuse_pte(vma, pte, addr, entry, page);
-			pte_unmap(pte);
-			return 1;
+			unuse_pte(vma, pte++, addr, entry, page);
+			found = 1;
+			break;
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(pte - 1);
-	return 0;
+	pte_unmap_unlock(pte - 1, ptl);
+	return found;
 }
 
 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
@@ -522,12 +522,10 @@ static int unuse_mm(struct mm_struct *mm,
 		down_read(&mm->mmap_sem);
 		lock_page(page);
 	}
-	spin_lock(&mm->page_table_lock);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (vma->anon_vma && unuse_vma(vma, entry, page))
 			break;
 	}
-	spin_unlock(&mm->page_table_lock);
 	up_read(&mm->mmap_sem);
 	/*
	 * Currently unuse_mm cannot fail, but leave error handling
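
The i386 side of the pmd_none change described in the log lies outside the
'mm'-limited diffstat above, so it does not appear in this diff. As a rough
illustration of what "add a cast into pmd_none to test only the vital lower
half" means on i386 PAE, the emptiness test is narrowed from the full 64-bit
entry to its lower 32 bits, along these lines (a sketch of the idea, not the
literal arch patch):

/*
 * Sketch only: on i386 PAE a pmd entry is 64 bits wide and its two
 * 32-bit halves are not read atomically, so an unlocked pmd_none()
 * could observe a half-updated entry.  Casting pmd_val() down to
 * unsigned long restricts the test to the lower half, which carries
 * the present bit and is written atomically.
 */
#define pmd_none(x)	(!(unsigned long)pmd_val(x))

This reflects the trade-off stated in the log: a corrupt upper half would no
longer be noticed by pmd_none(), but that loss of sensitivity is judged not
enough to worry about.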