author | Paul Mundt <lethal@linux-sh.org> | 2009-02-12 09:27:56 +0100 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2009-02-12 09:27:56 +0100 |
commit | 41480ae7a383dcffa497decdd97b3cb2caaa18ec (patch) | |
tree | f1e3afce2cbd0bbc544cd86a73e5b3093eb081c4 /mm/mlock.c | |
parent | sh: Restrict old CMT timer code to SH-2/SH-2A. (diff) | |
parent | serial: sh-sci: fix overrun error handling for SH7785 SCIF. (diff) | |
download | linux-41480ae7a383dcffa497decdd97b3cb2caaa18ec.tar.xz linux-41480ae7a383dcffa497decdd97b3cb2caaa18ec.zip |
Merge branch 'sh/stable-updates'
Diffstat (limited to 'mm/mlock.c')
-rw-r--r-- | mm/mlock.c | 48 |
1 file changed, 4 insertions, 44 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index 2904a347e476..037161d61b4e 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -294,14 +294,10 @@ static inline int __mlock_posix_error_return(long retval)
  *
  * return number of pages [> 0] to be removed from locked_vm on success
  * of "special" vmas.
- *
- * return negative error if vma spanning @start-@range disappears while
- * mmap semaphore is dropped. Unlikely?
  */
 long mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	int nr_pages = (end - start) / PAGE_SIZE;
 
 	BUG_ON(!(vma->vm_flags & VM_LOCKED));
@@ -314,20 +310,11 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
 	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 			is_vm_hugetlb_page(vma) ||
 			vma == get_gate_vma(current))) {
-		long error;
-		downgrade_write(&mm->mmap_sem);
-
-		error = __mlock_vma_pages_range(vma, start, end, 1);
-		up_read(&mm->mmap_sem);
-		/* vma can change or disappear */
-		down_write(&mm->mmap_sem);
-		vma = find_vma(mm, start);
-		/* non-NULL vma must contain @start, but need to check @end */
-		if (!vma || end > vma->vm_end)
-			return -ENOMEM;
+		__mlock_vma_pages_range(vma, start, end, 1);
 
-		return 0;	/* hide other errors from mmap(), et al */
+		/* Hide errors from mmap() and other callers */
+		return 0;
 	}
 
 	/*
@@ -438,41 +425,14 @@ success:
 	vma->vm_flags = newflags;
 
 	if (lock) {
-		/*
-		 * mmap_sem is currently held for write. Downgrade the write
-		 * lock to a read lock so that other faults, mmap scans, ...
-		 * while we fault in all pages.
-		 */
-		downgrade_write(&mm->mmap_sem);
-
 		ret = __mlock_vma_pages_range(vma, start, end, 1);
-		/*
-		 * Need to reacquire mmap sem in write mode, as our callers
-		 * expect this. We have no support for atomically upgrading
-		 * a sem to write, so we need to check for ranges while sem
-		 * is unlocked.
-		 */
-		up_read(&mm->mmap_sem);
-		/* vma can change or disappear */
-		down_write(&mm->mmap_sem);
-		*prev = find_vma(mm, start);
-		/* non-NULL *prev must contain @start, but need to check @end */
-		if (!(*prev) || end > (*prev)->vm_end)
-			ret = -ENOMEM;
-		else if (ret > 0) {
+		if (ret > 0) {
 			mm->locked_vm -= ret;
 			ret = 0;
 		} else
 			ret = __mlock_posix_error_return(ret); /* translate if needed */
 	} else {
-		/*
-		 * TODO: for unlocking, pages will already be resident, so
-		 * we don't need to wait for allocations/reclaim/pagein, ...
-		 * However, unlocking a very large region can still take a
-		 * while. Should we downgrade the semaphore for both lock
-		 * AND unlock ?
-		 */
		__mlock_vma_pages_range(vma, start, end, 0);
 	}
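The caller-visible effect of the first hunk is easiest to see from userspace. The sketch below is a minimal illustration written for this page, not code from the commit; it assumes a Linux system with `MAP_LOCKED`. It shows why `mlock_vma_pages_range()` may hide population errors: `mmap(MAP_LOCKED)` reports success even when not every page could be faulted in, while an explicit `mlock()` on the same range does return an error such as `ENOMEM` or `EAGAIN`.

```c
/*
 * Hypothetical userspace sketch (not from this commit): demonstrates the
 * behaviour behind the "Hide errors from mmap() and other callers" comment.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;

	/* Succeeds even if the kernel could not fault in every page. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* An explicit mlock() still reports what MAP_LOCKED hides. */
	if (mlock(p, len) != 0)
		perror("mlock");

	memset(p, 0, len);	/* touch the pages */
	munmap(p, len);
	return 0;
}
```

The second half of the diff makes the complementary simplification in the mlock_fixup() path: instead of downgrading mmap_sem around the fault-in and revalidating the vma afterwards, the fault-in now runs with the semaphore held for write, so the vma cannot change underneath and the -ENOMEM revalidation path disappears.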