author     Michel Lespinasse <walken@google.com>  2020-06-09 06:33:25 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-09 18:39:14 +0200
commit     d8ed45c5dcd455fc5848d47f86883a1b872ac0d0
tree       f9270b32da5f3f7be73b086c99d3dfc29a13161a /arch/powerpc/mm/fault.c
parent     DMA reservations: use the new mmap locking API
mmap locking API: use coccinelle to convert mmap_sem rwsem call sites
This change converts the existing mmap_sem rwsem calls to use the new mmap
locking API instead.
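(For context: at this point in the series, the new API consists of thin inline
wrappers around the same rwsem, added by the preceding patches in
include/linux/mmap_lock.h. The sketch below is illustrative rather than a
verbatim copy of that header; each wrapper simply forwards to the rwsem
primitive it replaces, so this conversion is functionally a no-op.)

/* Illustrative sketch of the wrappers behind the new API; each one
 * forwards to the corresponding rwsem primitive on mm->mmap_sem. */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_sem) != 0;
}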
The change is generated using coccinelle with the following rule:
// spatch --sp-file mmap_lock_api.cocci --in-place --include-headers --dir .
@@
expression mm;
@@
(
-init_rwsem
+mmap_init_lock
|
-down_write
+mmap_write_lock
|
-down_write_killable
+mmap_write_lock_killable
|
-down_write_trylock
+mmap_write_trylock
|
-up_write
+mmap_write_unlock
|
-downgrade_write
+mmap_write_downgrade
|
-down_read
+mmap_read_lock
|
-down_read_killable
+mmap_read_lock_killable
|
-down_read_trylock
+mmap_read_trylock
|
-up_read
+mmap_read_unlock
)
-(&mm->mmap_sem)
+(mm)
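Applied to a call site, the rule renames the locking primitive and rewrites
its argument from &mm->mmap_sem to the bare mm pointer; surrounding code is
left untouched. For example (an illustrative snippet, not taken from this
diff), a sequence such as:

	down_read(&mm->mmap_sem);	/* old spelling */
	vma = find_vma(mm, address);
	up_read(&mm->mmap_sem);

becomes:

	mmap_read_lock(mm);		/* new spelling */
	vma = find_vma(mm, address);
	mmap_read_unlock(mm);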
Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-5-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/powerpc/mm/fault.c')
 arch/powerpc/mm/fault.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 39d23a557bef..ff3653e67c7b 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -108,7 +108,7 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
 	 * Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	return __bad_area_nosemaphore(regs, address, si_code);
 }
@@ -144,7 +144,7 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
 	 */
 	pkey = vma_pkey(vma);
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * If we are in kernel mode, bail out with a SEGV, this will
@@ -551,12 +551,12 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * source. If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 */
-	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+	if (unlikely(!mmap_read_trylock(mm))) {
 		if (!is_user && !search_exception_tables(regs->nip))
 			return bad_area_nosemaphore(regs, address);
 
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	} else {
 		/*
 		 * The above down_read_trylock() might have succeeded in
@@ -580,7 +580,7 @@ retry:
 			if (!must_retry)
 				return bad_area(regs, address);
 
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			if (fault_in_pages_readable((const char __user *)regs->nip,
 						    sizeof(unsigned int)))
 				return bad_area_nosemaphore(regs, address);
@@ -625,7 +625,7 @@ good_area:
 		}
 	}
 
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 	if (unlikely(fault & VM_FAULT_ERROR))
 		return mm_fault_error(regs, address, fault);