author	Carlos Llamas <cmllamas@google.com>	2023-12-01 18:21:56 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-12-05 01:23:41 +0100
commit	e50f4e6cc9bfaca655d3b6a3506d27cf2caa1d40 (patch)
tree	b06e69452b05c9ec524ca181604f55c038659478 /drivers/android
parent	binder: avoid user addresses in debug logs (diff)
binder: reverse locking order in shrinker callback
The locking order currently requires the alloc->mutex to be acquired first, followed by the mmap lock. However, the alloc->mutex is converted into a spinlock in subsequent commits, so the order needs to be reversed to avoid nesting the sleeping mmap lock under the spinlock.

The shrinker's callback binder_alloc_free_page() is the only place that needs to be reordered, since the other functions have been refactored and no longer nest these locks.

Some minor cosmetic changes are also included in this patch.

Signed-off-by: Carlos Llamas <cmllamas@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-28-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
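To illustrate the ordering rule the patch establishes, here is a minimal sketch (not the binder driver code itself; the function name reclaim_one_page and the alloc_lock parameter are made up for illustration): the mm is pinned and the sleeping mmap lock is tried first, and only afterwards is the lock that later commits turn into a spinlock taken, so the sleeping lock is never nested under it.

/*
 * Minimal sketch of the lock ordering this patch enforces; the names
 * reclaim_one_page and alloc_lock are illustrative, not the driver's API.
 */
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>

static bool reclaim_one_page(struct mm_struct *mm, spinlock_t *alloc_lock)
{
	/* Pin the mm and try the sleeping mmap lock first. */
	if (!mmget_not_zero(mm))
		return false;
	if (!mmap_read_trylock(mm)) {
		mmput_async(mm);
		return false;
	}

	/*
	 * Only now take the lock that is about to become a spinlock;
	 * the sleeping mmap lock is never acquired while it is held.
	 */
	spin_lock(alloc_lock);
	/* ... look up the VMA, isolate and detach the page ... */
	spin_unlock(alloc_lock);

	mmap_read_unlock(mm);
	mmput_async(mm);
	return true;
}

The same order is equally valid while alloc->mutex is still a mutex, which is why this reordering can land ahead of the spinlock conversion.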
Diffstat (limited to 'drivers/android')
-rw-r--r--	drivers/android/binder_alloc.c	| 46
1 file changed, 22 insertions(+), 24 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 5783675f2970..a3e56637db4f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1061,35 +1061,39 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
				       void *cb_arg)
	__must_hold(lock)
{
-	struct mm_struct *mm = NULL;
-	struct binder_lru_page *page = container_of(item,
-						    struct binder_lru_page,
-						    lru);
-	struct binder_alloc *alloc;
+	struct binder_lru_page *page = container_of(item, typeof(*page), lru);
+	struct binder_alloc *alloc = page->alloc;
+	struct mm_struct *mm = alloc->mm;
+	struct vm_area_struct *vma;
+	struct page *page_to_free;
	unsigned long page_addr;
	size_t index;
-	struct vm_area_struct *vma;

-	alloc = page->alloc;
+	if (!mmget_not_zero(mm))
+		goto err_mmget;
+	if (!mmap_read_trylock(mm))
+		goto err_mmap_read_lock_failed;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;
-
	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = alloc->buffer + index * PAGE_SIZE;

-	mm = alloc->mm;
-	if (!mmget_not_zero(mm))
-		goto err_mmget;
-	if (!mmap_read_trylock(mm))
-		goto err_mmap_read_lock_failed;
	vma = vma_lookup(mm, page_addr);
	if (vma && vma != binder_alloc_get_vma(alloc))
		goto err_invalid_vma;

+	trace_binder_unmap_kernel_start(alloc, index);
+
+	page_to_free = page->page_ptr;
+	page->page_ptr = NULL;
+
+	trace_binder_unmap_kernel_end(alloc, index);
+
	list_lru_isolate(lru, item);
+	mutex_unlock(&alloc->mutex);
	spin_unlock(lock);

	if (vma) {
@@ -1099,28 +1103,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,

		trace_binder_unmap_user_end(alloc, index);
	}
+
	mmap_read_unlock(mm);
	mmput_async(mm);
-
-	trace_binder_unmap_kernel_start(alloc, index);
-
-	__free_page(page->page_ptr);
-	page->page_ptr = NULL;
-
-	trace_binder_unmap_kernel_end(alloc, index);
+	__free_page(page_to_free);

	spin_lock(lock);
-	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_invalid_vma:
+err_page_already_freed:
+	mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
	mmap_read_unlock(mm);
err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
-err_page_already_freed:
-	mutex_unlock(&alloc->mutex);
-err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
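Two related design choices are visible in the diff: the page pointer is snapshotted into page_to_free while the alloc lock is still held, so __free_page() runs only after both the alloc lock and the LRU spinlock have been dropped, and the error labels now unwind in the reverse of the new acquisition order (alloc lock, then mmap lock, then the mm reference).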