author     Mike Galbraith <umgwanakikbuti@gmail.com>  2022-01-22 07:14:17 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-01-22 07:33:37 +0100
commit     a37265995c867a4e413761d846cef0445b08d6d5 (patch)
tree       994b5c69c3d69573702b62d045868f235b75ace5
parent     zsmalloc: replace per zpage lock with pool->migrate_lock (diff)
download   linux-a37265995c867a4e413761d846cef0445b08d6d5.tar.xz
           linux-a37265995c867a4e413761d846cef0445b08d6d5.zip
zsmalloc: replace get_cpu_var with local_lock
The usage of get_cpu_var() in zs_map_object() is problematic because it disables preemption and makes it impossible to acquire any sleeping lock on PREEMPT_RT, such as a spinlock_t.

Replace the get_cpu_var() usage with a local_lock_t which is embedded in struct mapping_area. It ensures that access to the struct is synchronized against all users on the same CPU.

[minchan: remove the bit_spin_lock part and change the title]

Link: https://lkml.kernel.org/r/20211115185909.3949505-10-minchan@kernel.org
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/zsmalloc.c  11
1 file changed, 8 insertions, 3 deletions
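For readers less familiar with the local_lock API, here is a minimal, self-contained sketch of the conversion pattern this patch applies. The names scratch_area and use_scratch() are illustrative stand-ins, not part of the zsmalloc code; the actual change is in the diff that follows.

/*
 * Illustrative sketch of the get_cpu_var() -> local_lock() conversion.
 * "scratch_area" and "use_scratch" are made-up names for this example.
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct scratch_area {
	local_lock_t lock;	/* serializes all users on the same CPU */
	char *buf;		/* per-CPU scratch buffer */
};

static DEFINE_PER_CPU(struct scratch_area, scratch_area) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void use_scratch(void)
{
	struct scratch_area *area;

	/*
	 * Old pattern (problematic on PREEMPT_RT):
	 *
	 *	area = &get_cpu_var(scratch_area);	// disables preemption
	 *	...					// cannot take spinlock_t
	 *	put_cpu_var(scratch_area);		// re-enables preemption
	 *
	 * New pattern: on !PREEMPT_RT local_lock() still disables
	 * preemption, but on PREEMPT_RT it becomes a per-CPU sleeping
	 * lock, so a spinlock_t (which may sleep there) can be acquired
	 * inside the critical section.
	 */
	local_lock(&scratch_area.lock);
	area = this_cpu_ptr(&scratch_area);

	/* ... operate on area->buf ... */

	local_unlock(&scratch_area.lock);
}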
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 1b26fa6b9fa8..9152fbde33b5 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -65,6 +65,7 @@
#include <linux/wait.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
+#include <linux/local_lock.h>
#define ZSPAGE_MAGIC 0x58
@@ -276,6 +277,7 @@ struct zspage {
};
struct mapping_area {
+ local_lock_t lock;
char *vm_buf; /* copy buffer for objects that span pages */
char *vm_addr; /* address of kmap_atomic()'ed pages */
enum zs_mapmode vm_mm; /* mapping mode */
@@ -451,7 +453,9 @@ MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
-static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
+static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};
static __maybe_unused int is_first_page(struct page *page)
{
@@ -1269,7 +1273,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
class = zspage_class(pool, zspage);
off = (class->size * obj_idx) & ~PAGE_MASK;
- area = &get_cpu_var(zs_map_area);
+ local_lock(&zs_map_area.lock);
+ area = this_cpu_ptr(&zs_map_area);
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
@@ -1320,7 +1325,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
__zs_unmap_object(area, pages, off, class->size);
}
- put_cpu_var(zs_map_area);
+ local_unlock(&zs_map_area.lock);
migrate_read_unlock(zspage);
}