author    James Morse <james.morse@arm.com>  2020-05-01 18:45:41 +0200
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2020-05-19 19:51:10 +0200
commit    062022315e8ad9e0628515dfc756ab54b5fdb26b (patch)
tree      ad804a60ffac8836c902c50e815f4d51468ebaed /mm/memory-failure.c
parent    Linux 5.7-rc6 (diff)
mm/memory-failure: Add memory_failure_queue_kick()
The GHES code calls memory_failure_queue() from IRQ context to schedule work on the current CPU so that memory_failure() can sleep.

For synchronous memory errors the arch code needs to know that any signals memory_failure() will trigger are pending before it returns to user-space, possibly when exiting from the IRQ.

Add a helper to kick the memory failure queue, to ensure the scheduled work has happened. This has to be called from process context, by which point the caller may have been migrated away from the original CPU, so pass the CPU the work was queued on. Change memory_failure_work_func() to permit being called on the 'wrong' CPU.

Signed-off-by: James Morse <james.morse@arm.com>
Tested-by: Tyler Baicar <baicar@os.amperecomputing.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
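As background for how the helper is meant to be used, here is a minimal sketch (not part of the patch) of an arch's synchronous-error path: queue the error from IRQ context, then kick the queue from process context before returning to user-space. The example_* function names and the way the queuing CPU is remembered are hypothetical; only memory_failure_queue() and memory_failure_queue_kick() are real interfaces.

#include <linux/mm.h>	/* memory_failure_queue(); kick helper prototype assumed here too */
#include <linux/smp.h>	/* smp_processor_id() */

/* Hypothetical bookkeeping: which CPU the work was queued on. */
static int example_queued_cpu;

/* Runs in IRQ context when a synchronous memory error is notified. */
static void example_sync_error_irq(unsigned long pfn)
{
	/* Schedule memory_failure() for this pfn; the queued work may sleep. */
	memory_failure_queue(pfn, 0);
	example_queued_cpu = smp_processor_id();
}

/* Runs later in process context, before returning to user-space. */
static void example_before_return_to_user(void)
{
	/*
	 * Drain the queue filled in IRQ context so any SIGBUS raised by
	 * memory_failure() is pending before user-space resumes, even if
	 * this task has since migrated to a different CPU.
	 */
	memory_failure_queue_kick(example_queued_cpu);
}

The kick works because cancel_work_sync() removes (or waits for) the pending work item and memory_failure_work_func() is then called directly; since the work function now finds its queue via container_of(work, ...) instead of this_cpu_ptr(), running it on the 'wrong' CPU is safe.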
Diffstat (limited to 'mm/memory-failure.c')
 mm/memory-failure.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index a96364be8ab4..c4afb407bf0f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1493,7 +1493,7 @@ static void memory_failure_work_func(struct work_struct *work)
 	unsigned long proc_flags;
 	int gotten;
 
-	mf_cpu = this_cpu_ptr(&memory_failure_cpu);
+	mf_cpu = container_of(work, struct memory_failure_cpu, work);
 	for (;;) {
 		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
 		gotten = kfifo_get(&mf_cpu->fifo, &entry);
@@ -1507,6 +1507,19 @@ static void memory_failure_work_func(struct work_struct *work)
 	}
 }
 
+/*
+ * Process memory_failure work queued on the specified CPU.
+ * Used to avoid return-to-userspace racing with the memory_failure workqueue.
+ */
+void memory_failure_queue_kick(int cpu)
+{
+	struct memory_failure_cpu *mf_cpu;
+
+	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
+	cancel_work_sync(&mf_cpu->work);
+	memory_failure_work_func(&mf_cpu->work);
+}
+
 static int __init memory_failure_init(void)
 {
 	struct memory_failure_cpu *mf_cpu;