author    David Woodhouse <dwmw@amazon.co.uk>	2020-10-27 14:55:21 +0100
committer Paolo Bonzini <pbonzini@redhat.com>	2020-11-15 15:49:10 +0100
commit    28f1326710555bbe666f64452d08f2d7dd657cae
tree      0500d5c7ff66eda5051b0221b04c73b28c8d8942	/fs/eventfd.c
parent    kvm/eventfd: Use priority waitqueue to catch events before userspace
eventfd: Export eventfd_ctx_do_read()
Where events are consumed in the kernel, for example by KVM's irqfd_wakeup() and VFIO's virqfd_wakeup(), they currently lack a mechanism to drain the eventfd's counter.

Since the wait queue is already locked while the wakeup functions are invoked, all they really need to do is call eventfd_ctx_do_read(). Add a check for the lock, and export it for them.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20201027135523.646811-2-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
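[Editor's sketch, not part of the patch] To illustrate the intended use, the minimal example below shows how an in-kernel consumer, modelled loosely on irqfd_wakeup()/virqfd_wakeup(), could drain the counter from its waitqueue wakeup callback once eventfd_ctx_do_read() is exported. The struct and function names (my_virq, my_virq_wakeup) are hypothetical; only eventfd_ctx_do_read(), key_to_poll() and the wait_queue_func_t signature are real kernel interfaces.

	/*
	 * Hedged sketch of an in-kernel eventfd consumer.  The wakeup
	 * callback runs with ctx->wqh.lock already held by the waker,
	 * so it may call the newly exported eventfd_ctx_do_read()
	 * directly, satisfying the new lockdep_assert_held() check.
	 */
	#include <linux/eventfd.h>
	#include <linux/kernel.h>
	#include <linux/poll.h>
	#include <linux/wait.h>

	struct my_virq {			/* hypothetical consumer state */
		struct eventfd_ctx *eventfd;
		wait_queue_entry_t wait;
	};

	static int my_virq_wakeup(wait_queue_entry_t *wait, unsigned int mode,
				  int sync, void *key)
	{
		struct my_virq *virq = container_of(wait, struct my_virq, wait);
		__poll_t flags = key_to_poll(key);

		if (flags & EPOLLIN) {
			__u64 cnt;

			/* Drain the counter; waitqueue lock is held here. */
			eventfd_ctx_do_read(virq->eventfd, &cnt);
			/* ... deliver the event in-kernel ... */
		}
		return 0;
	}

The callback would be registered against the eventfd's waitqueue via add_wait_queue() (or its priority variant, per the parent commit), which is what guarantees it is invoked under ctx->wqh.lock.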
Diffstat (limited to 'fs/eventfd.c')
-rw-r--r--	fs/eventfd.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/eventfd.c b/fs/eventfd.c
index df466ef81ddd..e265b6dd4f34 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -182,11 +182,14 @@ static __poll_t eventfd_poll(struct file *file, poll_table *wait)
 	return events;
 }
 
-static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
 {
+	lockdep_assert_held(&ctx->wqh.lock);
+
 	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
 	ctx->count -= *cnt;
 }
+EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);
 
 /**
  * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue.