author		Matthew Wilcox <willy@infradead.org>	2018-11-16 20:37:06 +0100
committer	Matthew Wilcox <willy@infradead.org>	2018-11-16 22:38:50 +0100
commit		c5bbd4515a05f8acb7e6ab6297044a529762cbf5
tree		d48233766456a2fb166f583c87d2fb4b36d618ec /fs
parent		dax: Make sure the unlocking entry isn't locked
dax: Reinstate RCU protection of inode
For the device-dax case, it is possible that the inode can go away
underneath us.  The rcu_read_lock() was there to prevent it from
being freed, and not (as I thought) to protect the tree.  Bring back
the rcu_read_lock() protection.  Also add a little kernel-doc; while
this function is not exported to modules, it is used from outside dax.c.

Reported-by: Dan Williams <dan.j.williams@intel.com>
Fixes: 9f32d221301c ("dax: Convert dax_lock_mapping_entry to XArray")
Signed-off-by: Matthew Wilcox <willy@infradead.org>
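To make the lifetime problem concrete, here is a minimal sketch of the RCU
pattern the message describes.  This is illustrative only, not fs/dax.c:
struct obj, global_obj, reader() and writer_free() are hypothetical names
standing in for the inode/mapping and its users.  The reader holds
rcu_read_lock() across every access to the shared pointer; the writer
unpublishes the pointer and defers the free past any running readers, so
the object cannot be freed underneath them.

/*
 * Illustrative sketch only -- not the DAX code.  struct obj stands
 * in for the inode/mapping; all names here are hypothetical.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	int data;
	struct rcu_head rcu;
};

static struct obj __rcu *global_obj;

/* Reader: global_obj cannot be freed between lock and unlock. */
static int reader(void)
{
	struct obj *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(global_obj);
	if (p)
		val = p->data;		/* safe: any free is deferred */
	rcu_read_unlock();
	return val;
}

/* Writer: unpublish the pointer, free it only after a grace period. */
static void writer_free(void)
{
	struct obj *old;

	old = rcu_dereference_protected(global_obj, 1);
	rcu_assign_pointer(global_obj, NULL);
	if (old)
		kfree_rcu(old, rcu);	/* waits out all current readers */
}

Without the rcu_read_lock()/rcu_read_unlock() pair in reader(), the writer
could free the object between the rcu_dereference() and the p->data load,
which is exactly the device-dax hazard the patch closes for page->mapping.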
Diffstat (limited to 'fs')
-rw-r--r--	fs/dax.c	22
1 file changed, 19 insertions, 3 deletions
diff --git a/fs/dax.c b/fs/dax.c
index 7944417f5a71..ce87d21b3805 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -353,16 +353,27 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
 
+/*
+ * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
+ * @page: The page whose entry we want to lock
+ *
+ * Context: Process context.
+ * Return: %true if the entry was locked or does not need to be locked.
+ */
 bool dax_lock_mapping_entry(struct page *page)
 {
 	XA_STATE(xas, NULL, 0);
 	void *entry;
+	bool locked;
 
+	/* Ensure page->mapping isn't freed while we look at it */
+	rcu_read_lock();
 	for (;;) {
 		struct address_space *mapping = READ_ONCE(page->mapping);
 
+		locked = false;
 		if (!dax_mapping(mapping))
-			return false;
+			break;
 
 		/*
 		 * In the device-dax case there's no need to lock, a
@@ -371,8 +382,9 @@ bool dax_lock_mapping_entry(struct page *page)
 		 * otherwise we would not have a valid pfn_to_page()
 		 * translation.
 		 */
+		locked = true;
 		if (S_ISCHR(mapping->host->i_mode))
-			return true;
+			break;
 
 		xas.xa = &mapping->i_pages;
 		xas_lock_irq(&xas);
@@ -383,14 +395,18 @@ bool dax_lock_mapping_entry(struct page *page)
 		xas_set(&xas, page->index);
 		entry = xas_load(&xas);
 		if (dax_is_locked(entry)) {
+			rcu_read_unlock();
 			entry = get_unlocked_entry(&xas);
 			xas_unlock_irq(&xas);
+			rcu_read_lock();
 			continue;
 		}
 		dax_lock_entry(&xas, entry);
 		xas_unlock_irq(&xas);
-		return true;
+		break;
 	}
+	rcu_read_unlock();
+	return locked;
 }
 
 void dax_unlock_mapping_entry(struct page *page)
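A note on the last hunk (an observation about the patch, not part of the
commit message): get_unlocked_entry() sleeps while waiting for a locked
entry, and sleeping is not allowed inside an RCU read-side critical
section, so the patch drops the RCU lock around that wait and re-takes it
before looping.  Re-reading page->mapping on the next pass then revalidates
state that may have changed while the lock was dropped.  A generic sketch
of that drop-and-reacquire shape, with hypothetical helpers
(object_still_valid(), entry_is_locked(), wait_for_entry(), lock_entry())
standing in for the DAX ones:

#include <linux/rcupdate.h>

/* Hypothetical helpers, not real kernel APIs: */
bool object_still_valid(void);	/* cf. re-reading page->mapping */
bool entry_is_locked(void);	/* cf. dax_is_locked() */
void wait_for_entry(void);	/* sleeps, cf. get_unlocked_entry() */
void lock_entry(void);		/* cf. dax_lock_entry() */

static bool lock_example(void)
{
	bool locked = false;

	rcu_read_lock();
	for (;;) {
		if (!object_still_valid())	/* revalidate on every pass */
			break;
		if (entry_is_locked()) {
			rcu_read_unlock();	/* about to sleep: leave RCU */
			wait_for_entry();	/* may sleep */
			rcu_read_lock();	/* re-enter RCU ... */
			continue;		/* ... and revalidate */
		}
		lock_entry();
		locked = true;
		break;
	}
	rcu_read_unlock();
	return locked;
}

The single-exit shape with the locked flag mirrors what the patch does to
dax_lock_mapping_entry(): every early return becomes a break so that the
final rcu_read_unlock() is reached on all paths.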