Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c  141
1 file changed, 103 insertions, 38 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 93f4f5ee07bf..bb0c4d0038db 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2240,6 +2240,7 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
}
res = d_splice_alias(inode, found);
if (res) {
+ d_lookup_done(found);
dput(found);
return res;
}
@@ -2247,10 +2248,16 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
}
EXPORT_SYMBOL(d_add_ci);
-
-static inline bool d_same_name(const struct dentry *dentry,
- const struct dentry *parent,
- const struct qstr *name)
+/**
+ * d_same_name - compare dentry name with case-exact name
+ * @parent: parent dentry
+ * @dentry: the negative dentry that was passed to the parent's lookup func
+ * @name: the case-exact name to be associated with the returned dentry
+ *
+ * Return: true if names are same, or false
+ */
+bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
+ const struct qstr *name)
{
if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
if (dentry->d_name.len != name->len)
@@ -2261,6 +2268,49 @@ static inline bool d_same_name(const struct dentry *dentry,
dentry->d_name.len, dentry->d_name.name,
name) == 0;
}
+EXPORT_SYMBOL_GPL(d_same_name);
+
+/*
+ * This is __d_lookup_rcu() when the parent dentry has
+ * DCACHE_OP_COMPARE, which makes things much nastier.
+ */
+static noinline struct dentry *__d_lookup_rcu_op_compare(
+ const struct dentry *parent,
+ const struct qstr *name,
+ unsigned *seqp)
+{
+ u64 hashlen = name->hash_len;
+ struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
+ struct hlist_bl_node *node;
+ struct dentry *dentry;
+
+ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
+ int tlen;
+ const char *tname;
+ unsigned seq;
+
+seqretry:
+ seq = raw_seqcount_begin(&dentry->d_seq);
+ if (dentry->d_parent != parent)
+ continue;
+ if (d_unhashed(dentry))
+ continue;
+ if (dentry->d_name.hash != hashlen_hash(hashlen))
+ continue;
+ tlen = dentry->d_name.len;
+ tname = dentry->d_name.name;
+ /* we want a consistent (name,len) pair */
+ if (read_seqcount_retry(&dentry->d_seq, seq)) {
+ cpu_relax();
+ goto seqretry;
+ }
+ if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0)
+ continue;
+ *seqp = seq;
+ return dentry;
+ }
+ return NULL;
+}
/**
* __d_lookup_rcu - search for a dentry (racy, store-free)
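Aside: the retry loop in __d_lookup_rcu_op_compare() above is the usual sequence-count reader pattern. A minimal userspace sketch of that pattern with C11 atomics follows; struct seq_data and read_consistent() are illustrative names, not the kernel's seqcount API.

#include <stdatomic.h>

/*
 * Illustrative sketch only: a sequence-count protected reader, loosely
 * mirroring the raw_seqcount_begin()/read_seqcount_retry() pairing above.
 */
struct seq_data {
	atomic_uint seq;	/* odd while a writer is mid-update */
	int a, b;		/* fields that must be read as a consistent pair */
};

static void read_consistent(struct seq_data *d, int *a, int *b)
{
	unsigned start;

	do {
		/* wait out an in-progress writer (odd count) */
		do {
			start = atomic_load_explicit(&d->seq, memory_order_acquire);
		} while (start & 1);

		*a = d->a;
		*b = d->b;

		/* retry if a writer bumped the count while we were reading */
	} while (atomic_load_explicit(&d->seq, memory_order_acquire) != start);
}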
@@ -2308,6 +2358,9 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
* Keep the two functions in sync.
*/
+ if (unlikely(parent->d_flags & DCACHE_OP_COMPARE))
+ return __d_lookup_rcu_op_compare(parent, name, seqp);
+
/*
* The hash list is protected using RCU.
*
@@ -2324,7 +2377,6 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
unsigned seq;
-seqretry:
/*
* The dentry sequence count protects us from concurrent
* renames, and thus protects parent and name fields.
@@ -2347,28 +2399,10 @@ seqretry:
continue;
if (d_unhashed(dentry))
continue;
-
- if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
- int tlen;
- const char *tname;
- if (dentry->d_name.hash != hashlen_hash(hashlen))
- continue;
- tlen = dentry->d_name.len;
- tname = dentry->d_name.name;
- /* we want a consistent (name,len) pair */
- if (read_seqcount_retry(&dentry->d_seq, seq)) {
- cpu_relax();
- goto seqretry;
- }
- if (parent->d_op->d_compare(dentry,
- tlen, tname, name) != 0)
- continue;
- } else {
- if (dentry->d_name.hash_len != hashlen)
- continue;
- if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
- continue;
- }
+ if (dentry->d_name.hash_len != hashlen)
+ continue;
+ if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
+ continue;
*seqp = seq;
return dentry;
}
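Aside: the fast path above compares d_name.hash_len against the caller's hashlen in a single check; the name length and hash are packed into one 64-bit value, so one comparison rejects a candidate that differs in either. A small standalone C sketch of that packing; pack_hashlen() and friends are stand-in names, not the kernel's hashlen_hash()/hashlen_len() helpers used in the diff.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch of the hash_len trick: length in the high 32 bits,
 * hash in the low 32 bits, so a single 64-bit compare covers both.
 */
static inline uint64_t pack_hashlen(uint32_t hash, uint32_t len)
{
	return (uint64_t)len << 32 | hash;
}

static inline uint32_t packed_hash(uint64_t hashlen) { return (uint32_t)hashlen; }
static inline uint32_t packed_len(uint64_t hashlen) { return (uint32_t)(hashlen >> 32); }

int main(void)
{
	uint64_t a = pack_hashlen(0xdeadbeef, 5);
	uint64_t b = pack_hashlen(0xdeadbeef, 7);	/* same hash, different length */

	printf("equal: %d\n", a == b);			/* 0: rejected by one compare */
	printf("hash %#x, len %u\n", packed_hash(a), packed_len(a));
	return 0;
}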
@@ -2563,7 +2597,15 @@ EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{
-
+ /*
+ * The caller holds a spinlock (dentry::d_lock). On !PREEMPT_RT
+ * kernels spin_lock() implicitly disables preemption, but not on
+ * PREEMPT_RT. So for RT it has to be done explicitly to protect
+ * the sequence count write side critical section against a reader
+ * or another writer preempting, which would result in a live lock.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
for (;;) {
unsigned n = dir->i_dir_seq;
if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
@@ -2572,9 +2614,13 @@ static inline unsigned start_dir_add(struct inode *dir)
}
}
-static inline void end_dir_add(struct inode *dir, unsigned n)
+static inline void end_dir_add(struct inode *dir, unsigned int n,
+ wait_queue_head_t *d_wait)
{
smp_store_release(&dir->i_dir_seq, n + 2);
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ wake_up_all(d_wait);
}
static void d_wait_lookup(struct dentry *dentry)
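Aside: start_dir_add()/end_dir_add() above implement a small open-coded sequence count on i_dir_seq: the value is bumped to odd while entries are being added and to the next even value when done, with the store-release pairing against readers. A userspace analogue with C11 atomics; dir_seq, start_update() and end_update() are illustrative names, not the kernel code.

#include <stdatomic.h>

/*
 * Illustrative sketch of the i_dir_seq write-side protocol:
 * odd while an update is in flight, next even value once it is done.
 */
static atomic_uint dir_seq;

static unsigned start_update(void)
{
	for (;;) {
		unsigned n = atomic_load_explicit(&dir_seq, memory_order_relaxed);

		/* claim the write side only when no update is in flight (even) */
		if (!(n & 1) &&
		    atomic_compare_exchange_weak(&dir_seq, &n, n + 1))
			return n;
	}
}

static void end_update(unsigned n)
{
	/* release: readers must observe the new entries before the even count */
	atomic_store_explicit(&dir_seq, n + 2, memory_order_release);
}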
@@ -2701,32 +2747,50 @@ mismatch:
}
EXPORT_SYMBOL(d_alloc_parallel);
-void __d_lookup_done(struct dentry *dentry)
+/*
+ * - Unhash the dentry
+ * - Retrieve and clear the waitqueue head in dentry
+ * - Return the waitqueue head
+ */
+static wait_queue_head_t *__d_lookup_unhash(struct dentry *dentry)
{
- struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
- dentry->d_name.hash);
+ wait_queue_head_t *d_wait;
+ struct hlist_bl_head *b;
+
+ lockdep_assert_held(&dentry->d_lock);
+
+ b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash);
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
- wake_up_all(dentry->d_wait);
+ d_wait = dentry->d_wait;
dentry->d_wait = NULL;
hlist_bl_unlock(b);
INIT_HLIST_NODE(&dentry->d_u.d_alias);
INIT_LIST_HEAD(&dentry->d_lru);
+ return d_wait;
+}
+
+void __d_lookup_unhash_wake(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ wake_up_all(__d_lookup_unhash(dentry));
+ spin_unlock(&dentry->d_lock);
}
-EXPORT_SYMBOL(__d_lookup_done);
+EXPORT_SYMBOL(__d_lookup_unhash_wake);
/* inode->i_lock held if inode is non-NULL */
static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
+ wait_queue_head_t *d_wait;
struct inode *dir = NULL;
unsigned n;
spin_lock(&dentry->d_lock);
if (unlikely(d_in_lookup(dentry))) {
dir = dentry->d_parent->d_inode;
n = start_dir_add(dir);
- __d_lookup_done(dentry);
+ d_wait = __d_lookup_unhash(dentry);
}
if (inode) {
unsigned add_flags = d_flags_for_inode(inode);
@@ -2738,7 +2802,7 @@ static inline void __d_add(struct dentry *dentry, struct inode *inode)
}
__d_rehash(dentry);
if (dir)
- end_dir_add(dir, n);
+ end_dir_add(dir, n, d_wait);
spin_unlock(&dentry->d_lock);
if (inode)
spin_unlock(&inode->i_lock);
@@ -2885,6 +2949,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
bool exchange)
{
struct dentry *old_parent, *p;
+ wait_queue_head_t *d_wait;
struct inode *dir = NULL;
unsigned n;
@@ -2915,7 +2980,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
if (unlikely(d_in_lookup(target))) {
dir = target->d_parent->d_inode;
n = start_dir_add(dir);
- __d_lookup_done(target);
+ d_wait = __d_lookup_unhash(target);
}
write_seqcount_begin(&dentry->d_seq);
@@ -2951,7 +3016,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
write_seqcount_end(&dentry->d_seq);
if (dir)
- end_dir_add(dir, n);
+ end_dir_add(dir, n, d_wait);
if (dentry->d_parent != old_parent)
spin_unlock(&dentry->d_parent->d_lock);
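Aside: the __d_lookup_unhash()/end_dir_add() rework above follows a common shape: detach the waiter queue head while the lock is held, but only issue the wake-up after the lock (and, on PREEMPT_RT, the preempt-disabled write section) has been released. A small pthread sketch of the same shape; struct lookup, finish_lookup() and the field names are made up, not the dcache code.

#include <pthread.h>
#include <stddef.h>

/*
 * Illustrative sketch: detach the waiter queue under the lock,
 * wake it only after the lock has been dropped.
 */
struct lookup {
	pthread_mutex_t lock;
	pthread_cond_t *waiters;	/* NULL once the lookup has completed */
	int done;
};

static pthread_cond_t *finish_lookup_locked(struct lookup *l)
{
	/* caller holds l->lock */
	pthread_cond_t *w = l->waiters;

	l->done = 1;
	l->waiters = NULL;
	return w;
}

static void finish_lookup(struct lookup *l)
{
	pthread_cond_t *w;

	pthread_mutex_lock(&l->lock);
	w = finish_lookup_locked(l);
	pthread_mutex_unlock(&l->lock);

	/* broadcast only after dropping the lock */
	if (w)
		pthread_cond_broadcast(w);
}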