author     David Howells <dhowells@redhat.com>    2013-05-10 20:50:26 +0200
committer  David Howells <dhowells@redhat.com>    2013-06-19 15:16:47 +0200
commit     1362729b169b7903c7e739dbe7904994b0d8c47f
tree       31c6d2a831b94a1ed0b4efe2d3ff3d1a21c654e2    /fs/fscache/cookie.c
parent     FS-Cache: Fix object state machine to have separate work and wait states
FS-Cache: Simplify cookie retention for fscache_objects, fixing oops
Simplify the way fscache cache objects retain their cookie. The way I
implemented the cookie storage handling made synchronisation a pain (ie. the
object state machine can't rely on the cookie actually still being there).

Instead of the object being detached from the cookie and the cookie being
freed in __fscache_relinquish_cookie(), we defer both operations:

 (*) The detachment of the object from the list in the cookie now takes place
     in fscache_drop_object() and is thus governed by the object state machine
     (fscache_detach_from_cookie() has been removed).

 (*) The release of the cookie is now in fscache_object_destroy() - which is
     called by the cache backend just before it frees the object.

This means that the fscache_cookie struct is now available to the cache all
the way through from ->alloc_object() to ->drop_object() and ->put_object() -
meaning that it's no longer necessary to take object->lock to guarantee access.

However, __fscache_relinquish_cookie() doesn't wait for the object to go all
the way through to destruction before letting the netfs proceed. That would
massively slow down the netfs. Since __fscache_relinquish_cookie() leaves the
cookie around, it must therefore break all attachments to the netfs - which
includes ->def, ->netfs_data and any outstanding page read/writes.

To handle this, struct fscache_cookie now has an n_active counter:

 (1) This starts off initialised to 1.

 (2) Any time the cache needs to get at the netfs data, it calls
     fscache_use_cookie() to increment it - if it is not zero. If it was
     zero, then access is not permitted.

 (3) When the cache has finished with the data, it calls
     fscache_unuse_cookie() to decrement it. This does a wake-up on it if it
     reaches 0.

 (4) __fscache_relinquish_cookie() decrements n_active and then waits for it
     to reach 0. The initialisation to 1 in step (1) ensures that we only get
     wake-ups when we're trying to get rid of the cookie.

This leaves __fscache_relinquish_cookie() a lot simpler.

***

This fixes a problem in the current code whereby if fscache_invalidate() is
followed sufficiently quickly by fscache_relinquish_cookie() then it is
possible for __fscache_relinquish_cookie() to have detached the cookie from
the object and cleared the pointer before a thread is dispatched to process
the invalidation state in the object state machine.

Since the pending write clearance was deferred to the invalidation state to
make it asynchronous, we need to either wait in relinquishment for the stores
tree to be cleared in the invalidation state or we need to handle the
clearance in relinquishment.

Further, if the relinquishment code does clear the tree, then the invalidation
state needs to make the clearance contingent on still having the cookie to
hand (since that's where the tree is rooted) and we have to prevent the cookie
from disappearing for the duration.

This can lead to an oops like the following:

BUG: unable to handle kernel NULL pointer dereference at 000000000000000c
...
RIP: 0010:[<ffffffff8151023e>] _spin_lock+0xe/0x30
...
CR2: 000000000000000c
...
Process kslowd002 (...)
...
Call Trace:
 [<ffffffffa01c3278>] fscache_invalidate_writes+0x38/0xd0 [fscache]
 [<ffffffff810096f0>] ? __switch_to+0xd0/0x320
 [<ffffffff8105e759>] ? find_busiest_queue+0x69/0x150
 [<ffffffff8110ddd4>] ? slow_work_enqueue+0x104/0x180
 [<ffffffffa01c1303>] fscache_object_slow_work_execute+0x5e3/0x9d0 [fscache]
 [<ffffffff81096b67>] ? bit_waitqueue+0x17/0xd0
 [<ffffffff8110e233>] slow_work_execute+0x233/0x310
 [<ffffffff8110e515>] slow_work_thread+0x205/0x360
 [<ffffffff81096ca0>] ? autoremove_wake_function+0x0/0x40
 [<ffffffff8110e310>] ? slow_work_thread+0x0/0x360
 [<ffffffff81096936>] kthread+0x96/0xa0
 [<ffffffff8100c0ca>] child_rip+0xa/0x20
 [<ffffffff810968a0>] ? kthread+0x0/0xa0
 [<ffffffff8100c0c0>] ? child_rip+0x0/0x20

The parameter to fscache_invalidate_writes() was object->cookie, which is NULL.

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-By: Milosz Tanski <milosz@adfin.com>
Acked-by: Jeff Layton <jlayton@redhat.com>
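As an aside, a minimal userspace model of the n_active protocol in steps (1)
to (4) above may help to see the shape of the synchronisation. This is an
illustrative sketch only: struct cookie, cookie_use(), cookie_unuse() and
cookie_relinquish() are hypothetical stand-ins for fscache_cookie,
fscache_use_cookie(), fscache_unuse_cookie() and
__fscache_relinquish_cookie(), and a pthread mutex/condvar replaces the
kernel's atomic wait/wake primitives.

/* Userspace model of the n_active protocol; build with: cc -pthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct cookie {
        int n_active;                   /* step (1): starts at 1 */
        bool relinquished;
        pthread_mutex_t lock;
        pthread_cond_t zero;
};

/* Step (2): taken by the cache before it touches ->def/->netfs_data. */
static bool cookie_use(struct cookie *c)
{
        bool ok;

        pthread_mutex_lock(&c->lock);
        ok = !c->relinquished && c->n_active > 0;
        if (ok)
                c->n_active++;
        pthread_mutex_unlock(&c->lock);
        return ok;
}

/* Step (3): drop the use count; wake the relinquisher if it hits 0. */
static void cookie_unuse(struct cookie *c)
{
        pthread_mutex_lock(&c->lock);
        if (--c->n_active == 0)
                pthread_cond_broadcast(&c->zero);
        pthread_mutex_unlock(&c->lock);
}

/* Step (4): drop the initial count and wait for all users to finish. */
static void cookie_relinquish(struct cookie *c)
{
        pthread_mutex_lock(&c->lock);
        c->relinquished = true;         /* no new uses are permitted */
        c->n_active--;
        while (c->n_active != 0)
                pthread_cond_wait(&c->zero, &c->lock);
        pthread_mutex_unlock(&c->lock);
        /* only now is it safe to clear ->def and ->netfs_data */
}

int main(void)
{
        struct cookie c = {
                .n_active = 1,
                .relinquished = false,
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .zero = PTHREAD_COND_INITIALIZER,
        };

        if (cookie_use(&c)) {
                /* ... access netfs data ... */
                cookie_unuse(&c);
        }
        cookie_relinquish(&c);
        printf("relinquished, n_active = %d\n", c.n_active);
        return 0;
}

The single user in main() succeeds because the count is still non-zero; once
cookie_relinquish() has run, any further cookie_use() fails and access to the
netfs data is refused.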
Diffstat (limited to 'fs/fscache/cookie.c')
-rw-r--r--    fs/fscache/cookie.c    80
1 file changed, 26 insertions(+), 54 deletions(-)
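The diff below shows only the relinquishment side of the new protocol; the
cache-side helpers named in the commit message, fscache_use_cookie() and
fscache_unuse_cookie(), live in a header that is not part of this file's
diff. A sketch consistent with the description above - the example_ names,
signatures and bodies are assumptions, not a copy of the real declarations -
might look like:

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/fscache-cache.h>

/*
 * Hypothetical sketch of the cache-side helpers described in the commit
 * message; not the real declarations from the fscache headers.
 */
static inline bool example_use_cookie(struct fscache_cookie *cookie)
{
        /* Step (2): only succeeds while n_active is still non-zero. */
        return atomic_inc_not_zero(&cookie->n_active) != 0;
}

static inline void example_unuse_cookie(struct fscache_cookie *cookie)
{
        /* Step (3): wake the waiter in __fscache_relinquish_cookie()
         * when the last user drops its count.
         */
        if (atomic_dec_and_test(&cookie->n_active))
                wake_up_atomic_t(&cookie->n_active);
}

The pairing matters: wait_on_atomic_t() in the rewritten
__fscache_relinquish_cookie() sleeps until a wake-up is issued on the same
counter, which only happens once the last user has dropped its count.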
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index eee436646989..0e91a3c9fdb2 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -95,6 +95,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
atomic_set(&cookie->usage, 1);
atomic_set(&cookie->n_children, 0);
+ /* We keep the active count elevated until relinquishment to prevent an
+ * attempt to wake up every time the object operations queue quiesces.
+ */
+ atomic_set(&cookie->n_active, 1);
+
atomic_inc(&parent->usage);
atomic_inc(&parent->n_children);
@@ -177,7 +182,6 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
cookie->flags =
(1 << FSCACHE_COOKIE_LOOKING_UP) |
- (1 << FSCACHE_COOKIE_CREATING) |
(1 << FSCACHE_COOKIE_NO_DATA_YET);
/* ask the cache to allocate objects for this cookie and its parent
@@ -467,7 +471,6 @@ EXPORT_SYMBOL(__fscache_update_cookie);
*/
void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
{
- struct fscache_cache *cache;
struct fscache_object *object;
fscache_stat(&fscache_n_relinquishes);
@@ -480,8 +483,11 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
return;
}
- _enter("%p{%s,%p},%d",
- cookie, cookie->def->name, cookie->netfs_data, retire);
+ _enter("%p{%s,%p,%d},%d",
+ cookie, cookie->def->name, cookie->netfs_data,
+ atomic_read(&cookie->n_active), retire);
+
+ ASSERTCMP(atomic_read(&cookie->n_active), >, 0);
if (atomic_read(&cookie->n_children) != 0) {
printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
@@ -489,62 +495,28 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
BUG();
}
- /* wait for the cookie to finish being instantiated (or to fail) */
- if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
- fscache_stat(&fscache_n_relinquishes_waitcrt);
- wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
- fscache_wait_bit, TASK_UNINTERRUPTIBLE);
- }
+ /* No further netfs-accessing operations on this cookie permitted */
+ set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
+ if (retire)
+ set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
-try_again:
spin_lock(&cookie->lock);
-
- /* break links with all the active objects */
- while (!hlist_empty(&cookie->backing_objects)) {
- int n_reads;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object,
- cookie_link);
-
- _debug("RELEASE OBJ%x", object->debug_id);
-
- set_bit(FSCACHE_COOKIE_WAITING_ON_READS, &cookie->flags);
- n_reads = atomic_read(&object->n_reads);
- if (n_reads) {
- int n_ops = object->n_ops;
- int n_in_progress = object->n_in_progress;
- spin_unlock(&cookie->lock);
- printk(KERN_ERR "FS-Cache:"
- " Cookie '%s' still has %d outstanding reads (%d,%d)\n",
- cookie->def->name,
- n_reads, n_ops, n_in_progress);
- wait_on_bit(&cookie->flags, FSCACHE_COOKIE_WAITING_ON_READS,
- fscache_wait_bit, TASK_UNINTERRUPTIBLE);
- printk("Wait finished\n");
- goto try_again;
- }
-
- /* detach each cache object from the object cookie */
- spin_lock(&object->lock);
- hlist_del_init(&object->cookie_link);
-
- cache = object->cache;
- object->cookie = NULL;
- if (retire)
- set_bit(FSCACHE_OBJECT_RETIRE, &object->flags);
+ hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
- spin_unlock(&object->lock);
-
- if (atomic_dec_and_test(&cookie->usage))
- /* the cookie refcount shouldn't be reduced to 0 yet */
- BUG();
}
+ spin_unlock(&cookie->lock);
- /* detach pointers back to the netfs */
+ /* Wait for cessation of activity requiring access to the netfs (when
+ * n_active reaches 0).
+ */
+ if (!atomic_dec_and_test(&cookie->n_active))
+ wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
+ TASK_UNINTERRUPTIBLE);
+
+ /* Clear pointers back to the netfs */
cookie->netfs_data = NULL;
cookie->def = NULL;
-
- spin_unlock(&cookie->lock);
+ BUG_ON(cookie->stores.rnode);
if (cookie->parent) {
ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
@@ -552,7 +524,7 @@ try_again:
atomic_dec(&cookie->parent->n_children);
}
- /* finally dispose of the cookie */
+ /* Dispose of the netfs's link to the cookie */
ASSERTCMP(atomic_read(&cookie->usage), >, 0);
fscache_cookie_put(cookie);