author		Benjamin LaHaise <bcrl@kvack.org>	2013-08-05 19:21:43 +0200
committer	Benjamin LaHaise <bcrl@kvack.org>	2013-08-05 19:21:43 +0200
commit		da90382c2ec367aac88ff6aa76afb659ee0e4235 (patch)
tree		b0fd73537a51048b61a23732559100371d777b07 /fs/aio.c
parent		aio: be defensive to ensure request batching is non-zero instead of BUG_ON() (diff)
aio: fix error handling and rcu usage in "convert the ioctx list to table lookup v3"
In the patch "aio: convert the ioctx list to table lookup v3", incorrect
handling was introduced in the ioctx_alloc() error path: when ioctx_alloc()
hit the aio_max_nr limit and returned -EAGAIN, the ioctx had already been
published via ioctx_add_table() but was then freed, leaving a stale entry in
mm->ioctx_table. Fix this by making ioctx_add_table() the last step in
ioctx_alloc(), after every check that can fail.
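
A minimal sketch of the "publish last" ordering the fix establishes; the
helper names below (alloc_and_init_ctx(), check_global_limits(),
publish_ctx(), and friends) are hypothetical simplifications, not the
actual fs/aio.c code:

	/* All fallible steps run while the object is still private; it
	 * becomes visible to other threads only as the final step, so
	 * the error path never has to retract a published pointer.
	 */
	static struct kioctx *alloc_ctx(struct mm_struct *mm)
	{
		struct kioctx *ctx;
		int err;

		ctx = alloc_and_init_ctx();		/* private so far */
		if (!ctx)
			return ERR_PTR(-ENOMEM);

		err = check_global_limits(ctx);		/* may fail: -EAGAIN */
		if (err)
			goto out_free;

		take_refs(ctx);				/* must be undone on error */

		err = publish_ctx(mm, ctx);		/* last fallible step */
		if (err)
			goto out_put;

		return ctx;				/* now globally visible */

	out_put:
		drop_refs(ctx);
	out_free:
		free_ctx(ctx);
		return ERR_PTR(err);
	}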
Also, several unnecessary rcu_dereference() calls were added that led to
RCU lockdep warnings in paths where access to mm->ioctx_table was already
serialized by a spin lock.
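
For background, a sketch of the two access patterns involved (not taken
verbatim from fs/aio.c): the reader side is where rcu_dereference() belongs;
the updater side shows both the plain load this patch uses and
rcu_dereference_protected(), the variant that documents the locking for
lockdep. new_table stands in for a hypothetical replacement table.

	/* Reader: must run inside an RCU read-side critical section;
	 * rcu_dereference() outside one triggers a CONFIG_PROVE_RCU warning.
	 */
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	/* ... look up a kioctx in table ... */
	rcu_read_unlock();

	/* Updater: mm->ioctx_lock already excludes concurrent updates, so a
	 * plain load is safe.  rcu_dereference_protected() expresses the same
	 * thing while letting lockdep verify the lock is actually held.
	 */
	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_protected(mm->ioctx_table,
					  lockdep_is_held(&mm->ioctx_lock));
	rcu_assign_pointer(mm->ioctx_table, new_table);
	spin_unlock(&mm->ioctx_lock);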
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	17
1 file changed, 9 insertions(+), 8 deletions(-)
@@ -475,7 +475,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 	struct aio_ring *ring;
 
 	spin_lock(&mm->ioctx_lock);
-	table = rcu_dereference(mm->ioctx_table);
+	table = mm->ioctx_table;
 
 	while (1) {
 		if (table)
@@ -503,7 +503,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 		table->nr = new_nr;
 
 		spin_lock(&mm->ioctx_lock);
-		old = rcu_dereference(mm->ioctx_table);
+		old = mm->ioctx_table;
 
 		if (!old) {
 			rcu_assign_pointer(mm->ioctx_table, table);
@@ -579,10 +579,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	if (ctx->req_batch < 1)
 		ctx->req_batch = 1;
 
-	err = ioctx_add_table(ctx, mm);
-	if (err)
-		goto out_cleanup_noerr;
-
 	/* limit the number of system wide aios */
 	spin_lock(&aio_nr_lock);
 	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
@@ -595,13 +591,18 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
 
+	err = ioctx_add_table(ctx, mm);
+	if (err)
+		goto out_cleanup_put;
+
 	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
 		 ctx, ctx->user_id, mm, ctx->nr_events);
 	return ctx;
 
+out_cleanup_put:
+	percpu_ref_put(&ctx->users);
 out_cleanup:
 	err = -EAGAIN;
-out_cleanup_noerr:
 	aio_free_ring(ctx);
 out_freepcpu:
 	free_percpu(ctx->cpu);
@@ -626,7 +627,7 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
 	struct kioctx_table *table;
 
 	spin_lock(&mm->ioctx_lock);
-	table = rcu_dereference(mm->ioctx_table);
+	table = mm->ioctx_table;
 
 	WARN_ON(ctx != table->table[ctx->id]);
 	table->table[ctx->id] = NULL;