author     Chris Mason <chris.mason@oracle.com>   2008-10-01 01:24:06 +0200
committer  Chris Mason <chris.mason@oracle.com>   2008-10-01 01:36:34 +0200
commit     75ccf47d13bfb66de7faf596bfe497b9af7aaa40 (patch)
tree       e1b06bc9afec8f6b48cc2fb00c5e1e4d4dda2e0b /fs/btrfs/async-thread.c
parent     Btrfs: fix seekiness due to finding the wrong block group (diff)
download   linux-75ccf47d13bfb66de7faf596bfe497b9af7aaa40.tar.xz
           linux-75ccf47d13bfb66de7faf596bfe497b9af7aaa40.zip
Btrfs: fix multi-device code to use raid policies set by mkfs
When reading in block groups, a global mask of the available raid policies should be adjusted based on the types of block groups found on disk. This global mask is then used to decide which raid policy to use for new block groups.

The recent allocator changes dropped the call that updated the global mask, making all the block groups allocated at run time single striped onto a single drive.

This also fixes the async worker threads to set any thread that uses the requeue mechanism as busy. This allows us to avoid blocking on get_request_wait for the async bio submission threads.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
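The mask update described above amounts to OR-ing the type flags of every block group found on disk into one fs-wide value, and consulting that value whenever a new block group has to be allocated. Below is a minimal user-space sketch of that idea; the flag names, values and helpers (ALLOC_RAID0, note_block_group, pick_new_bg_flags, ...) are invented for illustration and are not the btrfs on-disk flags or in-kernel API.

    /*
     * Illustration only: simplified, user-space model of the behaviour
     * described in the commit message.  All names and values are invented.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define ALLOC_RAID0 (1ULL << 0)
    #define ALLOC_RAID1 (1ULL << 1)
    #define ALLOC_DUP   (1ULL << 2)

    struct fs_policy {
        uint64_t avail_alloc_bits;  /* raid policies seen in existing block groups */
    };

    /* called once for every block group read from disk */
    static void note_block_group(struct fs_policy *fs, uint64_t bg_flags)
    {
        fs->avail_alloc_bits |= bg_flags;
    }

    /* choose flags for a new block group from the policies mkfs set up */
    static uint64_t pick_new_bg_flags(const struct fs_policy *fs)
    {
        if (fs->avail_alloc_bits & ALLOC_RAID1)
            return ALLOC_RAID1;
        if (fs->avail_alloc_bits & ALLOC_RAID0)
            return ALLOC_RAID0;
        return 0;  /* nothing recorded: fall back to a single drive */
    }

    int main(void)
    {
        struct fs_policy fs = { 0 };

        /* dropping the update call corresponds to skipping these two lines:
         * the mask stays 0 and every run-time block group ends up single
         * striped onto one drive */
        note_block_group(&fs, ALLOC_RAID1);
        note_block_group(&fs, ALLOC_RAID0);

        printf("new block group flags: %#llx\n",
               (unsigned long long)pick_new_bg_flags(&fs));
        return 0;
    }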
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--  fs/btrfs/async-thread.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 04fb9702d14c..d82efd722a48 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -302,8 +302,20 @@ int btrfs_requeue_work(struct btrfs_work *work)
 	spin_lock_irqsave(&worker->lock, flags);
 	atomic_inc(&worker->num_pending);
 	list_add_tail(&work->list, &worker->pending);
-	check_busy_worker(worker);
+
+	/* by definition we're busy, take ourselves off the idle
+	 * list
+	 */
+	if (worker->idle) {
+		spin_lock_irqsave(&worker->workers->lock, flags);
+		worker->idle = 0;
+		list_move_tail(&worker->worker_list,
+			       &worker->workers->worker_list);
+		spin_unlock_irqrestore(&worker->workers->lock, flags);
+	}
+
 	spin_unlock_irqrestore(&worker->lock, flags);
+
 out:
 	return 0;
 }
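For the async-thread half of the fix, the hunk above moves a requeued work item's worker off the shared idle list while holding both the worker's own lock and the pool-wide lock, so threads that requeue themselves (the async bio submission threads mentioned in the commit message) are accounted as busy. A minimal user-space model of that pattern is sketched below; struct pool, struct worker and requeue_work() are invented names, not the kernel structures used by the patch.

    /*
     * Illustration only: user-space model of the "mark busy on requeue"
     * pattern from the hunk above.  All names are invented.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct pool {
        pthread_mutex_t lock;
        int idle_count;          /* stand-in for the idle half of worker_list */
    };

    struct worker {
        pthread_mutex_t lock;
        struct pool *pool;
        int idle;
        int num_pending;
    };

    static void requeue_work(struct worker *w)
    {
        pthread_mutex_lock(&w->lock);
        w->num_pending++;        /* queue the work item (list handling omitted) */

        /* by definition we're busy now: leave the pool's idle accounting
         * while still holding our own lock, as the patch does */
        if (w->idle) {
            pthread_mutex_lock(&w->pool->lock);
            w->idle = 0;
            w->pool->idle_count--;
            pthread_mutex_unlock(&w->pool->lock);
        }
        pthread_mutex_unlock(&w->lock);
    }

    int main(void)
    {
        struct pool p = { .idle_count = 1 };
        struct worker w = { .pool = &p, .idle = 1 };

        pthread_mutex_init(&p.lock, NULL);
        pthread_mutex_init(&w.lock, NULL);

        requeue_work(&w);
        printf("pending=%d idle=%d pool_idle=%d\n",
               w.num_pending, w.idle, p.idle_count);
        return 0;
    }

The nested locking mirrors the patch: the worker's own lock protects its pending queue, while the pool-wide lock protects the shared idle accounting.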