From adb4ddbbfb90c302e78da68b3f015588ca45d7f3 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Wed, 24 Oct 2007 10:54:38 +0200
Subject: block: use lock bitops for the tag map.

The block queue tag map can use lock bitops.

Signed-off-by: Nick Piggin
Signed-off-by: Jens Axboe
---
 block/ll_rw_blk.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d8616e6ebd92..a8a181072bf8 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1057,18 +1057,16 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	bqt->tag_index[tag] = NULL;
 
-	/*
-	 * We use test_and_clear_bit's memory ordering properties here.
-	 * The tag_map bit acts as a lock for tag_index[bit], so we need
-	 * a barrer before clearing the bit (precisely: release semantics).
-	 * Could use clear_bit_unlock when it is merged.
-	 */
-	if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
+	if (unlikely(!test_bit(tag, bqt->tag_map))) {
 		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
 		       __FUNCTION__, tag);
 		return;
 	}
-
+	/*
+	 * The tag_map bit acts as a lock for tag_index[bit], so we need
+	 * unlock memory barrier semantics.
+	 */
+	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }
 
@@ -1114,10 +1112,10 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 		if (tag >= bqt->max_depth)
 			return 1;
 
-	} while (test_and_set_bit(tag, bqt->tag_map));
+	} while (test_and_set_bit_lock(tag, bqt->tag_map));
 	/*
-	 * We rely on test_and_set_bit providing lock memory ordering semantics
-	 * (could use test_and_set_bit_lock when it is merged).
+	 * We need lock ordering semantics given by test_and_set_bit_lock.
+	 * See blk_queue_end_tag for details.
 	 */
 
 	rq->cmd_flags |= REQ_QUEUED;
-- 
cgit v1.2.3
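
Editor's note, not part of the patch: the change above treats each tag_map bit as a tiny lock for its tag_index slot, taking it with acquire semantics (test_and_set_bit_lock) and dropping it with release semantics (clear_bit_unlock). Below is a minimal userspace sketch of that pattern using C11 atomics as a stand-in for the kernel bitops; the names start_tag/end_tag and the 32-tag sizing are illustrative assumptions, only tag_map/tag_index mirror the kernel code.

/*
 * Userspace sketch of the bit-as-lock pattern (assumption: C11 atomics in
 * place of the kernel's test_and_set_bit_lock()/clear_bit_unlock()).
 * Setting a tag bit with acquire ordering "locks" its slot; clearing it
 * with release ordering "unlocks" it, so the store that NULLs the slot
 * cannot be reordered past the clearing of the bit.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_TAGS 32	/* fits in one unsigned long on all common targets */

static atomic_ulong tag_map;		/* one bit per tag, bit set == busy */
static void *tag_index[MAX_TAGS];	/* each slot guarded by its tag_map bit */

/* Try to claim @tag; acquire pairs with the release in end_tag(). */
static bool start_tag(unsigned int tag, void *payload)
{
	unsigned long bit = 1UL << tag;

	/* like test_and_set_bit_lock(): non-zero return means already busy */
	if (atomic_fetch_or_explicit(&tag_map, bit, memory_order_acquire) & bit)
		return false;

	tag_index[tag] = payload;
	return true;
}

/* Release @tag; the slot must be cleared before the bit is dropped. */
static void end_tag(unsigned int tag)
{
	unsigned long bit = 1UL << tag;

	tag_index[tag] = NULL;
	/* like clear_bit_unlock(): release keeps the NULL store ahead of it */
	atomic_fetch_and_explicit(&tag_map, ~bit, memory_order_release);
}

int main(void)
{
	int payload = 42;

	if (start_tag(3, &payload)) {
		printf("tag 3 claimed, slot = %p\n", tag_index[3]);
		end_tag(3);
	}
	return 0;
}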