From dd941252a81b02b5915e2db160fe02c972875846 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Fri, 14 Sep 2007 08:41:12 +0200
Subject: [PATCH] shared tag queue barrier comment

Should add some comments for the tag barriers (they won't be so important
if we can switch over to the explicit _lock bitops, but for now we should
make it clear).

Jens' original patch said a barrier after the test_and_clear_bit was also
required. I can't see why (and it would prevent the use of the _lock bitop).

Acked-by: Jens Axboe
Signed-off-by: Linus Torvalds
---
 block/ll_rw_blk.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index cd20367061..ed39313c40 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1085,6 +1085,12 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	bqt->tag_index[tag] = NULL;
 
+	/*
+	 * We use test_and_clear_bit's memory ordering properties here.
+	 * The tag_map bit acts as a lock for tag_index[bit], so we need
+	 * a barrier before clearing the bit (precisely: release semantics).
+	 * Could use clear_bit_unlock when it is merged.
+	 */
 	if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
 		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
 		       __FUNCTION__, tag);
@@ -1137,6 +1143,10 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 		return 1;
 
 	} while (test_and_set_bit(tag, bqt->tag_map));
+	/*
+	 * We rely on test_and_set_bit providing lock memory ordering semantics
+	 * (could use test_and_set_bit_lock when it is merged).
+	 */
 
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
--
2.39.5
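
For readers who want to see the ordering argument in isolation, below is a
minimal self-contained userspace sketch using C11 atomics rather than the
kernel bitops. The names (start_tag/end_tag, MAX_TAGS, a single-word tag_map)
are illustrative only and are not the block layer's API. It models the pattern
the new comments describe: a bit in tag_map acts as a lock for the matching
tag_index[] slot, releasing the tag needs release ordering before the bit is
cleared, and no barrier is needed after the clear, which is the point the
commit message makes about Jens' original patch.

/*
 * Userspace analogy of the tag_map/tag_index ordering (not kernel code).
 * Each bit of tag_map protects the corresponding tag_index[] slot.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_TAGS 32

static atomic_ulong tag_map;          /* one lock bit per tag */
static void *tag_index[MAX_TAGS];     /* slot owned by whoever holds the bit */

/*
 * Acquire a free tag.  The atomic RMW with acquire ordering plays the role
 * of test_and_set_bit_lock(): the new owner is guaranteed to see the slot
 * as the previous owner left it.
 */
static int start_tag(void *rq)
{
	for (int tag = 0; tag < MAX_TAGS; tag++) {
		unsigned long bit = 1UL << tag;
		unsigned long old = atomic_fetch_or_explicit(&tag_map, bit,
							     memory_order_acquire);
		if (!(old & bit)) {
			tag_index[tag] = rq;	/* bit held, slot is ours */
			return tag;
		}
	}
	return -1;	/* no free tag */
}

/*
 * Release a tag.  Clear the slot first, then drop the bit with release
 * ordering, as clear_bit_unlock() would; nothing is required after the
 * clear itself.
 */
static void end_tag(int tag)
{
	unsigned long bit = 1UL << tag;

	tag_index[tag] = NULL;
	atomic_fetch_and_explicit(&tag_map, ~bit, memory_order_release);
}

int main(void)
{
	int dummy;
	int tag = start_tag(&dummy);

	printf("got tag %d\n", tag);
	end_tag(tag);
	return 0;
}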