diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d8616e6ebd92bfbd21087aab4b93959492a3d821..3b927be038501f178577da45ebb447c6dc27b90f 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -791,7 +791,6 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
        retval = atomic_dec_and_test(&bqt->refcnt);
        if (retval) {
                BUG_ON(bqt->busy);
-               BUG_ON(!list_empty(&bqt->busy_list));
 
                kfree(bqt->tag_index);
                bqt->tag_index = NULL;
@@ -903,7 +902,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
        if (init_tag_map(q, tags, depth))
                goto fail;
 
-       INIT_LIST_HEAD(&tags->busy_list);
        tags->busy = 0;
        atomic_set(&tags->refcnt, 1);
        return tags;
@@ -954,6 +952,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
         */
        q->queue_tags = tags;
        q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+       INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
 fail:
        kfree(tags);
@@ -1057,18 +1056,16 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
        bqt->tag_index[tag] = NULL;
 
-       /*
-        * We use test_and_clear_bit's memory ordering properties here.
-        * The tag_map bit acts as a lock for tag_index[bit], so we need
-        * a barrer before clearing the bit (precisely: release semantics).
-        * Could use clear_bit_unlock when it is merged.
-        */
-       if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
+       if (unlikely(!test_bit(tag, bqt->tag_map))) {
                printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
                       __FUNCTION__, tag);
                return;
        }
-
+       /*
+        * The tag_map bit acts as a lock for tag_index[bit], so we need
+        * unlock memory barrier semantics.
+        */
+       clear_bit_unlock(tag, bqt->tag_map);
        bqt->busy--;
 }
 
@@ -1114,17 +1111,17 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
                if (tag >= bqt->max_depth)
                        return 1;
 
-       } while (test_and_set_bit(tag, bqt->tag_map));
+       } while (test_and_set_bit_lock(tag, bqt->tag_map));
        /*
-        * We rely on test_and_set_bit providing lock memory ordering semantics
-        * (could use test_and_set_bit_lock when it is merged).
+        * We need lock ordering semantics given by test_and_set_bit_lock.
+        * See blk_queue_end_tag for details.
         */
 
        rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blkdev_dequeue_request(rq);
-       list_add(&rq->queuelist, &bqt->busy_list);
+       list_add(&rq->queuelist, &q->tag_busy_list);
        bqt->busy++;
        return 0;
 }
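
Both tag hunks rely on the same bit-as-lock idiom: the bit in bqt->tag_map guards tag_index[tag], so taking a tag needs acquire ordering (test_and_set_bit_lock()) and freeing it needs release ordering (clear_bit_unlock()), which is why blk_queue_end_tag() now stores NULL into tag_index[tag] before the bit is cleared. A minimal user-space sketch of that ordering, using C11 atomics rather than the kernel bitops (all names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong tag_map;        /* one bit per tag; a single word for brevity */
static void *tag_index[64];         /* payload guarded by the matching bit */

static bool claim_tag(unsigned int tag, void *payload)
{
        unsigned long bit = 1UL << tag;

        /* like test_and_set_bit_lock(): acquire ordering on success */
        if (atomic_fetch_or_explicit(&tag_map, bit, memory_order_acquire) & bit)
                return false;       /* tag already busy */
        tag_index[tag] = payload;
        return true;
}

static void release_tag(unsigned int tag)
{
        tag_index[tag] = NULL;
        /* like clear_bit_unlock(): release ordering, so the NULL store above
         * is visible before the bit looks free to another CPU */
        atomic_fetch_and_explicit(&tag_map, ~(1UL << tag), memory_order_release);
}
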
@@ -1145,24 +1142,10 @@ EXPORT_SYMBOL(blk_queue_start_tag);
  **/
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
-       struct blk_queue_tag *bqt = q->queue_tags;
        struct list_head *tmp, *n;
-       struct request *rq;
 
-       list_for_each_safe(tmp, n, &bqt->busy_list) {
-               rq = list_entry_rq(tmp);
-
-               if (rq->tag == -1) {
-                       printk(KERN_ERR
-                              "%s: bad tag found on list\n", __FUNCTION__);
-                       list_del_init(&rq->queuelist);
-                       rq->cmd_flags &= ~REQ_QUEUED;
-               } else
-                       blk_queue_end_tag(q, rq);
-
-               rq->cmd_flags &= ~REQ_STARTED;
-               __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-       }
+       list_for_each_safe(tmp, n, &q->tag_busy_list)
+               blk_requeue_request(q, list_entry_rq(tmp));
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
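
blk_queue_invalidate_tags() now simply walks q->tag_busy_list and hands each request to blk_requeue_request(), which ends the tag and reinserts the request so it will be reissued. A hedged sketch of a typical caller (hypothetical driver, illustrative names), assuming the usual rule that this runs under the queue lock:

/* Hypothetical driver recovery path: after e.g. a bus reset, hand every
 * tagged, in-flight request back to the queue so it gets reissued. */
static void mydrv_recover(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_queue_invalidate_tags(q);   /* requeues everything on q->tag_busy_list */
        spin_unlock_irqrestore(q->queue_lock, flags);
}
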
@@ -1373,7 +1356,7 @@ new_segment:
        } /* segments in rq */
 
        if (sg)
-               __sg_mark_end(sg);
+               sg_mark_end(sg);
 
        return nsegs;
 }
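
With __sg_mark_end() gone, the mapping loop terminates the scatterlist with plain sg_mark_end() on the last entry it filled. A small sketch of the same end-marking pattern when building a list by hand (assuming the 2.6.24-era sg API; buffer and function names are illustrative):

/* Fill two of four slots and move the end marker to the last one used. */
static int build_sg(struct scatterlist *sg, void *hdr, unsigned int hdr_len,
                    void *data, unsigned int data_len)
{
        sg_init_table(sg, 4);           /* clear the table; end marker on the last slot */
        sg_set_buf(&sg[0], hdr, hdr_len);
        sg_set_buf(&sg[1], data, data_len);
        sg_mark_end(&sg[1]);            /* only two entries used: terminate here */
        return 2;                       /* number of entries actually used */
}
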
@@ -1638,15 +1621,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 {
        struct request_queue *q = bdi->unplug_io_data;
 
-       /*
-        * devices don't necessarily have an ->unplug_fn defined
-        */
-       if (q->unplug_fn) {
-               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                                       q->rq.count[READ] + q->rq.count[WRITE]);
-
-               q->unplug_fn(q);
-       }
+       blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1670,6 +1645,20 @@ static void blk_unplug_timeout(unsigned long data)
        kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+       /*
+        * devices don't necessarily have an ->unplug_fn defined
+        */
+       if (q->unplug_fn) {
+               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                                       q->rq.count[READ] + q->rq.count[WRITE]);
+
+               q->unplug_fn(q);
+       }
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
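
blk_unplug() is now a real exported helper (the backing_dev unplug callback above is reduced to a thin wrapper around it), so stacking drivers can kick a lower device's queue directly. A hypothetical caller, assuming 2.6.24-era interfaces; the function name is illustrative:

/* Kick the queue behind a block device so any plugged I/O is dispatched. */
static void kick_lower_device(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q)
                blk_unplug(q);
}
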
@@ -3225,6 +3214,7 @@ static inline void __generic_make_request(struct bio *bio)
        sector_t old_sector;
        int ret, nr_sectors = bio_sectors(bio);
        dev_t old_dev;
+       int err = -EIO;
 
        might_sleep();
 
@@ -3252,7 +3242,7 @@ static inline void __generic_make_request(struct bio *bio)
                                bdevname(bio->bi_bdev, b),
                                (long long) bio->bi_sector);
 end_io:
-                       bio_endio(bio, -EIO);
+                       bio_endio(bio, err);
                        break;
                }
 
@@ -3287,6 +3277,10 @@ end_io:
 
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
+               if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+                       err = -EOPNOTSUPP;
+                       goto end_io;
+               }
 
                ret = q->make_request_fn(q, bio);
        } while (ret);
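
With the added check, an empty barrier (a barrier bio carrying no data) submitted to a queue without a prepare_flush_fn now completes with -EOPNOTSUPP rather than -EIO, letting callers distinguish "cache flush not supported" from a genuine I/O failure. A hypothetical completion handler showing how a caller might record that distinction (sketch only; struct and function names are illustrative):

struct flush_ctx {
        struct completion done;
        int error;
};

static void flush_end_io(struct bio *bio, int error)
{
        struct flush_ctx *ctx = bio->bi_private;

        ctx->error = error;             /* -EOPNOTSUPP: no flush support */
        complete(&ctx->done);
        bio_put(bio);
}
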