err.no Git - linux-2.6/blobdiff - drivers/mmc/card/queue.c
Merge branch 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6] / drivers / mmc / card / queue.c
index 1b9c9b6da5b7349b39148129c7a6ce4a709e7717..3dee97e7d165f3e64f3898b1fb84c335cc4a1f08 100644 (file)
@@ -94,8 +94,8 @@ static void mmc_request(struct request_queue *q)
                printk(KERN_ERR "MMC: killing requests for dead queue\n");
                while ((req = elv_next_request(q)) != NULL) {
                        do {
-                               ret = end_that_request_chunk(req, 0,
-                                       req->current_nr_sectors << 9);
+                               ret = __blk_end_request(req, -EIO,
+                                                       blk_rq_cur_bytes(req));
                        } while (ret);
                }
                return;
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                        printk(KERN_WARNING "%s: unable to allocate "
                                "bounce buffer\n", mmc_card_name(card));
                } else {
-                       blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
+                       blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
@@ -180,12 +180,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
                blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
-               mq->sg = kzalloc(sizeof(struct scatterlist) *
+               mq->sg = kmalloc(sizeof(struct scatterlist) *
                        host->max_phys_segs, GFP_KERNEL);
                if (!mq->sg) {
                        ret = -ENOMEM;
                        goto cleanup_queue;
                }
+               sg_init_table(mq->sg, host->max_phys_segs);
        }
 
        init_MUTEX(&mq->thread_sem);
@@ -289,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
        }
 }
 
-static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
-       struct scatterlist *src, unsigned int src_len)
-{
-       unsigned int chunk;
-       char *dst_buf, *src_buf;
-       unsigned int dst_size, src_size;
-
-       dst_buf = NULL;
-       src_buf = NULL;
-       dst_size = 0;
-       src_size = 0;
-
-       while (src_len) {
-               BUG_ON(dst_len == 0);
-
-               if (dst_size == 0) {
-                       dst_buf = sg_virt(dst);
-                       dst_size = dst->length;
-               }
-
-               if (src_size == 0) {
-                       src_buf = sg_virt(src);
-                       src_size = src->length;
-               }
-
-               chunk = min(dst_size, src_size);
-
-               memcpy(dst_buf, src_buf, chunk);
-
-               dst_buf += chunk;
-               src_buf += chunk;
-               dst_size -= chunk;
-               src_size -= chunk;
-
-               if (dst_size == 0) {
-                       dst++;
-                       dst_len--;
-               }
-
-               if (src_size == 0) {
-                       src++;
-                       src_len--;
-               }
-       }
-}
-
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
 {
        unsigned int sg_len;
+       size_t buflen;
+       struct scatterlist *sg;
+       int i;
 
        if (!mq->bounce_buf)
                return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@@ -348,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
 
        mq->bounce_sg_len = sg_len;
 
-       /*
-        * Shortcut in the event we only get a single entry.
-        */
-       if (sg_len == 1) {
-               memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
-               return 1;
-       }
+       buflen = 0;
+       for_each_sg(mq->bounce_sg, sg, sg_len, i)
+               buflen += sg->length;
 
-       sg_init_one(mq->sg, mq->bounce_buf, 0);
-
-       while (sg_len) {
-               mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
-               sg_len--;
-       }
+       sg_init_one(mq->sg, mq->bounce_buf, buflen);
 
        return 1;
 }
 
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
 void mmc_queue_bounce_pre(struct mmc_queue *mq)
 {
+       unsigned long flags;
+
        if (!mq->bounce_buf)
                return;
 
-       if (mq->bounce_sg_len == 1)
-               return;
        if (rq_data_dir(mq->req) != WRITE)
                return;
 
-       copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
+       local_irq_save(flags);
+       sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
+               mq->bounce_buf, mq->sg[0].length);
+       local_irq_restore(flags);
 }
 
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
 void mmc_queue_bounce_post(struct mmc_queue *mq)
 {
+       unsigned long flags;
+
        if (!mq->bounce_buf)
                return;
 
-       if (mq->bounce_sg_len == 1)
-               return;
        if (rq_data_dir(mq->req) != READ)
                return;
 
-       copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
+       local_irq_save(flags);
+       sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
+               mq->bounce_buf, mq->sg[0].length);
+       local_irq_restore(flags);
 }