X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=block%2Fblk-map.c;h=3c942bd6422a2ba639597ace2ad9f3768ccf1456;hb=02539d71fa98d5737bb668b02286c76241e4bac9;hp=09f7fd0bcb73000d4899f2b96dc088d852d41aab;hpb=99642e211a789df34347c3f10f0301462837f0c5;p=linux-2.6

diff --git a/block/blk-map.c b/block/blk-map.c
index 09f7fd0bcb..3c942bd642 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <scsi/sg.h>		/* for struct sg_iovec */
 
 #include "blk.h"
 
@@ -19,7 +20,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->raw_data_len += bio->bi_size;
 		rq->data_len += bio->bi_size;
 	}
 	return 0;
@@ -44,6 +44,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     void __user *ubuf, unsigned int len)
 {
 	unsigned long uaddr;
+	unsigned int alignment;
 	struct bio *bio, *orig_bio;
 	int reading, ret;
 
@@ -54,8 +55,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) &&
-	    !(len & queue_dma_alignment(q)))
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	if (!(uaddr & alignment) && !(len & alignment))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -140,23 +141,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		ubuf += ret;
 	}
 
-	/*
-	 * __blk_rq_map_user() copies the buffers if starting address
-	 * or length isn't aligned.  As the copied buffer is always
-	 * page aligned, we know that there's enough room for padding.
-	 * Extend the last bio and update rq->data_len accordingly.
-	 *
-	 * On unmap, bio_uncopy_user() will use unmodified
-	 * bio_map_data pointed to by bio->bi_private.
-	 */
-	if (len & queue_dma_alignment(q)) {
-		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
-		struct bio *bio = rq->biotail;
-
-		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
-		bio->bi_size += pad_len;
-		rq->data_len += pad_len;
-	}
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
 
 	rq->buffer = rq->data = NULL;
 	return 0;
@@ -192,15 +178,26 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 			struct sg_iovec *iov, int iov_count, unsigned int len)
 {
 	struct bio *bio;
+	int i, read = rq_data_dir(rq) == READ;
+	int unaligned = 0;
 
 	if (!iov || iov_count <= 0)
 		return -EINVAL;
 
-	/* we don't allow misaligned data like bio_map_user() does.  If the
-	 * user is using sg, they're expected to know the alignment constraints
-	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count,
-			       rq_data_dir(rq) == READ);
+	for (i = 0; i < iov_count; i++) {
+		unsigned long uaddr = (unsigned long)iov[i].iov_base;
+
+		if (uaddr & queue_dma_alignment(q)) {
+			unaligned = 1;
+			break;
+		}
+	}
+
+	if (unaligned || (q->dma_pad_mask & len))
+		bio = bio_copy_user_iov(q, iov, iov_count, read);
+	else
+		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
@@ -210,12 +207,14 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return -EINVAL;
 	}
 
+	if (!bio_flagged(bio, BIO_USER_MAPPED))
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
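
Note on the logic above: a user buffer is mapped for direct DMA only when both its
start address and its length are clean against the queue's DMA alignment mask and
its padding mask (q->dma_pad_mask); anything else is copied through kernel bounce
buffers (bio_copy_user()/bio_copy_user_iov()), and such requests are marked
REQ_COPY_USER so later code can tell the payload was copied rather than pinned
user pages. Both masks have the form 2^n - 1, so OR-ing them yields the stricter
combined mask. Below is a minimal stand-alone C sketch of that gating test;
needs_bounce() and the example mask values are illustrative inventions, not
kernel API.

#include <stdio.h>

/*
 * Sketch of the alignment gate from __blk_rq_map_user() above.
 * dma_align_mask stands in for queue_dma_alignment(q) and
 * dma_pad_mask for q->dma_pad_mask; both are 2^n - 1 masks.
 */
static int needs_bounce(unsigned long uaddr, unsigned long len,
			unsigned long dma_align_mask,
			unsigned long dma_pad_mask)
{
	/* mirrors: alignment = queue_dma_alignment(q) | q->dma_pad_mask */
	unsigned long alignment = dma_align_mask | dma_pad_mask;

	/* misaligned start or length => copy via a bounce buffer */
	return (uaddr & alignment) || (len & alignment);
}

int main(void)
{
	/* hypothetical queue: 512-byte DMA alignment, 4-byte padding */
	printf("%d\n", needs_bounce(0x1000, 4096, 511, 3));	/* 0 -> map */
	printf("%d\n", needs_bounce(0x1001, 4096, 511, 3));	/* 1 -> copy */
	printf("%d\n", needs_bounce(0x1000, 4097, 511, 3));	/* 1 -> copy */
	return 0;
}

The iov path applies the same idea piecewise: blk_rq_map_user_iov() checks each
iovec base address against queue_dma_alignment(q) and the total length against
q->dma_pad_mask before choosing bio_map_user_iov() or bio_copy_user_iov().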