/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
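
/*
 * Illustrative sketch (editor's example, not part of this file): a caller
 * that has already built a bio, e.g. via bio_map_kern(), could attach it
 * to a packet-command request roughly like this (error handling
 * abbreviated; bio, buf, len and rq are assumed caller state):
 *
 *	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	ret = blk_rq_append_bio(q, rq, bio);
 *	if (ret)
 *		bio_put(bio);
 *
 * On success the bio is linked into rq and completes with the request.
 */
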
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA. Otherwise, set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @ubuf:	the user buffer
 * @len:	length of user data
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    void __user *ubuf, unsigned long len)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !ubuf)
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit.
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
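
/*
 * Illustrative sketch (editor's example, not part of this file): a typical
 * REQ_BLOCK_PC caller pairs blk_rq_map_user() with blk_execute_rq() and
 * blk_rq_unmap_user(), saving the original rq->bio before execution since
 * completion may advance it (q, disk, ubuf and len are assumed caller
 * state; error handling abbreviated):
 *
 *	rq = blk_get_request(q, READ, __GFP_WAIT);
 *	ret = blk_rq_map_user(q, rq, ubuf, len);
 *	if (!ret) {
 *		bio = rq->bio;
 *		blk_execute_rq(q, disk, rq, 0);
 *		ret = blk_rq_unmap_user(bio);
 *	}
 *	blk_put_request(rq);
 */
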
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	/*
	 * We don't allow misaligned data like bio_map_user() does. If the
	 * user is using sg, they're expected to know the alignment
	 * constraints and respect them accordingly.
	 */
	bio = bio_map_user_iov(q, NULL, iov, iov_count,
			       rq_data_dir(rq) == READ);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
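
/*
 * Illustrative sketch (editor's example, not part of this file): an
 * SG_IO-style caller with a scattered user buffer might map it like this,
 * where len must equal the sum of the iovec lengths and both segments are
 * assumed to satisfy the queue's alignment constraints:
 *
 *	struct sg_iovec iov[2] = {
 *		{ .iov_base = hdr_buf,  .iov_len = hdr_len  },
 *		{ .iov_base = data_buf, .iov_len = data_len },
 *	};
 *
 *	ret = blk_rq_map_user_iov(q, rq, iov, 2, hdr_len + data_len);
 */
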
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		/*
		 * A bounced bio stashes the original mapped bio in
		 * ->bi_private; that is the one we must unmap.
		 */
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
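
/*
 * Editor's note (sketch, not part of this file): the same pairing applies
 * to iov mappings; continuing the blk_rq_map_user_iov() sketch above, the
 * bio pointer must be taken before the request is executed:
 *
 *	bio = rq->bio;
 *	blk_execute_rq(q, disk, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 */
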
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
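
/*
 * Illustrative sketch (editor's example, not part of this file): mapping a
 * kernel buffer for a packet command. No blk_rq_unmap_user() is needed for
 * this path; the bio is torn down through normal request completion
 * (q, disk, kbuf and len are assumed caller state; error handling
 * abbreviated):
 *
 *	rq = blk_get_request(q, WRITE, __GFP_WAIT);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */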