X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=block%2Fblk-settings.c;h=5713f7e5cbd266c6db06a8b5311b0ffa97d225b2;hb=57eee3d23e8833ca18708b374c648235691942ba;hp=c8d0c572409826a109410d0d197c91fa305aeb13;hpb=8f859016ea49cd8d7c743533bd1ab3db9a813ffa;p=linux-2.6

diff --git a/block/blk-settings.c b/block/blk-settings.c
index c8d0c57240..5713f7e5cb 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -140,7 +140,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
@@ -293,9 +293,26 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
- * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * blk_queue_dma_pad - set pad mask
+ * @q:    the request queue for the device
+ * @mask: pad mask
+ *
+ * Set pad mask.  Direct IO requests are padded to the mask specified.
  *
+ * Appending pad buffer to a request modifies ->data_len such that it
+ * includes the pad buffer.  The original requested data length can be
+ * obtained using blk_rq_raw_data_len().
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+	q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  * @q:  the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:  physically contiguous buffer
  * @size: size of the buffer in bytes
  *
@@ -315,14 +332,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-		unsigned int size)
+int blk_queue_dma_drain(struct request_queue *q,
+			dma_drain_needed_fn *dma_drain_needed,
+			void *buf, unsigned int size)
 {
 	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
 	--q->max_hw_segments;
 	--q->max_phys_segments;
+	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
 
@@ -386,7 +405,7 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-int __init blk_settings_init(void)
+static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
 	blk_max_pfn = max_pfn - 1;
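
Usage note (not part of the patch): blk_queue_dma_pad() and the extended blk_queue_dma_drain() above are intended to be called by a driver while it configures its request queue. A minimal sketch of how a driver might wire them up follows; my_drain_needed(), my_setup_queue() and MY_DRAIN_SIZE are hypothetical names, and the per-request drain test is purely illustrative.

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_DRAIN_SIZE	256	/* assumed drain buffer size, hypothetical */

/* Per-request hook: return non-zero when the drain buffer should be appended. */
static int my_drain_needed(struct request *rq)
{
	/* illustrative policy: only packet commands with a misaligned length */
	return blk_pc_request(rq) && (rq->data_len & 3);
}

static int my_setup_queue(struct request_queue *q)
{
	void *drain_buf;

	/* Pad direct-I/O transfer lengths up to a 4-byte boundary (mask = 3). */
	blk_queue_dma_pad(q, 3);

	drain_buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);
	if (!drain_buf)
		return -ENOMEM;

	/*
	 * blk_queue_dma_drain() reserves one phys and one hw segment for the
	 * drain buffer and returns -EINVAL if the queue cannot spare them.
	 */
	return blk_queue_dma_drain(q, my_drain_needed, drain_buf,
				   MY_DRAIN_SIZE);
}

Because the dma_drain_needed callback is consulted per request, a driver can limit the drain buffer to just the command types that need it rather than paying the extra segment on every transfer.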