X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=lib%2Fscatterlist.c;h=b80c21100d783ceeee87fa828600d90b322120ab;hb=ccf2779544eecfcc5447e2028d1029b6d4ff7bb6;hp=02aaa27e010ef2b0786e71a70f840a9915b1ed22;hpb=0db9299f48ebd4a860d6ad4e1d36ac50671d48e7;p=linux-2.6

diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 02aaa27e01..b80c21100d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -8,6 +8,7 @@
  */
 #include <linux/module.h>
 #include <linux/scatterlist.h>
+#include <linux/highmem.h>
 
 /**
  * sg_next - return the next scatterlist entry in a list
@@ -130,13 +131,17 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
 /**
  * __sg_free_table - Free a previously mapped sg table
  * @table:	The sg table header to use
+ * @max_ents:	The maximum number of entries per single scatterlist
  * @free_fn:	Free function
  *
  * Description:
- *   Free an sg table previously allocated and setup with __sg_alloc_table().
+ *   Free an sg table previously allocated and setup with
+ *   __sg_alloc_table(). The @max_ents value must be identical to
+ *   that previously used with __sg_alloc_table().
  *
 **/
-void __sg_free_table(struct sg_table *table, sg_free_fn *free_fn)
+void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+		     sg_free_fn *free_fn)
 {
 	struct scatterlist *sgl, *next;
 
@@ -149,14 +154,14 @@ void __sg_free_table(struct sg_table *table, sg_free_fn *free_fn)
 		unsigned int sg_size;
 
 		/*
-		 * If we have more than SG_MAX_SINGLE_ALLOC segments left,
+		 * If we have more than max_ents segments left,
 		 * then assign 'next' to the sg table after the current one.
 		 * sg_size is then one less than alloc size, since the last
 		 * element is the chain pointer.
		 */
-		if (alloc_size > SG_MAX_SINGLE_ALLOC) {
-			next = sg_chain_ptr(&sgl[SG_MAX_SINGLE_ALLOC - 1]);
-			alloc_size = SG_MAX_SINGLE_ALLOC;
+		if (alloc_size > max_ents) {
+			next = sg_chain_ptr(&sgl[max_ents - 1]);
+			alloc_size = max_ents;
 			sg_size = alloc_size - 1;
 		} else {
 			sg_size = alloc_size;
@@ -179,7 +184,7 @@ EXPORT_SYMBOL(__sg_free_table);
 **/
 void sg_free_table(struct sg_table *table)
 {
-	__sg_free_table(table, sg_kfree);
+	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
 }
 EXPORT_SYMBOL(sg_free_table);
 
@@ -187,22 +192,30 @@ EXPORT_SYMBOL(sg_free_table);
  * __sg_alloc_table - Allocate and initialize an sg table with given allocator
  * @table:	The sg table header to use
  * @nents:	Number of entries in sg list
+ * @max_ents:	The maximum number of entries the allocator returns per call
  * @gfp_mask:	GFP allocation mask
  * @alloc_fn:	Allocator to use
  *
+ * Description:
+ *   This function returns a @table @nents long. The allocator is
+ *   defined to return scatterlist chunks of maximum size @max_ents.
+ *   Thus if @nents is bigger than @max_ents, the scatterlists will be
+ *   chained in units of @max_ents.
+ *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
-int __sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask,
+int __sg_alloc_table(struct sg_table *table, unsigned int nents,
+		     unsigned int max_ents, gfp_t gfp_mask,
 		     sg_alloc_fn *alloc_fn)
 {
 	struct scatterlist *sg, *prv;
 	unsigned int left;
 
 #ifndef ARCH_HAS_SG_CHAIN
-	BUG_ON(nents > SG_MAX_SINGLE_ALLOC);
+	BUG_ON(nents > max_ents);
 #endif
 
 	memset(table, 0, sizeof(*table));
@@ -212,8 +225,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask,
 	do {
 		unsigned int sg_size, alloc_size = left;
 
-		if (alloc_size > SG_MAX_SINGLE_ALLOC) {
-			alloc_size = SG_MAX_SINGLE_ALLOC;
+		if (alloc_size > max_ents) {
+			alloc_size = max_ents;
 			sg_size = alloc_size - 1;
 		} else
 			sg_size = alloc_size;
@@ -232,7 +245,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask,
 		 * If this is not the first mapping, chain previous part.
 		 */
 		if (prv)
-			sg_chain(prv, SG_MAX_SINGLE_ALLOC, sg);
+			sg_chain(prv, max_ents, sg);
 		else
 			table->sgl = sg;
 
@@ -272,10 +285,112 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
 {
 	int ret;
 
-	ret = __sg_alloc_table(table, nents, gfp_mask, sg_kmalloc);
+	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
+			       gfp_mask, sg_kmalloc);
 	if (unlikely(ret))
-		__sg_free_table(table, sg_kfree);
+		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
 
 	return ret;
 }
 EXPORT_SYMBOL(sg_alloc_table);
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl:	The SG list
+ * @nents:	Number of SG entries
+ * @buf:	Where to copy from
+ * @buflen:	The number of bytes to copy
+ * @to_buffer:	transfer direction (non zero == from an sg list to a
+ *		buffer, 0 == from a buffer to an sg list
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
+			     void *buf, size_t buflen, int to_buffer)
+{
+	struct scatterlist *sg;
+	size_t buf_off = 0;
+	int i;
+
+	WARN_ON(!irqs_disabled());
+
+	for_each_sg(sgl, sg, nents, i) {
+		struct page *page;
+		int n = 0;
+		unsigned int sg_off = sg->offset;
+		unsigned int sg_copy = sg->length;
+
+		if (sg_copy > buflen)
+			sg_copy = buflen;
+		buflen -= sg_copy;
+
+		while (sg_copy > 0) {
+			unsigned int page_copy;
+			void *p;
+
+			page_copy = PAGE_SIZE - sg_off;
+			if (page_copy > sg_copy)
+				page_copy = sg_copy;
+
+			page = nth_page(sg_page(sg), n);
+			p = kmap_atomic(page, KM_BIO_SRC_IRQ);
+
+			if (to_buffer)
+				memcpy(buf + buf_off, p + sg_off, page_copy);
+			else {
+				memcpy(p + sg_off, buf + buf_off, page_copy);
+				flush_kernel_dcache_page(page);
+			}
+
+			kunmap_atomic(p, KM_BIO_SRC_IRQ);
+
+			buf_off += page_copy;
+			sg_off += page_copy;
+			if (sg_off == PAGE_SIZE) {
+				sg_off = 0;
+				n++;
+			}
+			sg_copy -= page_copy;
+		}
+
+		if (!buflen)
+			break;
+	}
+
+	return buf_off;
+}
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl:	The SG list
+ * @nents:	Number of SG entries
+ * @buf:	Where to copy from
+ * @buflen:	The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+			   void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl:	The SG list
+ * @nents:	Number of SG entries
+ * @buf:	Where to copy to
+ * @buflen:	The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+			 void *buf, size_t buflen)
+{
+	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
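
For context, here is a minimal usage sketch of the interfaces this patch touches. It is not part of the patch; the function name example_sg_roundtrip() and the chunk/buffer sizes are invented for illustration. It builds a two-entry table with sg_alloc_table(), backs each entry with sg_set_buf(), and round-trips data through the new sg_copy_from_buffer()/sg_copy_to_buffer() helpers. Note that sg_copy_buffer() in this version warns unless interrupts are disabled (it maps pages with the KM_BIO_SRC_IRQ kmap slot), hence the local_irq_save()/local_irq_restore() pair around the copies.

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/irqflags.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical caller, for illustration only. */
static int example_sg_roundtrip(void)
{
	struct sg_table st;
	struct scatterlist *sg;
	void *chunks[2] = { NULL, NULL };
	char src[256], dst[256];
	unsigned long flags;
	int i, ret;

	ret = sg_alloc_table(&st, 2, GFP_KERNEL);
	if (ret)
		return ret;

	/* Back each scatterlist entry with a 128-byte kmalloc'ed chunk. */
	for_each_sg(st.sgl, sg, st.nents, i) {
		chunks[i] = kmalloc(128, GFP_KERNEL);
		if (!chunks[i]) {
			ret = -ENOMEM;
			goto out;
		}
		sg_set_buf(sg, chunks[i], 128);
	}

	memset(src, 0xaa, sizeof(src));

	/* The copy helpers expect interrupts off in this version. */
	local_irq_save(flags);
	sg_copy_from_buffer(st.sgl, st.nents, src, sizeof(src));
	sg_copy_to_buffer(st.sgl, st.nents, dst, sizeof(dst));
	local_irq_restore(flags);

	ret = memcmp(src, dst, sizeof(src)) ? -EIO : 0;
out:
	for (i = 0; i < 2; i++)
		kfree(chunks[i]);
	sg_free_table(&st);
	return ret;
}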