X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fmd%2Fdm-crypt.c;h=4c2471ee054aa099a8777de111e7b219fe6c6eb9;hb=ac6d141dc7d1d0eeec850d1b451dca83ce649684;hp=3783cf97885016d8509e69d1ee16dd8a9fe9837f;hpb=8b004457168995f2ae2a35327f885183a9e74141;p=linux-2.6

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3783cf9788..4c2471ee05 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -16,9 +16,11 @@
 #include <linux/slab.h>
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
+#include <linux/backing-dev.h>
 #include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
+#include <asm/unaligned.h>
 
 #include "dm.h"
 
@@ -35,6 +37,7 @@ struct crypt_io {
        struct work_struct work;
        atomic_t pending;
        int error;
+       int post_process;
 };
 
 /*
@@ -76,13 +79,17 @@ struct crypt_config {
         */
        mempool_t *io_pool;
        mempool_t *page_pool;
+       struct bio_set *bs;
 
        /*
         * crypto related data
         */
        struct crypt_iv_operations *iv_gen_ops;
        char *iv_mode;
-       struct crypto_cipher *iv_gen_private;
+       union {
+               struct crypto_cipher *essiv_tfm;
+               int benbi_shift;
+       } iv_gen_private;
        sector_t iv_offset;
        unsigned int iv_size;
 
@@ -94,11 +101,11 @@
        u8 key[0];
 };
 
-#define MIN_IOS        256
+#define MIN_IOS        16
 #define MIN_POOL_PAGES 32
 #define MIN_BIO_PAGES  8
 
-static kmem_cache_t *_crypt_io_pool;
+static struct kmem_cache *_crypt_io_pool;
 
 /*
  * Different IV generation algorithms:
@@ -110,6 +117,9 @@ static kmem_cache_t *_crypt_io_pool;
  * encrypted with the bulk cipher using a salt as key. The salt
  * should be derived from the bulk cipher's key via hashing.
  *
+ * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
+ *        (needed for LRW-32-AES and possibly other narrow block modes)
+ *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
@@ -188,21 +198,61 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
        }
        kfree(salt);
 
-       cc->iv_gen_private = essiv_tfm;
+       cc->iv_gen_private.essiv_tfm = essiv_tfm;
        return 0;
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
-       crypto_free_cipher(cc->iv_gen_private);
-       cc->iv_gen_private = NULL;
+       crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
+       cc->iv_gen_private.essiv_tfm = NULL;
 }
 
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
        memset(iv, 0, cc->iv_size);
        *(u64 *)iv = cpu_to_le64(sector);
-       crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
+       crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
+       return 0;
+}
+
+static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
+                             const char *opts)
+{
+       unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
+       int log = ilog2(bs);
+
+       /* we need to calculate how far we must shift the sector count
+        * to get the cipher block count, we use this shift in _gen */
+
+       if (1 << log != bs) {
+               ti->error = "cypher blocksize is not a power of 2";
+               return -EINVAL;
+       }
+
+       if (log > 9) {
+               ti->error = "cypher blocksize is > 512";
+               return -EINVAL;
+       }
+
+       cc->iv_gen_private.benbi_shift = 9 - log;
+
+       return 0;
+}
+
+static void crypt_iv_benbi_dtr(struct crypt_config *cc)
+{
+}
+
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+       __be64 val;
+
+       memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
+
+       val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
+       put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
+
        return 0;
 }
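The benbi generator maps the 512-byte sector number to a big-endian count of cipher
blocks, numbered from 1, and stores that count in the last 8 bytes of the IV; the
constructor only has to remember how far to shift between sector size and cipher block
size. A standalone sketch of the same arithmetic follows (benbi_iv() and its parameters
are illustrative names, not part of the patch; a power-of-2 block size no larger than
512 is assumed, as the constructor enforces):

/* Standalone sketch of the benbi IV arithmetic; not kernel code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void benbi_iv(uint8_t *iv, size_t iv_size,
                     uint64_t sector, unsigned block_size)
{
        unsigned log = 0, shift;
        uint64_t count;
        size_t i;

        while ((1u << log) < block_size)
                log++;                  /* ilog2() for a power-of-2 size */

        shift = 9 - log;                /* 512-byte sectors -> cipher blocks */
        count = (sector << shift) + 1;  /* benbi counts from 1, not 0 */

        memset(iv, 0, iv_size);
        for (i = 0; i < 8; i++)         /* cpu_to_be64 + put_unaligned */
                iv[iv_size - 1 - i] = (uint8_t)(count >> (8 * i));
}

int main(void)
{
        uint8_t iv[16];
        size_t i;

        /* AES has 16-byte blocks: 512/16 = 32 blocks/sector, shift = 5 */
        benbi_iv(iv, sizeof(iv), 7, 16);
        for (i = 0; i < sizeof(iv); i++)
                printf("%02x", iv[i]);
        printf("\n");   /* last 8 bytes hold be64(7*32 + 1) = 225 = 0xe1 */
        return 0;
}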
@@ -216,13 +266,18 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
        .generator = crypt_iv_essiv_gen
 };
 
+static struct crypt_iv_operations crypt_iv_benbi_ops = {
+       .ctr       = crypt_iv_benbi_ctr,
+       .dtr       = crypt_iv_benbi_dtr,
+       .generator = crypt_iv_benbi_gen
+};
+
 static int crypt_convert_scatterlist(struct crypt_config *cc,
                                     struct scatterlist *out,
                                     struct scatterlist *in,
                                     unsigned int length, int write,
                                     sector_t sector)
 {
-       u8 iv[cc->iv_size];
+       u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
        struct blkcipher_desc desc = {
                .tfm = cc->tfm,
                .info = iv,
@@ -310,6 +365,14 @@ static int crypt_convert(struct crypt_config *cc,
        return r;
 }
 
+ static void dm_crypt_bio_destructor(struct bio *bio)
+ {
+       struct crypt_io *io = bio->bi_private;
+       struct crypt_config *cc = io->target->private;
+
+       bio_free(bio, cc->bs);
+ }
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
@@ -324,18 +387,17 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
        gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
        unsigned int i;
 
-       /*
-        * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
-        * to fail earlier. This is not necessary but increases throughput.
-        * FIXME: Is this really intelligent?
-        */
-       if (base_bio)
-               clone = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
-       else
-               clone = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
+       if (base_bio) {
+               clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
+               __bio_clone(clone, base_bio);
+       } else
+               clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
+
        if (!clone)
                return NULL;
 
+       clone->bi_destructor = dm_crypt_bio_destructor;
+
        /* if the last bio was not complete, continue where that one ended */
        clone->bi_idx = *bio_vec_idx;
        clone->bi_vcnt = *bio_vec_idx;
@@ -445,15 +507,14 @@ static void dec_pending(struct crypt_io *io, int error)
 /*
  * kcryptd:
  *
  * Needed because it would be very unwise to do decryption in an
- * interrupt context, so bios returning from read requests get
- * queued here.
+ * interrupt context.
  */
 static struct workqueue_struct *_kcryptd_workqueue;
-static void kcryptd_do_work(void *data);
+static void kcryptd_do_work(struct work_struct *work);
 
 static void kcryptd_queue_io(struct crypt_io *io)
 {
-       INIT_WORK(&io->work, kcryptd_do_work, io);
+       INIT_WORK(&io->work, kcryptd_do_work);
        queue_work(_kcryptd_workqueue, &io->work);
 }
 
@@ -470,12 +531,10 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
        if (!read_io)
                crypt_free_buffer_pages(cc, clone, done);
 
+       /* keep going - not finished yet */
        if (unlikely(clone->bi_size))
                return 1;
 
-       /*
-        * successful reads are decrypted by the worker thread
-        */
        if (!read_io)
                goto out;
 
@@ -485,6 +544,7 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
        }
 
        bio_put(clone);
+       io->post_process = 1;
        kcryptd_queue_io(io);
 
        return 0;
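The INIT_WORK and kcryptd_do_work changes track the 2.6.20 workqueue rework: work
functions now receive the work_struct pointer itself rather than a caller-supplied
void *data, and recover their enclosing object with container_of(). A minimal userspace
sketch of that pattern (the local container_of macro and crypt_io_like type are
stand-ins for illustration only):

/* Userspace sketch of the 2.6.20-style work item pattern. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
        void (*func)(struct work_struct *work);
};

struct crypt_io_like {                  /* stand-in for struct crypt_io */
        int post_process;
        struct work_struct work;        /* embedded, not pointed-to */
};

static void do_work(struct work_struct *work)
{
        /* No more void *data: climb from the member to the container. */
        struct crypt_io_like *io =
                container_of(work, struct crypt_io_like, work);

        printf("post_process = %d\n", io->post_process);
}

int main(void)
{
        struct crypt_io_like io = { .post_process = 1 };

        io.work.func = do_work;         /* INIT_WORK(&io->work, do_work) */
        io.work.func(&io.work);         /* what queue_work() leads to */
        return 0;
}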
@@ -504,60 +564,99 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
        clone->bi_rw = io->base_bio->bi_rw;
 }
 
-static struct bio *clone_read(struct crypt_io *io,
-                             sector_t sector)
+static void process_read(struct crypt_io *io)
 {
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
+       sector_t sector = base_bio->bi_sector - io->target->begin;
+
+       atomic_inc(&io->pending);
 
        /*
         * The block layer might modify the bvec array, so always
         * copy the required bvecs because we need the original
         * one in order to decrypt the whole bio data *afterwards*.
         */
-       clone = bio_alloc(GFP_NOIO, bio_segments(base_bio));
-       if (unlikely(!clone))
-               return NULL;
+       clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
+       if (unlikely(!clone)) {
+               dec_pending(io, -ENOMEM);
+               return;
+       }
 
        clone_init(io, clone);
+       clone->bi_destructor = dm_crypt_bio_destructor;
        clone->bi_idx = 0;
        clone->bi_vcnt = bio_segments(base_bio);
        clone->bi_size = base_bio->bi_size;
+       clone->bi_sector = cc->start + sector;
        memcpy(clone->bi_io_vec, bio_iovec(base_bio),
               sizeof(struct bio_vec) * clone->bi_vcnt);
 
-       clone->bi_sector = cc->start + sector;
-
-       return clone;
+       generic_make_request(clone);
 }
 
-static struct bio *clone_write(struct crypt_io *io,
-                              sector_t sector,
-                              unsigned *bvec_idx,
-                              struct convert_context *ctx)
+static void process_write(struct crypt_io *io)
 {
        struct crypt_config *cc = io->target->private;
        struct bio *base_bio = io->base_bio;
        struct bio *clone;
+       struct convert_context ctx;
+       unsigned remaining = base_bio->bi_size;
+       sector_t sector = base_bio->bi_sector - io->target->begin;
+       unsigned bvec_idx = 0;
 
-       clone = crypt_alloc_buffer(cc, base_bio->bi_size,
-                                  io->first_clone, bvec_idx);
-       if (!clone)
-               return NULL;
+       atomic_inc(&io->pending);
 
-       ctx->bio_out = clone;
+       crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);
 
-       if (unlikely(crypt_convert(cc, ctx) < 0)) {
-               crypt_free_buffer_pages(cc, clone,
-                                       clone->bi_size);
-               bio_put(clone);
-               return NULL;
-       }
+       /*
+        * The allocated buffers can be smaller than the whole bio,
+        * so repeat the whole process until all the data can be handled.
+        */
+       while (remaining) {
+               clone = crypt_alloc_buffer(cc, base_bio->bi_size,
+                                          io->first_clone, &bvec_idx);
+               if (unlikely(!clone)) {
+                       dec_pending(io, -ENOMEM);
+                       return;
+               }
 
-       clone_init(io, clone);
-       clone->bi_sector = cc->start + sector;
+               ctx.bio_out = clone;
 
-       return clone;
+               if (unlikely(crypt_convert(cc, &ctx) < 0)) {
+                       crypt_free_buffer_pages(cc, clone, clone->bi_size);
+                       bio_put(clone);
+                       dec_pending(io, -EIO);
+                       return;
+               }
+
+               clone_init(io, clone);
+               clone->bi_sector = cc->start + sector;
+
+               if (!io->first_clone) {
+                       /*
+                        * hold a reference to the first clone, because it
+                        * holds the bio_vec array and that can't be freed
+                        * before all other clones are released
+                        */
+                       bio_get(clone);
+                       io->first_clone = clone;
+               }
+
+               remaining -= clone->bi_size;
+               sector += bio_sectors(clone);
+
+               /* prevent bio_put of first_clone */
+               if (remaining)
+                       atomic_inc(&io->pending);
+
+               generic_make_request(clone);
+
+               /* out of memory -> run queues */
+               if (remaining)
+                       congestion_wait(bio_data_dir(clone), HZ/100);
+       }
 }
 
@@ -571,11 +670,16 @@ static void process_read_endio(struct crypt_io *io)
        dec_pending(io, crypt_convert(cc, &ctx));
 }
 
-static void kcryptd_do_work(void *data)
+static void kcryptd_do_work(struct work_struct *work)
 {
-       struct crypt_io *io = data;
+       struct crypt_io *io = container_of(work, struct crypt_io, work);
 
-       process_read_endio(io);
+       if (io->post_process)
+               process_read_endio(io);
+       else if (bio_data_dir(io->base_bio) == READ)
+               process_read(io);
+       else
+               process_write(io);
 }
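Completion accounting now lives with process_read()/process_write(): each takes a
reference on io->pending before issuing clones, process_write() takes an extra one per
additional clone, and every completion path drops one through dec_pending(), which tears
the io down only when the count reaches zero. A compressed, single-threaded sketch of
that idea (a plain int stands in for atomic_t; io_like and the allocation calls are
illustrative, not the driver's types):

/* Sketch of the io->pending refcount: each in-flight clone holds one
 * reference and the last dec_pending() call frees the io. */
#include <stdio.h>
#include <stdlib.h>

struct io_like {
        int pending;            /* stands in for atomic_t pending */
        int error;
};

static void dec_pending(struct io_like *io, int error)
{
        if (error)
                io->error = error;
        if (--io->pending)      /* atomic_dec_and_test() in the driver */
                return;
        printf("complete, error=%d\n", io->error);
        free(io);               /* mempool_free() + bio_endio() analogue */
}

int main(void)
{
        struct io_like *io = calloc(1, sizeof(*io));
        int clones = 3, i;

        io->pending = 0;        /* crypt_map() now starts at zero... */
        for (i = 0; i < clones; i++)
                io->pending++;  /* ...and each issued clone takes a ref */
        for (i = 0; i < clones; i++)
                dec_pending(io, 0);     /* clone completions */
        return 0;
}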
@@ -716,7 +820,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        cc->tfm = tfm;
 
        /*
-        * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
+        * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
         * See comments at iv code
         */
 
@@ -726,6 +830,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                cc->iv_gen_ops = &crypt_iv_plain_ops;
        else if (strcmp(ivmode, "essiv") == 0)
                cc->iv_gen_ops = &crypt_iv_essiv_ops;
+       else if (strcmp(ivmode, "benbi") == 0)
+               cc->iv_gen_ops = &crypt_iv_benbi_ops;
        else {
                ti->error = "Invalid IV mode";
                goto bad2;
@@ -761,6 +867,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad4;
        }
 
+       cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4);
+       if (!cc->bs) {
+               ti->error = "Cannot allocate crypt bioset";
+               goto bad_bs;
+       }
+
        if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
                ti->error = "Error setting key";
                goto bad5;
@@ -800,6 +912,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        return 0;
 
 bad5:
+       bioset_free(cc->bs);
+bad_bs:
        mempool_destroy(cc->page_pool);
 bad4:
        mempool_destroy(cc->io_pool);
@@ -819,6 +933,7 @@ static void crypt_dtr(struct dm_target *ti)
 {
        struct crypt_config *cc = (struct crypt_config *) ti->private;
 
+       bioset_free(cc->bs);
        mempool_destroy(cc->page_pool);
        mempool_destroy(cc->io_pool);
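The new bad_bs label slots into crypt_ctr()'s existing unwind ladder: each label sits
just above the cleanup for the resource acquired before it, so a failure at any step
falls through and releases everything earlier in reverse order. The idiom reduced to a
sketch (the allocation helper and label names loosely mirror the patch but are
illustrative; a real constructor would keep the resources on success rather than free
them):

/* Sketch of the goto-unwind ladder that bad_bs extends. */
#include <stdio.h>
#include <stdlib.h>

static void *alloc_named(const char *name)
{
        printf("alloc %s\n", name);
        return malloc(1);
}

static int ctr(void)
{
        void *io_pool, *page_pool, *bs;

        io_pool = alloc_named("io_pool");
        if (!io_pool)
                goto bad3;

        page_pool = alloc_named("page_pool");
        if (!page_pool)
                goto bad4;

        bs = alloc_named("bioset");
        if (!bs)
                goto bad_bs;    /* the label this patch adds */

        /* a later failure would "goto bad5" and free the bioset first */
        free(bs);               /* freed here only to keep the sketch leak-free */
        free(page_pool);
        free(io_pool);
        return 0;

bad_bs:
        free(page_pool);        /* fall through: undo earlier steps... */
bad4:
        free(io_pool);          /* ...in reverse order of acquisition */
bad3:
        return -1;
}

int main(void)
{
        return ctr() ? 1 : 0;
}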
@@ -838,76 +953,22 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 {
        struct crypt_config *cc = ti->private;
        struct crypt_io *io;
-       struct convert_context ctx;
-       struct bio *clone;
-       unsigned int remaining = bio->bi_size;
-       sector_t sector = bio->bi_sector - ti->begin;
-       unsigned int bvec_idx = 0;
 
        io = mempool_alloc(cc->io_pool, GFP_NOIO);
        io->target = ti;
        io->base_bio = bio;
        io->first_clone = NULL;
-       io->error = 0;
-       atomic_set(&io->pending, 1); /* hold a reference */
-
-       if (bio_data_dir(bio) == WRITE)
-               crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);
-
-       /*
-        * The allocated buffers can be smaller than the whole bio,
-        * so repeat the whole process until all the data can be handled.
-        */
-       while (remaining) {
-               if (bio_data_dir(bio) == WRITE)
-                       clone = clone_write(io, sector, &bvec_idx, &ctx);
-               else
-                       clone = clone_read(io, sector);
-               if (!clone)
-                       goto cleanup;
-
-               if (!io->first_clone) {
-                       /*
-                        * hold a reference to the first clone, because it
-                        * holds the bio_vec array and that can't be freed
-                        * before all other clones are released
-                        */
-                       bio_get(clone);
-                       io->first_clone = clone;
-               }
-               atomic_inc(&io->pending);
-
-               remaining -= clone->bi_size;
-               sector += bio_sectors(clone);
-
-               generic_make_request(clone);
-
-               /* out of memory -> run queues */
-               if (remaining)
-                       blk_congestion_wait(bio_data_dir(clone), HZ/100);
-       }
-
-       /* drop reference, clones could have returned before we reach this */
-       dec_pending(io, 0);
-       return 0;
-
-cleanup:
-       if (io->first_clone) {
-               dec_pending(io, -ENOMEM);
-               return 0;
-       }
+       io->error = io->post_process = 0;
+       atomic_set(&io->pending, 0);
+       kcryptd_queue_io(io);
 
-       /* if no bio has been dispatched yet, we can directly return the error */
-       mempool_free(io, cc->io_pool);
-       return -ENOMEM;
+       return DM_MAPIO_SUBMITTED;
 }
 
 static int crypt_status(struct dm_target *ti, status_type_t type,
                        char *result, unsigned int maxlen)
 {
        struct crypt_config *cc = (struct crypt_config *) ti->private;
-       const char *cipher;
-       const char *chainmode = NULL;
        unsigned int sz = 0;
 
        switch (type) {
@@ -916,14 +977,11 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
                break;
 
        case STATUSTYPE_TABLE:
-               cipher = crypto_blkcipher_name(cc->tfm);
-
-               chainmode = cc->chainmode;
-
                if (cc->iv_mode)
-                       DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
+                       DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
+                              cc->iv_mode);
                else
-                       DMEMIT("%s-%s ", cipher, chainmode);
+                       DMEMIT("%s-%s ", cc->cipher, cc->chainmode);
 
                if (cc->key_size > 0) {
                        if ((maxlen - sz) < ((cc->key_size << 1) + 1))
@@ -999,7 +1057,7 @@ error:
 
 static struct target_type crypt_target = {
        .name   = "crypt",
-       .version= {1, 2, 0},
+       .version= {1, 3, 0},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
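Caching the cipher and chainmode as strings in crypt_config lets STATUSTYPE_TABLE
rebuild the table's cipher spec without querying the crypto layer, including the new
benbi mode (e.g. aes-lrw-benbi). A sketch of the string the DMEMIT calls assemble
(emit_table_line() and the example values are hypothetical, not the driver's API):

/* Sketch of the cipher-chainmode[-ivmode] status string built above. */
#include <stdio.h>

static void emit_table_line(const char *cipher, const char *chainmode,
                            const char *iv_mode)
{
        if (iv_mode)
                printf("%s-%s-%s\n", cipher, chainmode, iv_mode);
        else
                printf("%s-%s\n", cipher, chainmode);
}

int main(void)
{
        emit_table_line("aes", "cbc", "essiv:sha256"); /* aes-cbc-essiv:sha256 */
        emit_table_line("aes", "lrw", "benbi");        /* the new benbi mode */
        emit_table_line("aes", "ecb", NULL);           /* no ivmode configured */
        return 0;
}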