X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fmd%2Fdm-crypt.c;h=ab6a61db63ce2248b5553ec43678fab61642e9a7;hb=53ffe3b440aa85af6fc4eda09b2d44bcdd312d4d;hp=44e1aa30e3f66b19300b3689cd7d74679e3c06cc;hpb=899c95d36c896f9fb7bc5f4f03b4abd86bda292c;p=linux-2.6

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 44e1aa30e3..ab6a61db63 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,11 +1,12 @@
 /*
  * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
 
+#include <linux/completion.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -31,6 +32,7 @@
  * context holding the current state of a multi-part conversion
  */
 struct convert_context {
+	struct completion restart;
 	struct bio *bio_in;
 	struct bio *bio_out;
 	unsigned int offset_in;
@@ -38,6 +40,7 @@ struct convert_context {
 	unsigned int idx_in;
 	unsigned int idx_out;
 	sector_t sector;
+	atomic_t pending;
 };
 
 /*
@@ -55,6 +58,11 @@ struct dm_crypt_io {
 	sector_t sector;
 };
 
+struct dm_crypt_request {
+	struct scatterlist sg_in;
+	struct scatterlist sg_out;
+};
+
 struct crypt_config;
 
 struct crypt_iv_operations {
@@ -75,15 +83,18 @@ struct crypt_config {
 	sector_t start;
 
 	/*
-	 * pool for per bio private data and
-	 * for encryption buffer pages
+	 * pool for per bio private data, crypto requests and
+	 * encryption requests/buffer pages
 	 */
 	mempool_t *io_pool;
+	mempool_t *req_pool;
 	mempool_t *page_pool;
 	struct bio_set *bs;
 
 	struct workqueue_struct *io_queue;
 	struct workqueue_struct *crypt_queue;
+	wait_queue_head_t writeq;
+
 	/*
 	 * crypto related data
 	 */
@@ -96,9 +107,25 @@ struct crypt_config {
 	sector_t iv_offset;
 	unsigned int iv_size;
 
+	/*
+	 * Layout of each crypto request:
+	 *
+	 *   struct ablkcipher_request
+	 *      context
+	 *      padding
+	 *   struct dm_crypt_request
+	 *      padding
+	 *   IV
+	 *
+	 * The padding is added so that dm_crypt_request and the IV are
+	 * correctly aligned.
+ */ + unsigned int dmreq_start; + struct ablkcipher_request *req; + char cipher[CRYPTO_MAX_ALG_NAME]; char chainmode[CRYPTO_MAX_ALG_NAME]; - struct crypto_blkcipher *tfm; + struct crypto_ablkcipher *tfm; unsigned long flags; unsigned int key_size; u8 key[0]; @@ -192,7 +219,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, return PTR_ERR(essiv_tfm); } if (crypto_cipher_blocksize(essiv_tfm) != - crypto_blkcipher_ivsize(cc->tfm)) { + crypto_ablkcipher_ivsize(cc->tfm)) { ti->error = "Block size of ESSIV cipher does " "not match IV size of block cipher"; crypto_free_cipher(essiv_tfm); @@ -229,7 +256,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - unsigned int bs = crypto_blkcipher_blocksize(cc->tfm); + unsigned bs = crypto_ablkcipher_blocksize(cc->tfm); int log = ilog2(bs); /* we need to calculate how far we must shift the sector count @@ -293,38 +320,6 @@ static struct crypt_iv_operations crypt_iv_null_ops = { .generator = crypt_iv_null_gen }; -static int -crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, - struct scatterlist *in, unsigned int length, - int write, sector_t sector) -{ - u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64)))); - struct blkcipher_desc desc = { - .tfm = cc->tfm, - .info = iv, - .flags = CRYPTO_TFM_REQ_MAY_SLEEP, - }; - int r; - - if (cc->iv_gen_ops) { - r = cc->iv_gen_ops->generator(cc, iv, sector); - if (r < 0) - return r; - - if (write) - r = crypto_blkcipher_encrypt_iv(&desc, out, in, length); - else - r = crypto_blkcipher_decrypt_iv(&desc, out, in, length); - } else { - if (write) - r = crypto_blkcipher_encrypt(&desc, out, in, length); - else - r = crypto_blkcipher_decrypt(&desc, out, in, length); - } - - return r; -} - static void crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx, struct bio *bio_out, struct bio *bio_in, @@ -337,6 +332,72 @@ static void crypt_convert_init(struct crypt_config *cc, ctx->idx_in = bio_in ? bio_in->bi_idx : 0; ctx->idx_out = bio_out ? 
bio_out->bi_idx : 0; ctx->sector = sector + cc->iv_offset; + init_completion(&ctx->restart); + atomic_set(&ctx->pending, 1); +} + +static int crypt_convert_block(struct crypt_config *cc, + struct convert_context *ctx, + struct ablkcipher_request *req) +{ + struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); + struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); + struct dm_crypt_request *dmreq; + u8 *iv; + int r = 0; + + dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start); + iv = (u8 *)ALIGN((unsigned long)(dmreq + 1), + crypto_ablkcipher_alignmask(cc->tfm) + 1); + + sg_init_table(&dmreq->sg_in, 1); + sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, + bv_in->bv_offset + ctx->offset_in); + + sg_init_table(&dmreq->sg_out, 1); + sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, + bv_out->bv_offset + ctx->offset_out); + + ctx->offset_in += 1 << SECTOR_SHIFT; + if (ctx->offset_in >= bv_in->bv_len) { + ctx->offset_in = 0; + ctx->idx_in++; + } + + ctx->offset_out += 1 << SECTOR_SHIFT; + if (ctx->offset_out >= bv_out->bv_len) { + ctx->offset_out = 0; + ctx->idx_out++; + } + + if (cc->iv_gen_ops) { + r = cc->iv_gen_ops->generator(cc, iv, ctx->sector); + if (r < 0) + return r; + } + + ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, + 1 << SECTOR_SHIFT, iv); + + if (bio_data_dir(ctx->bio_in) == WRITE) + r = crypto_ablkcipher_encrypt(req); + else + r = crypto_ablkcipher_decrypt(req); + + return r; +} + +static void kcryptd_async_done(struct crypto_async_request *async_req, + int error); +static void crypt_alloc_req(struct crypt_config *cc, + struct convert_context *ctx) +{ + if (!cc->req) + cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); + ablkcipher_request_set_tfm(cc->req, cc->tfm); + ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + kcryptd_async_done, ctx); } /* @@ -345,41 +406,43 @@ static void crypt_convert_init(struct crypt_config *cc, static int crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { - int r = 0; + int r; while(ctx->idx_in < ctx->bio_in->bi_vcnt && ctx->idx_out < ctx->bio_out->bi_vcnt) { - struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); - struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); - struct scatterlist sg_in, sg_out; - - sg_init_table(&sg_in, 1); - sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, bv_in->bv_offset + ctx->offset_in); - - sg_init_table(&sg_out, 1); - sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, bv_out->bv_offset + ctx->offset_out); - - ctx->offset_in += sg_in.length; - if (ctx->offset_in >= bv_in->bv_len) { - ctx->offset_in = 0; - ctx->idx_in++; - } - ctx->offset_out += sg_out.length; - if (ctx->offset_out >= bv_out->bv_len) { - ctx->offset_out = 0; - ctx->idx_out++; + crypt_alloc_req(cc, ctx); + + atomic_inc(&ctx->pending); + + r = crypt_convert_block(cc, ctx, cc->req); + + switch (r) { + /* async */ + case -EBUSY: + wait_for_completion(&ctx->restart); + INIT_COMPLETION(ctx->restart); + /* fall through*/ + case -EINPROGRESS: + cc->req = NULL; + ctx->sector++; + continue; + + /* sync */ + case 0: + atomic_dec(&ctx->pending); + ctx->sector++; + cond_resched(); + continue; + + /* error */ + default: + atomic_dec(&ctx->pending); + return r; } - - r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length, - bio_data_dir(ctx->bio_in) == WRITE, ctx->sector); - if (r < 0) - break; - - ctx->sector++; } - return r; + return 0; } static void 
dm_crypt_bio_destructor(struct bio *bio) @@ -555,6 +618,11 @@ static void kcryptd_io_read(struct dm_crypt_io *io) static void kcryptd_io_write(struct dm_crypt_io *io) { + struct bio *clone = io->ctx.bio_out; + struct crypt_config *cc = io->target->private; + + generic_make_request(clone); + wake_up(&cc->writeq); } static void kcryptd_io(struct work_struct *work) @@ -575,7 +643,8 @@ static void kcryptd_queue_io(struct dm_crypt_io *io) queue_work(cc->io_queue, &io->work); } -static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error) +static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, + int error, int async) { struct bio *clone = io->ctx.bio_out; struct crypt_config *cc = io->target->private; @@ -593,8 +662,12 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int error) clone->bi_sector = cc->start + io->sector; io->sector += bio_sectors(clone); - atomic_inc(&io->pending); - generic_make_request(clone); + if (async) + kcryptd_queue_io(io); + else { + atomic_inc(&io->pending); + generic_make_request(clone); + } } static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io) @@ -622,13 +695,21 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io) r = crypt_convert(cc, &io->ctx); - kcryptd_crypt_write_io_submit(io, r); - if (unlikely(r < 0)) - return; + if (atomic_dec_and_test(&io->ctx.pending)) { + /* processed, no running async crypto */ + kcryptd_crypt_write_io_submit(io, r, 0); + if (unlikely(r < 0)) + return; + } else + atomic_inc(&io->pending); /* out of memory -> run queues */ - if (unlikely(remaining)) + if (unlikely(remaining)) { + /* wait for async crypto then reinitialize pending */ + wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); + atomic_set(&io->ctx.pending, 1); congestion_wait(WRITE, HZ/100); + } } } @@ -660,12 +741,40 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) struct crypt_config *cc = io->target->private; int r = 0; + atomic_inc(&io->pending); + crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, io->sector); r = crypt_convert(cc, &io->ctx); - kcryptd_crypt_read_done(io, r); + if (atomic_dec_and_test(&io->ctx.pending)) + kcryptd_crypt_read_done(io, r); + + crypt_dec_pending(io); +} + +static void kcryptd_async_done(struct crypto_async_request *async_req, + int error) +{ + struct convert_context *ctx = async_req->data; + struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); + struct crypt_config *cc = io->target->private; + + if (error == -EINPROGRESS) { + complete(&ctx->restart); + return; + } + + mempool_free(ablkcipher_request_cast(async_req), cc->req_pool); + + if (!atomic_dec_and_test(&ctx->pending)) + return; + + if (bio_data_dir(io->base_bio) == READ) + kcryptd_crypt_read_done(io, error); + else + kcryptd_crypt_write_io_submit(io, error, 1); } static void kcryptd_crypt(struct work_struct *work) @@ -759,7 +868,7 @@ static int crypt_wipe_key(struct crypt_config *cc) static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct crypt_config *cc; - struct crypto_blkcipher *tfm; + struct crypto_ablkcipher *tfm; char *tmp; char *cipher; char *chainmode; @@ -813,7 +922,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_cipher; } - tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); + tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0); if (IS_ERR(tfm)) { ti->error = "Error allocating crypto tfm"; goto bad_cipher; @@ -847,7 +956,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned 
int argc, char **argv) cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) goto bad_ivmode; - cc->iv_size = crypto_blkcipher_ivsize(tfm); + cc->iv_size = crypto_ablkcipher_ivsize(tfm); if (cc->iv_size) /* at least a 64 bit sector number should fit in our buffer */ cc->iv_size = max(cc->iv_size, @@ -867,6 +976,20 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_slab_pool; } + cc->dmreq_start = sizeof(struct ablkcipher_request); + cc->dmreq_start += crypto_ablkcipher_reqsize(tfm); + cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); + cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) & + ~(crypto_tfm_ctx_alignment() - 1); + + cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + + sizeof(struct dm_crypt_request) + cc->iv_size); + if (!cc->req_pool) { + ti->error = "Cannot allocate crypt request mempool"; + goto bad_req_pool; + } + cc->req = NULL; + cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); if (!cc->page_pool) { ti->error = "Cannot allocate page mempool"; @@ -879,7 +1002,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_bs; } - if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) { + if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) { ti->error = "Error setting key"; goto bad_device; } @@ -926,6 +1049,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_crypt_queue; } + init_waitqueue_head(&cc->writeq); ti->private = cc; return 0; @@ -940,12 +1064,14 @@ bad_device: bad_bs: mempool_destroy(cc->page_pool); bad_page_pool: + mempool_destroy(cc->req_pool); +bad_req_pool: mempool_destroy(cc->io_pool); bad_slab_pool: if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); bad_ivmode: - crypto_free_blkcipher(tfm); + crypto_free_ablkcipher(tfm); bad_cipher: /* Must zero key material before freeing */ memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); @@ -960,14 +1086,18 @@ static void crypt_dtr(struct dm_target *ti) destroy_workqueue(cc->io_queue); destroy_workqueue(cc->crypt_queue); + if (cc->req) + mempool_free(cc->req, cc->req_pool); + bioset_free(cc->bs); mempool_destroy(cc->page_pool); + mempool_destroy(cc->req_pool); mempool_destroy(cc->io_pool); kfree(cc->iv_mode); if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); - crypto_free_blkcipher(cc->tfm); + crypto_free_ablkcipher(cc->tfm); dm_put_device(ti, cc->dev); /* Must zero key material before freeing */
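
A note on the request layout introduced above: crypt_ctr() sizes each req_pool element to hold the ablkcipher_request, the transform's private request context, the struct dm_crypt_request and the IV, with padding so the last two are suitably aligned, and crypt_convert_block() later recovers the dmreq and IV pointers from that layout. The standalone C sketch below replays the same arithmetic with made-up sizes; REQSIZE, CTX_ALIGN, ALIGNMASK, IV_SIZE and the *_stub structs are illustrative stand-ins for crypto_ablkcipher_reqsize(), crypto_tfm_ctx_alignment(), crypto_ablkcipher_alignmask(), cc->iv_size and the real structures, not part of the patch.

/*
 * Illustrative only: the request-layout arithmetic from crypt_ctr() and
 * crypt_convert_block(), replayed in plain userspace C with made-up sizes.
 */
#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

struct ablkcipher_request_stub { void *tfm; unsigned long flags; };
struct dm_crypt_request_stub   { void *sg_in; void *sg_out; };

int main(void)
{
	const size_t REQSIZE   = 24;   /* tfm-specific request context size */
	const size_t CTX_ALIGN = 8;    /* crypto_tfm_ctx_alignment()        */
	const size_t ALIGNMASK = 15;   /* crypto_ablkcipher_alignmask()     */
	const size_t IV_SIZE   = 16;   /* cc->iv_size                       */

	/* Same steps as crypt_ctr(): request + context, align for the
	 * context, then leave room so the IV can be aligned as well. */
	size_t dmreq_start = sizeof(struct ablkcipher_request_stub) + REQSIZE;
	dmreq_start = ALIGN(dmreq_start, CTX_ALIGN);
	dmreq_start += ALIGNMASK & ~(CTX_ALIGN - 1);

	/* Element size handed to mempool_create_kmalloc_pool(). */
	size_t element = dmreq_start +
			 sizeof(struct dm_crypt_request_stub) + IV_SIZE;

	/* Offsets crypt_convert_block() derives from the request pointer.
	 * (The kernel aligns the absolute address; offsets suffice here
	 * because the kmalloc'd base is itself sufficiently aligned.) */
	size_t dmreq_off = dmreq_start;
	size_t iv_off = ALIGN(dmreq_off + sizeof(struct dm_crypt_request_stub),
			      ALIGNMASK + 1);

	printf("dmreq_start=%zu element=%zu dmreq@+%zu iv@+%zu..+%zu\n",
	       dmreq_start, element, dmreq_off, iv_off, iv_off + IV_SIZE);
	return 0;
}

The extra "alignmask & ~(ctx_alignment - 1)" padding folded into dmreq_start is what keeps the later ALIGN() of the IV pointer from running past the end of the mempool element.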
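
A note on the return-code handling in crypt_convert(): with CRYPTO_TFM_REQ_MAY_BACKLOG set, crypto_ablkcipher_encrypt()/decrypt() can return 0 (completed inline), -EINPROGRESS (queued; kcryptd_async_done() runs later) or -EBUSY (backlogged; the callback first fires with -EINPROGRESS once the request is accepted, which is what complete(&ctx->restart) signals). The sketch below reduces that control flow and the ctx->pending accounting to ordinary userspace C; submit_one_block(), async_done() and the restarted flag are illustrative stand-ins only, not code from the patch.

/*
 * Illustrative only: the per-block control flow of crypt_convert(),
 * reduced to userspace C.  submit_one_block() stands in for
 * crypto_ablkcipher_encrypt()/decrypt().
 */
#include <errno.h>
#include <stdio.h>

static int pending = 1;        /* ctx->pending; 1 = the conversion itself */
static int restarted;          /* ctx->restart, reduced to a plain flag   */

/* Pretend backend: block 0 finishes inline, block 1 goes async, block 2
 * is backlogged first.  Values chosen only to exercise all three paths. */
static int submit_one_block(int blk)
{
	if (blk == 1)
		return -EINPROGRESS;
	if (blk == 2) {
		restarted = 1;          /* "callback" ran with -EINPROGRESS */
		return -EBUSY;
	}
	return 0;
}

/* What kcryptd_async_done() does once a real completion arrives. */
static void async_done(int blk)
{
	printf("block %d completed asynchronously\n", blk);
	if (--pending == 0)
		printf("last reference dropped: finish the bio here\n");
}

int main(void)
{
	for (int blk = 0; blk < 3; blk++) {
		int r;

		pending++;                      /* one ref per submitted block */
		r = submit_one_block(blk);

		switch (r) {
		case -EBUSY:                    /* backlogged: wait for restart */
			while (!restarted)
				;               /* wait_for_completion(&restart) */
			restarted = 0;
			/* fall through - the request is now in progress */
		case -EINPROGRESS:
			async_done(blk);        /* pretend the callback fired */
			continue;
		case 0:                         /* completed synchronously */
			pending--;
			printf("block %d completed inline\n", blk);
			continue;
		default:                        /* hard error: drop ref, bail */
			pending--;
			return 1;
		}
	}

	if (--pending == 0)                     /* crypt_convert()'s final dec */
		printf("no async work outstanding\n");
	return 0;
}

In the real code the initial reference taken in crypt_convert_init() ensures that whoever drops ctx->pending to zero, either crypt_convert()'s caller or the last async callback, is the one that submits or completes the bio.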