#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)
-/*
- * per bio private data
- */
-struct dm_crypt_io {
- struct dm_target *target;
- struct bio *base_bio;
- struct work_struct work;
- atomic_t pending;
- int error;
-};
-
/*
* context holding the current state of a multi-part conversion
*/
unsigned int idx_in;
unsigned int idx_out;
sector_t sector;
- int write;
+};
+
+/*
+ * per bio private data
+ */
+struct dm_crypt_io {
+ struct dm_target *target;
+ struct bio *base_bio;
+ struct work_struct work;
+
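+ /* a single conversion context, embedded so its state lives as long as the io */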
+ struct convert_context ctx;
+
+ atomic_t pending;
+ int error;
};
struct crypt_config;
return -ENOMEM;
}
- sg_set_buf(&sg, cc->key, cc->key_size);
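+ /* sg_init_one() also sets the scatterlist end marker, which sg_set_buf() does not */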
+ sg_init_one(&sg, cc->key, cc->key_size);
desc.tfm = hash_tfm;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
static void crypt_convert_init(struct crypt_config *cc,
struct convert_context *ctx,
struct bio *bio_out, struct bio *bio_in,
- sector_t sector, int write)
+ sector_t sector)
{
ctx->bio_in = bio_in;
ctx->bio_out = bio_out;
ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
ctx->sector = sector + cc->iv_offset;
- ctx->write = write;
}
/*
* Encrypt / decrypt data from one bio to another one (can be the same one)
*/
}
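+ /* the direction is now derived from the bio itself, not a cached ctx->write flag */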
r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
- ctx->write, ctx->sector);
+ bio_data_dir(ctx->bio_in) == WRITE, ctx->sector);
if (r < 0)
break;
struct bio *clone;
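+ /* enough biovecs to cover size, rounded up to whole pages */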
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
- unsigned int i;
+ unsigned i, len;
+ struct page *page;
clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
if (!clone)
clone_init(io, clone);
for (i = 0; i < nr_iovecs; i++) {
- struct bio_vec *bv = bio_iovec_idx(clone, i);
-
- bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
- if (!bv->bv_page)
+ page = mempool_alloc(cc->page_pool, gfp_mask);
+ if (!page)
break;
/*
* if additional pages cannot be allocated without waiting,
* return a partially allocated bio, the caller will then try
* to allocate additional bios while submitting this partial bio
*/
if (i == (MIN_BIO_PAGES - 1))
gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
- bv->bv_offset = 0;
- if (size > PAGE_SIZE)
- bv->bv_len = PAGE_SIZE;
- else
- bv->bv_len = size;
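+ /* bio_add_page() updates bi_size and bi_vcnt, replacing the manual bio_vec setup */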
+ len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+
+ if (!bio_add_page(clone, page, len, 0)) {
+ mempool_free(page, cc->page_pool);
+ break;
+ }
- clone->bi_size += bv->bv_len;
- clone->bi_vcnt++;
- size -= bv->bv_len;
+ size -= len;
}
if (!clone->bi_size) {
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
*/
-static void crypt_dec_pending(struct dm_crypt_io *io, int error)
+static void crypt_dec_pending(struct dm_crypt_io *io)
{
- struct crypt_config *cc = (struct crypt_config *) io->target->private;
-
- if (error < 0)
- io->error = error;
+ struct crypt_config *cc = io->target->private;
if (!atomic_dec_and_test(&io->pending))
return;
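+ /* last reference gone: complete the original bio with the recorded error */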
bio_endio(io->base_bio, io->error);
-
mempool_free(io, cc->io_pool);
}
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->target->private;
- unsigned read_io = bio_data_dir(clone) == READ;
+ unsigned rw = bio_data_dir(clone);
+
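+ /* a clone that completes without BIO_UPTODATE failed even if no error was passed in */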
+ if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
+ error = -EIO;
/*
* free the processed pages
*/
- if (!read_io) {
+ if (rw == WRITE)
crypt_free_buffer_pages(cc, clone);
- goto out;
- }
- if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
- error = -EIO;
- goto out;
+ bio_put(clone);
+
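+ /* a successfully read clone still holds ciphertext; queue it for decryption */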
+ if (rw == READ && !error) {
+ kcryptd_queue_crypt(io);
+ return;
}
- bio_put(clone);
- kcryptd_queue_crypt(io);
- return;
+ if (unlikely(error))
+ io->error = error;
-out:
- bio_put(clone);
- crypt_dec_pending(io, error);
+ crypt_dec_pending(io);
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
*/
clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
if (unlikely(!clone)) {
- crypt_dec_pending(io, -ENOMEM);
+ io->error = -ENOMEM;
+ crypt_dec_pending(io);
return;
}
struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
struct bio *clone;
- struct convert_context ctx;
unsigned remaining = base_bio->bi_size;
sector_t sector = base_bio->bi_sector - io->target->begin;
atomic_inc(&io->pending);
- crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);
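+ /* use the per-io conversion context instead of a stack-local one */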
+ crypt_convert_init(cc, &io->ctx, NULL, base_bio, sector);
/*
* The allocated buffers can be smaller than the whole bio,
while (remaining) {
clone = crypt_alloc_buffer(io, remaining);
if (unlikely(!clone)) {
- crypt_dec_pending(io, -ENOMEM);
+ io->error = -ENOMEM;
+ crypt_dec_pending(io);
return;
}
- ctx.bio_out = clone;
- ctx.idx_out = 0;
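+ /* point the shared context at the newly allocated clone for this chunk */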
+ io->ctx.bio_out = clone;
+ io->ctx.idx_out = 0;
- if (unlikely(crypt_convert(cc, &ctx) < 0)) {
+ if (unlikely(crypt_convert(cc, &io->ctx) < 0)) {
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
- crypt_dec_pending(io, -EIO);
+ io->error = -EIO;
+ crypt_dec_pending(io);
return;
}
/* crypt_convert should have filled the clone bio */
- BUG_ON(ctx.idx_out < clone->bi_vcnt);
+ BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
clone->bi_sector = cc->start + sector;
remaining -= clone->bi_size;
}
}
+static void crypt_read_done(struct dm_crypt_io *io, int error)
+{
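+ /* record any conversion failure as -EIO, then drop this io's reference */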
+ if (unlikely(error < 0))
+ io->error = -EIO;
+
+ crypt_dec_pending(io);
+}
+
static void process_read_endio(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
- struct convert_context ctx;
+ int r = 0;
+
- crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
- io->base_bio->bi_sector - io->target->begin, 0);
+ crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+ io->base_bio->bi_sector - io->target->begin);

- crypt_dec_pending(io, crypt_convert(cc, &ctx));
+ r = crypt_convert(cc, &io->ctx);
+
+ crypt_read_done(io, r);
}
static void kcryptd_do_work(struct work_struct *work)