* Different IV generation algorithms:
*
* plain: the initial vector is the 32-bit little-endian version of the sector
- * number, padded with zeros if neccessary.
+ * number, padded with zeros if necessary.
*
* essiv: "encrypted sector|salt initial vector", the sector number is
* encrypted with the bulk cipher using a salt as key. The salt
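As a rough illustration of the "plain" scheme described in that comment (a sketch, not the dm-crypt code itself; the helper name and signature are made up), generating the IV amounts to zeroing the buffer and storing the low 32 bits of the sector number in little-endian order:

#include <linux/types.h>	/* u8, sector_t, __le32 */
#include <linux/string.h>	/* memset */
#include <asm/byteorder.h>	/* cpu_to_le32 */

/* Sketch only: "plain" IV = 32-bit little-endian sector number, zero padded. */
static void plain_iv_sketch(u8 *iv, unsigned int iv_size, sector_t sector)
{
	memset(iv, 0, iv_size);				/* pad with zeros */
	*(__le32 *)iv = cpu_to_le32(sector & 0xffffffff);	/* low 32 bits, LE */
}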
return -ENOMEM;
}
- sg_set_buf(&sg, cc->key, cc->key_size);
+ sg_init_one(&sg, cc->key, cc->key_size);
desc.tfm = hash_tfm;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
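For context on the sg_set_buf() -> sg_init_one() change above: sg_init_one() clears the entry and marks it as the end of the list, which the chained-scatterlist code expects even for a single entry. A self-contained sketch of hashing one kernel buffer with the legacy crypto_hash API used here (algorithm name and error handling are illustrative, not taken from dm-crypt):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Sketch: digest 'len' bytes of 'buf' into 'out' using hash algorithm 'alg'. */
static int hash_buffer_sketch(const char *alg, u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_one(&sg, buf, len);		/* one entry, end marker set */
	desc.tfm = tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, len, out);
	crypto_free_hash(tfm);
	return err;
}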
ctx->idx_out < ctx->bio_out->bi_vcnt) {
struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
- struct scatterlist sg_in = {
- .page = bv_in->bv_page,
- .offset = bv_in->bv_offset + ctx->offset_in,
- .length = 1 << SECTOR_SHIFT
- };
- struct scatterlist sg_out = {
- .page = bv_out->bv_page,
- .offset = bv_out->bv_offset + ctx->offset_out,
- .length = 1 << SECTOR_SHIFT
- };
+ struct scatterlist sg_in, sg_out;
+
+ sg_init_table(&sg_in, 1);
+ sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
+             bv_in->bv_offset + ctx->offset_in);
+
+ sg_init_table(&sg_out, 1);
+ sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
+             bv_out->bv_offset + ctx->offset_out);
ctx->offset_in += sg_in.length;
if (ctx->offset_in >= bv_in->bv_len) {
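The scatterlist rework above is needed because, with chained scatterlists, each entry's page_link carries termination (and chain) bits, so on-stack lists must be set up through sg_init_table()/sg_set_page() rather than bare designated initializers. A minimal sketch of describing part of a page as a one-entry list (names are illustrative):

#include <linux/scatterlist.h>

/* Sketch: map 'len' bytes at 'offset' within 'page' as a single,
 * properly terminated scatterlist entry. */
static void map_one_range_sketch(struct scatterlist *sg, struct page *page,
				 unsigned int len, unsigned int offset)
{
	sg_init_table(sg, 1);			/* zero the entry, set end marker */
	sg_set_page(sg, page, len, offset);
}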
struct bio *clone;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
- unsigned int i;
+ unsigned i, len;
+ struct page *page;
clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
if (!clone)
clone_init(io, clone);
for (i = 0; i < nr_iovecs; i++) {
- struct bio_vec *bv = bio_iovec_idx(clone, i);
-
- bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
- if (!bv->bv_page)
+ page = mempool_alloc(cc->page_pool, gfp_mask);
+ if (!page)
break;
/*
if (i == (MIN_BIO_PAGES - 1))
gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
- bv->bv_offset = 0;
- if (size > PAGE_SIZE)
- bv->bv_len = PAGE_SIZE;
- else
- bv->bv_len = size;
+ len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+
+ if (!bio_add_page(clone, page, len, 0)) {
+ mempool_free(page, cc->page_pool);
+ break;
+ }
- clone->bi_size += bv->bv_len;
- clone->bi_vcnt++;
- size -= bv->bv_len;
+ size -= len;
}
if (!clone->bi_size) {
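The allocation loop above now relies on bio_add_page() to do the bookkeeping (bi_vcnt, bi_size) and to reject pages the bio cannot take, instead of filling bio_vec fields by hand; on failure the page goes back to the mempool and the bio is left short. A minimal sketch of that pattern under the same assumptions (pool and function name are illustrative):

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/kernel.h>	/* min_t */

/* Sketch: append freshly allocated pages to 'bio' until 'size' bytes are
 * covered, stopping early if allocation or bio_add_page() fails. */
static unsigned int fill_bio_sketch(struct bio *bio, mempool_t *pool,
				    unsigned int size)
{
	unsigned int added = 0;

	while (size) {
		unsigned int len = min_t(unsigned int, size, PAGE_SIZE);
		struct page *page = mempool_alloc(pool, GFP_NOIO);

		if (!page)
			break;
		if (!bio_add_page(bio, page, len, 0)) {
			mempool_free(page, pool);
			break;
		}
		size -= len;
		added += len;
	}

	return added;	/* caller checks for a short or empty bio */
}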
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
*/
-static void dec_pending(struct dm_crypt_io *io, int error)
+static void crypt_dec_pending(struct dm_crypt_io *io, int error)
{
struct crypt_config *cc = (struct crypt_config *) io->target->private;
struct crypt_config *cc = io->target->private;
unsigned read_io = bio_data_dir(clone) == READ;
+ if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
+ error = -EIO;
+
/*
* free the processed pages
*/
goto out;
}
- if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
- error = -EIO;
+ if (unlikely(error))
goto out;
- }
bio_put(clone);
kcryptd_queue_crypt(io);
out:
bio_put(clone);
- dec_pending(io, error);
+ crypt_dec_pending(io, error);
}
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
*/
clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
if (unlikely(!clone)) {
- dec_pending(io, -ENOMEM);
+ crypt_dec_pending(io, -ENOMEM);
return;
}
while (remaining) {
clone = crypt_alloc_buffer(io, remaining);
if (unlikely(!clone)) {
- dec_pending(io, -ENOMEM);
+ crypt_dec_pending(io, -ENOMEM);
return;
}
if (unlikely(crypt_convert(cc, &ctx) < 0)) {
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
- dec_pending(io, -EIO);
+ crypt_dec_pending(io, -EIO);
return;
}
crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
io->base_bio->bi_sector - io->target->begin, 0);
- dec_pending(io, crypt_convert(cc, &ctx));
+ crypt_dec_pending(io, crypt_convert(cc, &ctx));
}
static void kcryptd_do_work(struct work_struct *work)