/*
* per bio private data
*/
-struct crypt_io {
+struct dm_crypt_io {
struct dm_target *target;
struct bio *base_bio;
struct work_struct work;
static struct kmem_cache *_crypt_io_pool;
-static void clone_init(struct crypt_io *, struct bio *);
+static void clone_init(struct dm_crypt_io *, struct bio *);
/*
* Different IV generation algorithms:
* benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 * (needed for LRW-32-AES and possibly other narrow block modes)
*
+ * null: the initial vector is always zero. Provides compatibility with
+ * obsolete loop_fish2 devices. Do not use for new devices.
+ *
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
*/
return 0;
}
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+ memset(iv, 0, cc->iv_size);
+
+ return 0;
+}
+
static struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
.generator = crypt_iv_benbi_gen
};
+static struct crypt_iv_operations crypt_iv_null_ops = {
+ .generator = crypt_iv_null_gen
+};
+
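/*
 * Illustrative sketch, not part of this patch: the benbi mode described in
 * the comment above stores a 64-bit big-endian block count, starting at 1,
 * in the low-order end of the IV.  A minimal standalone version of that
 * idea (the function name and the plain block-count parameter are
 * assumptions for illustration; the byteorder and string helpers come from
 * headers this file already includes, and iv_size is assumed to be at
 * least 8 bytes):
 */
static int example_benbi_style_gen(u8 *iv, unsigned iv_size, u64 count)
{
	__be64 val = cpu_to_be64(count + 1);	/* benbi counts from 1 */

	memset(iv, 0, iv_size);			/* zero the whole IV */
	memcpy(iv + iv_size - sizeof(val), &val, sizeof(val));

	return 0;
}

/*
 * The new "null" mode above is selected like any other IV mode, through
 * the third field of the cipher argument in the mapping table, e.g. a
 * cipher string of the form "aes-cbc-null" (syntax illustration only).
 */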
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
struct scatterlist *in, unsigned int length,
static void dm_crypt_bio_destructor(struct bio *bio)
{
- struct crypt_io *io = bio->bi_private;
+ struct dm_crypt_io *io = bio->bi_private;
struct crypt_config *cc = io->target->private;
bio_free(bio, cc->bs);
* This should never violate the device limitations
* May return a smaller bio when running out of pages
*/
-static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
struct crypt_config *cc = io->target->private;
struct bio *clone;
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
*/
-static void dec_pending(struct crypt_io *io, int error)
+static void dec_pending(struct dm_crypt_io *io, int error)
{
struct crypt_config *cc = (struct crypt_config *) io->target->private;
static struct workqueue_struct *_kcryptd_workqueue;
static void kcryptd_do_work(struct work_struct *work);
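/*
 * kcryptd provides a process context for the expensive work: the cipher
 * operations and bio/page allocations may sleep, which is not allowed on
 * the read completion path, where crypt_endio() runs in interrupt context.
 */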
-static void kcryptd_queue_io(struct crypt_io *io)
+static void kcryptd_queue_io(struct dm_crypt_io *io)
{
INIT_WORK(&io->work, kcryptd_do_work);
queue_work(_kcryptd_workqueue, &io->work);
static int crypt_endio(struct bio *clone, unsigned int done, int error)
{
- struct crypt_io *io = clone->bi_private;
+ struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->target->private;
unsigned read_io = bio_data_dir(clone) == READ;
return error;
}
-static void clone_init(struct crypt_io *io, struct bio *clone)
+static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
struct crypt_config *cc = io->target->private;
clone->bi_destructor = dm_crypt_bio_destructor;
}
-static void process_read(struct crypt_io *io)
+static void process_read(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
generic_make_request(clone);
}
-static void process_write(struct crypt_io *io)
+static void process_write(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
}
}
-static void process_read_endio(struct crypt_io *io)
+static void process_read_endio(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
struct convert_context ctx;
static void kcryptd_do_work(struct work_struct *work)
{
- struct crypt_io *io = container_of(work, struct crypt_io, work);
+ struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
if (io->post_process)
process_read_endio(io);
cc->iv_gen_ops = &crypt_iv_essiv_ops;
else if (strcmp(ivmode, "benbi") == 0)
cc->iv_gen_ops = &crypt_iv_benbi_ops;
+ else if (strcmp(ivmode, "null") == 0)
+ cc->iv_gen_ops = &crypt_iv_null_ops;
else {
ti->error = "Invalid IV mode";
goto bad2;
{
struct crypt_config *cc = (struct crypt_config *) ti->private;
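+ /* Make sure no queued dm_crypt_io work is still in flight before the
+ * bioset and mempools it uses are destroyed below. */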
+ flush_workqueue(_kcryptd_workqueue);
+
bioset_free(cc->bs);
mempool_destroy(cc->page_pool);
mempool_destroy(cc->io_pool);
union map_info *map_context)
{
struct crypt_config *cc = ti->private;
- struct crypt_io *io;
-
- if (bio_barrier(bio))
- return -EOPNOTSUPP;
+ struct dm_crypt_io *io;
io = mempool_alloc(cc->io_pool, GFP_NOIO);
io->target = ti;
static struct target_type crypt_target = {
.name = "crypt",
- .version= {1, 3, 0},
+ .version= {1, 5, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
{
int r;
- _crypt_io_pool = kmem_cache_create("dm-crypt_io",
- sizeof(struct crypt_io),
- 0, 0, NULL, NULL);
+ _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
if (!_crypt_io_pool)
return -ENOMEM;
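/*
 * KMEM_CACHE() (from <linux/slab.h>) derives the slab cache's name, object
 * size and alignment from the structure itself, roughly equivalent to
 *
 * kmem_cache_create("dm_crypt_io", sizeof(struct dm_crypt_io),
 *                   __alignof__(struct dm_crypt_io), 0, ...);
 *
 * which is also why struct crypt_io gains the dm_ prefix in this patch:
 * the structure name now doubles as the cache name seen in /proc/slabinfo.
 */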