Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c  590
1 file changed, 383 insertions(+), 207 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bdbd34993a80..4c2471ee054a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -15,24 +16,28 @@
 #include <linux/slab.h>
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
+#include <linux/backing-dev.h>
 #include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
+#include <asm/unaligned.h>
 
 #include "dm.h"
 
 #define DM_MSG_PREFIX "crypt"
+#define MESG_STR(x) x, sizeof(x)
 
 /*
  * per bio private data
  */
 struct crypt_io {
 	struct dm_target *target;
-	struct bio *bio;
+	struct bio *base_bio;
 	struct bio *first_clone;
 	struct work_struct work;
 	atomic_t pending;
 	int error;
+	int post_process;
 };
 
 /*
@@ -63,6 +68,7 @@ struct crypt_iv_operations {
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
+enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
 struct crypt_config {
 	struct dm_dev *dev;
 	sector_t start;
@@ -73,28 +79,33 @@ struct crypt_config {
 	 */
 	mempool_t *io_pool;
 	mempool_t *page_pool;
+	struct bio_set *bs;
 
 	/*
 	 * crypto related data
 	 */
 	struct crypt_iv_operations *iv_gen_ops;
 	char *iv_mode;
-	struct crypto_cipher *iv_gen_private;
+	union {
+		struct crypto_cipher *essiv_tfm;
+		int benbi_shift;
+	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
 
 	char cipher[CRYPTO_MAX_ALG_NAME];
 	char chainmode[CRYPTO_MAX_ALG_NAME];
 	struct crypto_blkcipher *tfm;
+	unsigned long flags;
 	unsigned int key_size;
 	u8 key[0];
 };
 
-#define MIN_IOS 256
+#define MIN_IOS        16
 #define MIN_POOL_PAGES 32
 #define MIN_BIO_PAGES  8
 
-static kmem_cache_t *_crypt_io_pool;
+static struct kmem_cache *_crypt_io_pool;
 
 /*
  * Different IV generation algorithms:
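A note on the `u8 key[0]` member kept at the end of struct crypt_config: it is the zero-length-array idiom, so the variable-sized key lives in the same allocation as the struct (see the later `kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL)` in crypt_ctr()). A minimal userspace sketch of the same layout, using the C99 flexible-array spelling; the names here are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for struct crypt_config: fixed header fields
 * followed by a variable-length key stored inline. */
struct config {
	unsigned int key_size;
	unsigned char key[];	/* the kernel source uses the older key[0] idiom */
};

static struct config *config_alloc(const unsigned char *key, unsigned int key_size)
{
	/* one allocation covers the struct and the trailing key bytes,
	 * mirroring kzalloc(sizeof(*cc) + key_size * sizeof(u8)) */
	struct config *c = calloc(1, sizeof(*c) + key_size);

	if (!c)
		return NULL;
	c->key_size = key_size;
	memcpy(c->key, key, key_size);
	return c;
}

int main(void)
{
	const unsigned char k[] = { 0xde, 0xad, 0xbe, 0xef };
	struct config *c = config_alloc(k, sizeof(k));

	if (!c)
		return 1;
	printf("key_size=%u first=%02x\n", c->key_size, c->key[0]);
	free(c);	/* struct and key are freed together */
	return 0;
}

Keeping the key inline also means the later crypt_wipe_key() can clear it in place with a single memset, with no separate buffer to track.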
@@ -106,6 +117,9 @@ static kmem_cache_t *_crypt_io_pool;
  *          encrypted with the bulk cipher using a salt as key. The salt
  *          should be derived from the bulk cipher's key via hashing.
  *
+ * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
+ *        (needed for LRW-32-AES and possible other narrow block modes)
+ *
  * plumb: unimplemented, see:
  *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
@@ -184,21 +198,61 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	}
 	kfree(salt);
 
-	cc->iv_gen_private = essiv_tfm;
+	cc->iv_gen_private.essiv_tfm = essiv_tfm;
 	return 0;
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
-	crypto_free_cipher(cc->iv_gen_private);
-	cc->iv_gen_private = NULL;
+	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
+	cc->iv_gen_private.essiv_tfm = NULL;
 }
 
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
 	memset(iv, 0, cc->iv_size);
 	*(u64 *)iv = cpu_to_le64(sector);
-	crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
+	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
+	return 0;
+}
+
+static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
+			      const char *opts)
+{
+	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
+	int log = ilog2(bs);
+
+	/* we need to calculate how far we must shift the sector count
+	 * to get the cipher block count, we use this shift in _gen */
+
+	if (1 << log != bs) {
+		ti->error = "cypher blocksize is not a power of 2";
+		return -EINVAL;
+	}
+
+	if (log > 9) {
+		ti->error = "cypher blocksize is > 512";
+		return -EINVAL;
+	}
+
+	cc->iv_gen_private.benbi_shift = 9 - log;
+
+	return 0;
+}
+
+static void crypt_iv_benbi_dtr(struct crypt_config *cc)
+{
+}
+
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+	__be64 val;
+
+	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
+
+	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
+	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
+
 	return 0;
 }
 
@@ -212,13 +266,18 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
 	.generator = crypt_iv_essiv_gen
 };
 
+static struct crypt_iv_operations crypt_iv_benbi_ops = {
+	.ctr	   = crypt_iv_benbi_ctr,
+	.dtr	   = crypt_iv_benbi_dtr,
+	.generator = crypt_iv_benbi_gen
+};
 
 static int
 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                           struct scatterlist *in, unsigned int length,
                           int write, sector_t sector)
 {
-	u8 iv[cc->iv_size];
+	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
 	struct blkcipher_desc desc = {
 		.tfm = cc->tfm,
 		.info = iv,
@@ -306,6 +365,14 @@ static int crypt_convert(struct crypt_config *cc,
 	return r;
 }
 
+ static void dm_crypt_bio_destructor(struct bio *bio)
+ {
+	struct crypt_io *io = bio->bi_private;
+	struct crypt_config *cc = io->target->private;
+
+	bio_free(bio, cc->bs);
+ }
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
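The benbi generator turns a 512-byte sector number into the big-endian count of the first narrow cipher block in that sector, starting at 1: block = (sector << (9 - log2(blocksize))) + 1, stored in the last 8 bytes of the IV. A standalone sketch of that arithmetic, with userspace stand-ins for cpu_to_be64()/put_unaligned(); the values in main() are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write a 64-bit value into buf in big-endian byte order, the
 * portable equivalent of cpu_to_be64() + put_unaligned(). */
static void store_be64(uint8_t *buf, uint64_t v)
{
	for (int i = 7; i >= 0; i--) {
		buf[i] = v & 0xff;
		v >>= 8;
	}
}

/* benbi: IV = big-endian count of narrow blocks, starting at 1 */
static void benbi_iv(uint8_t *iv, unsigned iv_size,
		     uint64_t sector, unsigned block_log2)
{
	unsigned shift = 9 - block_log2;	/* 512-byte sectors = 2^9 */

	memset(iv, 0, iv_size - 8);
	store_be64(iv + iv_size - 8, (sector << shift) + 1);
}

int main(void)
{
	uint8_t iv[16];

	/* 16-byte blocks (AES): log2 = 4, so 32 blocks per sector;
	 * sector 2 therefore starts at block 2*32 + 1 = 65 */
	benbi_iv(iv, sizeof(iv), 2, 4);
	for (unsigned i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");
	return 0;
}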
@@ -315,34 +382,33 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
                   struct bio *base_bio, unsigned int *bio_vec_idx)
 {
-	struct bio *bio;
+	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 	unsigned int i;
 
-	/*
-	 * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
-	 * to fail earlier.  This is not necessary but increases throughput.
-	 * FIXME: Is this really intelligent?
-	 */
-	if (base_bio)
-		bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
-	else
-		bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
-	if (!bio)
+	if (base_bio) {
+		clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
+		__bio_clone(clone, base_bio);
+	} else
+		clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
+
+	if (!clone)
 		return NULL;
 
+	clone->bi_destructor = dm_crypt_bio_destructor;
+
 	/* if the last bio was not complete, continue where that one ended */
-	bio->bi_idx = *bio_vec_idx;
-	bio->bi_vcnt = *bio_vec_idx;
-	bio->bi_size = 0;
-	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+	clone->bi_idx = *bio_vec_idx;
+	clone->bi_vcnt = *bio_vec_idx;
+	clone->bi_size = 0;
+	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
 
-	/* bio->bi_idx pages have already been allocated */
-	size -= bio->bi_idx * PAGE_SIZE;
+	/* clone->bi_idx pages have already been allocated */
+	size -= clone->bi_idx * PAGE_SIZE;
 
-	for(i = bio->bi_idx; i < nr_iovecs; i++) {
-		struct bio_vec *bv = bio_iovec_idx(bio, i);
+	for (i = clone->bi_idx; i < nr_iovecs; i++) {
+		struct bio_vec *bv = bio_iovec_idx(clone, i);
 
 		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
 		if (!bv->bv_page)
@@ -353,7 +419,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		 * return a partially allocated bio, the caller will then try
 		 * to allocate additional bios while submitting this partial bio
 		 */
-		if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
+		if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 		bv->bv_offset = 0;
@@ -362,13 +428,13 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 		else
 			bv->bv_len = size;
 
-		bio->bi_size += bv->bv_len;
-		bio->bi_vcnt++;
+		clone->bi_size += bv->bv_len;
+		clone->bi_vcnt++;
 
 		size -= bv->bv_len;
 	}
 
-	if (!bio->bi_size) {
-		bio_put(bio);
+	if (!clone->bi_size) {
+		bio_put(clone);
 		return NULL;
 	}
 
@@ -376,13 +442,13 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
 	 * Remember the last bio_vec allocated to be able
 	 * to correctly continue after the splitting.
 	 */
-	*bio_vec_idx = bio->bi_vcnt;
+	*bio_vec_idx = clone->bi_vcnt;
 
-	return bio;
+	return clone;
 }
 
 static void crypt_free_buffer_pages(struct crypt_config *cc,
-				    struct bio *bio, unsigned int bytes)
+				    struct bio *clone, unsigned int bytes)
 {
 	unsigned int i, start, end;
 	struct bio_vec *bv;
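crypt_alloc_buffer() deliberately returns partial buffers: after MIN_BIO_PAGES pages it drops __GFP_WAIT so allocation fails fast instead of blocking, and *bio_vec_idx lets the next call resume where the previous one stopped. A userspace model of that resumable loop; the forced failure after eight pages is artificial, purely to exercise the restart path:

#include <stdio.h>

#define PAGE_SIZE     4096
#define MIN_BIO_PAGES 8
#define TOTAL_PAGES   20

/* Model of crypt_alloc_buffer(): cover `size` bytes with pages,
 * resuming at *idx; pretend every non-blocking allocation after
 * MIN_BIO_PAGES fails, forcing a partial buffer like ~__GFP_WAIT.
 * Returns the number of bytes covered by this call. */
static unsigned alloc_buffer(unsigned size, unsigned *idx)
{
	unsigned nr_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; /* ceil */
	unsigned covered = 0, got = 0;

	size -= *idx * PAGE_SIZE; /* pages below *idx were done earlier */

	for (unsigned i = *idx; i < nr_pages && got < MIN_BIO_PAGES; i++) {
		unsigned step = size < PAGE_SIZE ? size : PAGE_SIZE;

		covered += step;
		size -= step;
		got++;
		*idx = i + 1;
	}
	return covered;
}

int main(void)
{
	unsigned total = TOTAL_PAGES * PAGE_SIZE, idx = 0, done = 0;

	while (done < total) { /* mirrors process_write()'s retry loop */
		done += alloc_buffer(total, &idx);
		printf("%u pages allocated, %u bytes done\n", idx, done);
	}
	return 0;
}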
*/ - i = bio->bi_vcnt - 1; - bv = bio_iovec_idx(bio, i); - end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - bio->bi_size; + i = clone->bi_vcnt - 1; + bv = bio_iovec_idx(clone, i); + end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size; start = end - bytes; start >>= PAGE_SHIFT; - if (!bio->bi_size) - end = bio->bi_vcnt; + if (!clone->bi_size) + end = clone->bi_vcnt; else end >>= PAGE_SHIFT; - for(i = start; i < end; i++) { - bv = bio_iovec_idx(bio, i); + for (i = start; i < end; i++) { + bv = bio_iovec_idx(clone, i); BUG_ON(!bv->bv_page); mempool_free(bv->bv_page, cc->page_pool); bv->bv_page = NULL; @@ -432,7 +498,7 @@ static void dec_pending(struct crypt_io *io, int error) if (io->first_clone) bio_put(io->first_clone); - bio_endio(io->bio, io->bio->bi_size, io->error); + bio_endio(io->base_bio, io->base_bio->bi_size, io->error); mempool_free(io, cc->io_pool); } @@ -441,29 +507,179 @@ static void dec_pending(struct crypt_io *io, int error) * kcryptd: * * Needed because it would be very unwise to do decryption in an - * interrupt context, so bios returning from read requests get - * queued here. + * interrupt context. */ static struct workqueue_struct *_kcryptd_workqueue; +static void kcryptd_do_work(struct work_struct *work); -static void kcryptd_do_work(void *data) +static void kcryptd_queue_io(struct crypt_io *io) { - struct crypt_io *io = (struct crypt_io *) data; - struct crypt_config *cc = (struct crypt_config *) io->target->private; + INIT_WORK(&io->work, kcryptd_do_work); + queue_work(_kcryptd_workqueue, &io->work); +} + +static int crypt_endio(struct bio *clone, unsigned int done, int error) +{ + struct crypt_io *io = clone->bi_private; + struct crypt_config *cc = io->target->private; + unsigned read_io = bio_data_dir(clone) == READ; + + /* + * free the processed pages, even if + * it's only a partially completed write + */ + if (!read_io) + crypt_free_buffer_pages(cc, clone, done); + + /* keep going - not finished yet */ + if (unlikely(clone->bi_size)) + return 1; + + if (!read_io) + goto out; + + if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) { + error = -EIO; + goto out; + } + + bio_put(clone); + io->post_process = 1; + kcryptd_queue_io(io); + return 0; + +out: + bio_put(clone); + dec_pending(io, error); + return error; +} + +static void clone_init(struct crypt_io *io, struct bio *clone) +{ + struct crypt_config *cc = io->target->private; + + clone->bi_private = io; + clone->bi_end_io = crypt_endio; + clone->bi_bdev = cc->dev->bdev; + clone->bi_rw = io->base_bio->bi_rw; +} + +static void process_read(struct crypt_io *io) +{ + struct crypt_config *cc = io->target->private; + struct bio *base_bio = io->base_bio; + struct bio *clone; + sector_t sector = base_bio->bi_sector - io->target->begin; + + atomic_inc(&io->pending); + + /* + * The block layer might modify the bvec array, so always + * copy the required bvecs because we need the original + * one in order to decrypt the whole bio data *afterwards*. 
@@ -441,29 +507,179 @@ static void dec_pending(struct crypt_io *io, int error)
  * kcryptd:
  *
  * Needed because it would be very unwise to do decryption in an
- * interrupt context, so bios returning from read requests get
- * queued here.
+ * interrupt context.
  */
 static struct workqueue_struct *_kcryptd_workqueue;
+static void kcryptd_do_work(struct work_struct *work);
 
-static void kcryptd_do_work(void *data)
+static void kcryptd_queue_io(struct crypt_io *io)
 {
-	struct crypt_io *io = (struct crypt_io *) data;
-	struct crypt_config *cc = (struct crypt_config *) io->target->private;
+	INIT_WORK(&io->work, kcryptd_do_work);
+	queue_work(_kcryptd_workqueue, &io->work);
+}
+
+static int crypt_endio(struct bio *clone, unsigned int done, int error)
+{
+	struct crypt_io *io = clone->bi_private;
+	struct crypt_config *cc = io->target->private;
+	unsigned read_io = bio_data_dir(clone) == READ;
+
+	/*
+	 * free the processed pages, even if
+	 * it's only a partially completed write
+	 */
+	if (!read_io)
+		crypt_free_buffer_pages(cc, clone, done);
+
+	/* keep going - not finished yet */
+	if (unlikely(clone->bi_size))
+		return 1;
+
+	if (!read_io)
+		goto out;
+
+	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
+		error = -EIO;
+		goto out;
+	}
+
+	bio_put(clone);
+	io->post_process = 1;
+	kcryptd_queue_io(io);
+	return 0;
+
+out:
+	bio_put(clone);
+	dec_pending(io, error);
+	return error;
+}
+
+static void clone_init(struct crypt_io *io, struct bio *clone)
+{
+	struct crypt_config *cc = io->target->private;
+
+	clone->bi_private = io;
+	clone->bi_end_io = crypt_endio;
+	clone->bi_bdev = cc->dev->bdev;
+	clone->bi_rw = io->base_bio->bi_rw;
+}
+
+static void process_read(struct crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+	struct bio *base_bio = io->base_bio;
+	struct bio *clone;
+	sector_t sector = base_bio->bi_sector - io->target->begin;
+
+	atomic_inc(&io->pending);
+
+	/*
+	 * The block layer might modify the bvec array, so always
+	 * copy the required bvecs because we need the original
+	 * one in order to decrypt the whole bio data *afterwards*.
+	 */
+	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
+	if (unlikely(!clone)) {
+		dec_pending(io, -ENOMEM);
+		return;
+	}
+
+	clone_init(io, clone);
+	clone->bi_destructor = dm_crypt_bio_destructor;
+	clone->bi_idx = 0;
+	clone->bi_vcnt = bio_segments(base_bio);
+	clone->bi_size = base_bio->bi_size;
+	clone->bi_sector = cc->start + sector;
+	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
+	       sizeof(struct bio_vec) * clone->bi_vcnt);
+
+	generic_make_request(clone);
+}
+
+static void process_write(struct crypt_io *io)
+{
+	struct crypt_config *cc = io->target->private;
+	struct bio *base_bio = io->base_bio;
+	struct bio *clone;
 	struct convert_context ctx;
-	int r;
+	unsigned remaining = base_bio->bi_size;
+	sector_t sector = base_bio->bi_sector - io->target->begin;
+	unsigned bvec_idx = 0;
 
-	crypt_convert_init(cc, &ctx, io->bio, io->bio,
-	                   io->bio->bi_sector - io->target->begin, 0);
-	r = crypt_convert(cc, &ctx);
+	atomic_inc(&io->pending);
+
+	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);
 
+	/*
+	 * The allocated buffers can be smaller than the whole bio,
+	 * so repeat the whole process until all the data can be handled.
+	 */
+	while (remaining) {
+		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
+					   io->first_clone, &bvec_idx);
+		if (unlikely(!clone)) {
+			dec_pending(io, -ENOMEM);
+			return;
+		}
+
+		ctx.bio_out = clone;
+
+		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
+			crypt_free_buffer_pages(cc, clone, clone->bi_size);
+			bio_put(clone);
+			dec_pending(io, -EIO);
+			return;
+		}
 
-	dec_pending(io, r);
+		clone_init(io, clone);
+		clone->bi_sector = cc->start + sector;
+
+		if (!io->first_clone) {
+			/*
+			 * hold a reference to the first clone, because it
+			 * holds the bio_vec array and that can't be freed
+			 * before all other clones are released
+			 */
+			bio_get(clone);
+			io->first_clone = clone;
+		}
+
+		remaining -= clone->bi_size;
+		sector += bio_sectors(clone);
+
+		/* prevent bio_put of first_clone */
+		if (remaining)
+			atomic_inc(&io->pending);
+
+		generic_make_request(clone);
+
+		/* out of memory -> run queues */
+		if (remaining)
+			congestion_wait(bio_data_dir(clone), HZ/100);
+	}
 }
 
-static void kcryptd_queue_io(struct crypt_io *io)
+static void process_read_endio(struct crypt_io *io)
 {
-	INIT_WORK(&io->work, kcryptd_do_work, io);
-	queue_work(_kcryptd_workqueue, &io->work);
+	struct crypt_config *cc = io->target->private;
+	struct convert_context ctx;
+
+	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
+			   io->base_bio->bi_sector - io->target->begin, 0);
+
+	dec_pending(io, crypt_convert(cc, &ctx));
+}
+
+static void kcryptd_do_work(struct work_struct *work)
+{
+	struct crypt_io *io = container_of(work, struct crypt_io, work);
+
+	if (io->post_process)
+		process_read_endio(io);
+	else if (bio_data_dir(io->base_bio) == READ)
+		process_read(io);
+	else
+		process_write(io);
}
 
 /*
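process_write() walks the request in whatever chunk sizes crypt_alloc_buffer() manages to produce, advancing the device-relative sector by bio_sectors() of each clone before submitting the next one. The bookkeeping reduces to the following; the fixed 8 KB chunk size is an assumption for illustration only:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

/* Model of process_write()'s split bookkeeping: walk a request in
 * chunks, advancing the start sector by the sectors just written. */
int main(void)
{
	uint64_t sector = 1000;		/* device-relative start sector */
	unsigned remaining = 24 * 1024;	/* bytes left to encrypt + write */
	unsigned chunk = 8 * 1024;	/* what crypt_alloc_buffer() returned */

	while (remaining) {
		printf("clone: sector %llu, %u bytes\n",
		       (unsigned long long)sector, chunk);
		remaining -= chunk;
		sector += chunk >> SECTOR_SHIFT;	/* bio_sectors() */
	}
	return 0;
}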
@@ -477,7 +693,7 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
 
 	buffer[2] = '\0';
 
-	for(i = 0; i < size; i++) {
+	for (i = 0; i < size; i++) {
 		buffer[0] = *hex++;
 		buffer[1] = *hex++;
 
@@ -500,13 +716,38 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
 {
 	unsigned int i;
 
-	for(i = 0; i < size; i++) {
+	for (i = 0; i < size; i++) {
 		sprintf(hex, "%02x", *key);
 		hex += 2;
 		key++;
 	}
 }
 
+static int crypt_set_key(struct crypt_config *cc, char *key)
+{
+	unsigned key_size = strlen(key) >> 1;
+
+	if (cc->key_size && cc->key_size != key_size)
+		return -EINVAL;
+
+	cc->key_size = key_size; /* initial settings */
+
+	if ((!key_size && strcmp(key, "-")) ||
+	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
+		return -EINVAL;
+
+	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
+	return 0;
+}
+
+static int crypt_wipe_key(struct crypt_config *cc)
+{
+	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+	memset(&cc->key, 0, cc->key_size * sizeof(u8));
+	return 0;
+}
+
 /*
  * Construct an encryption mapping:
  * <cipher> <key> <iv_offset> <dev_path> <start>
@@ -539,16 +780,14 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	key_size = strlen(argv[1]) >> 1;
 
- 	cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
+ 	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
 	if (cc == NULL) {
 		ti->error = "Cannot allocate transparent encryption context";
 		return -ENOMEM;
 	}
 
- 	cc->key_size = key_size;
-	if ((!key_size && strcmp(argv[1], "-") != 0) ||
-	    (key_size && crypt_decode_key(cc->key, argv[1], key_size) < 0)) {
+	if (crypt_set_key(cc, argv[1])) {
 		ti->error = "Error decoding key";
 		goto bad1;
 	}
@@ -581,7 +820,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	cc->tfm = tfm;
 
 	/*
-	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
+	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
 	 * See comments at iv code
 	 */
 
@@ -591,6 +830,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		cc->iv_gen_ops = &crypt_iv_plain_ops;
 	else if (strcmp(ivmode, "essiv") == 0)
 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
+	else if (strcmp(ivmode, "benbi") == 0)
+		cc->iv_gen_ops = &crypt_iv_benbi_ops;
 	else {
 		ti->error = "Invalid IV mode";
 		goto bad2;
@@ -626,6 +867,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto bad4;
 	}
 
+	cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4);
+	if (!cc->bs) {
+		ti->error = "Cannot allocate crypt bioset";
+		goto bad_bs;
+	}
+
 	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
 		ti->error = "Error setting key";
 		goto bad5;
@@ -665,6 +912,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	return 0;
 
 bad5:
+	bioset_free(cc->bs);
+bad_bs:
 	mempool_destroy(cc->page_pool);
 bad4:
 	mempool_destroy(cc->io_pool);
@@ -684,6 +933,7 @@ static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = (struct crypt_config *) ti->private;
 
+	bioset_free(cc->bs);
 	mempool_destroy(cc->page_pool);
 	mempool_destroy(cc->io_pool);
 
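crypt_set_key() derives key_size from strlen(key)/2 and hands off to crypt_decode_key(), which consumes the hex string two characters per byte; crypt_encode_key() is the inverse used by status output. A standalone round trip of the same conversion, with simplified error handling and illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Decode a hex string into `size` raw bytes, as crypt_decode_key()
 * does with a 2-character buffer and strtoul per byte. */
static int decode_key(unsigned char *key, const char *hex, unsigned size)
{
	char buf[3] = { 0 };

	for (unsigned i = 0; i < size; i++) {
		char *end;

		buf[0] = *hex++;
		buf[1] = *hex++;
		key[i] = (unsigned char)strtoul(buf, &end, 16);
		if (end != buf + 2)
			return -1;	/* non-hex character */
	}
	return 0;
}

/* Inverse: two lowercase hex digits per byte, as crypt_encode_key(). */
static void encode_key(char *hex, const unsigned char *key, unsigned size)
{
	for (unsigned i = 0; i < size; i++)
		sprintf(hex + 2 * i, "%02x", key[i]);
}

int main(void)
{
	const char *in = "c0ffee00";	/* key_size = strlen / 2 = 4 */
	unsigned size = strlen(in) / 2;
	unsigned char key[32];
	char out[65];

	if (decode_key(key, in, size))
		return 1;
	encode_key(out, key, size);
	printf("%s\n", out);	/* prints c0ffee00 again */
	return 0;
}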
@@ -698,155 +948,27 @@ static void crypt_dtr(struct dm_target *ti)
 	kfree(cc);
 }
 
-static int crypt_endio(struct bio *bio, unsigned int done, int error)
-{
-	struct crypt_io *io = (struct crypt_io *) bio->bi_private;
-	struct crypt_config *cc = (struct crypt_config *) io->target->private;
-
-	if (bio_data_dir(bio) == WRITE) {
-		/*
-		 * free the processed pages, even if
-		 * it's only a partially completed write
-		 */
-		crypt_free_buffer_pages(cc, bio, done);
-	}
-
-	if (bio->bi_size)
-		return 1;
-
-	bio_put(bio);
-
-	/*
-	 * successful reads are decrypted by the worker thread
-	 */
-	if ((bio_data_dir(bio) == READ)
-	    && bio_flagged(bio, BIO_UPTODATE)) {
-		kcryptd_queue_io(io);
-		return 0;
-	}
-
-	dec_pending(io, error);
-	return error;
-}
-
-static inline struct bio *
-crypt_clone(struct crypt_config *cc, struct crypt_io *io, struct bio *bio,
-            sector_t sector, unsigned int *bvec_idx,
-            struct convert_context *ctx)
-{
-	struct bio *clone;
-
-	if (bio_data_dir(bio) == WRITE) {
-		clone = crypt_alloc_buffer(cc, bio->bi_size,
-		                           io->first_clone, bvec_idx);
-		if (clone) {
-			ctx->bio_out = clone;
-			if (crypt_convert(cc, ctx) < 0) {
-				crypt_free_buffer_pages(cc, clone,
-				                        clone->bi_size);
-				bio_put(clone);
-				return NULL;
-			}
-		}
-	} else {
-		/*
-		 * The block layer might modify the bvec array, so always
-		 * copy the required bvecs because we need the original
-		 * one in order to decrypt the whole bio data *afterwards*.
-		 */
-		clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-		if (clone) {
-			clone->bi_idx = 0;
-			clone->bi_vcnt = bio_segments(bio);
-			clone->bi_size = bio->bi_size;
-			memcpy(clone->bi_io_vec, bio_iovec(bio),
-			       sizeof(struct bio_vec) * clone->bi_vcnt);
-		}
-	}
-
-	if (!clone)
-		return NULL;
-
-	clone->bi_private = io;
-	clone->bi_end_io = crypt_endio;
-	clone->bi_bdev = cc->dev->bdev;
-	clone->bi_sector = cc->start + sector;
-	clone->bi_rw = bio->bi_rw;
-
-	return clone;
-}
-
 static int crypt_map(struct dm_target *ti, struct bio *bio,
 		     union map_info *map_context)
 {
-	struct crypt_config *cc = (struct crypt_config *) ti->private;
-	struct crypt_io *io = mempool_alloc(cc->io_pool, GFP_NOIO);
-	struct convert_context ctx;
-	struct bio *clone;
-	unsigned int remaining = bio->bi_size;
-	sector_t sector = bio->bi_sector - ti->begin;
-	unsigned int bvec_idx = 0;
+	struct crypt_config *cc = ti->private;
+	struct crypt_io *io;
 
+	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 	io->target = ti;
-	io->bio = bio;
+	io->base_bio = bio;
 	io->first_clone = NULL;
-	io->error = 0;
-	atomic_set(&io->pending, 1); /* hold a reference */
-
-	if (bio_data_dir(bio) == WRITE)
-		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);
+	io->error = io->post_process = 0;
+	atomic_set(&io->pending, 0);
+	kcryptd_queue_io(io);
 
-	/*
-	 * The allocated buffers can be smaller than the whole bio,
-	 * so repeat the whole process until all the data can be handled.
-	 */
-	while (remaining) {
-		clone = crypt_clone(cc, io, bio, sector, &bvec_idx, &ctx);
-		if (!clone)
-			goto cleanup;
-
-		if (!io->first_clone) {
-			/*
-			 * hold a reference to the first clone, because it
-			 * holds the bio_vec array and that can't be freed
-			 * before all other clones are released
-			 */
-			bio_get(clone);
-			io->first_clone = clone;
-		}
-		atomic_inc(&io->pending);
-
-		remaining -= clone->bi_size;
-		sector += bio_sectors(clone);
-
-		generic_make_request(clone);
-
-		/* out of memory -> run queues */
-		if (remaining)
-			blk_congestion_wait(bio_data_dir(clone), HZ/100);
-	}
-
-	/* drop reference, clones could have returned before we reach this */
-	dec_pending(io, 0);
-	return 0;
-
-cleanup:
-	if (io->first_clone) {
-		dec_pending(io, -ENOMEM);
-		return 0;
-	}
-
-	/* if no bio has been dispatched yet, we can directly return the error */
-	mempool_free(io, cc->io_pool);
-	return -ENOMEM;
+	return DM_MAPIO_SUBMITTED;
 }
 
 static int crypt_status(struct dm_target *ti, status_type_t type,
 			char *result, unsigned int maxlen)
 {
 	struct crypt_config *cc = (struct crypt_config *) ti->private;
-	const char *cipher;
-	const char *chainmode = NULL;
 	unsigned int sz = 0;
 
 	switch (type) {
@@ -855,14 +977,11 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
 		break;
 
 	case STATUSTYPE_TABLE:
-		cipher = crypto_blkcipher_name(cc->tfm);
-
-		chainmode = cc->chainmode;
-
 		if (cc->iv_mode)
-			DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
+			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
+			       cc->iv_mode);
 		else
-			DMEMIT("%s-%s ", cipher, chainmode);
+			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);
 
 		if (cc->key_size > 0) {
 			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
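With the cached cc->cipher / cc->chainmode strings, the table status now reproduces exactly what the constructor was given, following the `<cipher> <key> <iv_offset> <dev_path> <start>` layout documented above crypt_ctr(). Roughly, the emitted line looks like this; all values below are invented for illustration:

#include <stdio.h>

/* Shape of the STATUSTYPE_TABLE line dm-crypt reports back:
 *   <cipher>-<chainmode>-<ivmode> <hexkey> <iv_offset> <device> <start> */
int main(void)
{
	char line[256];

	snprintf(line, sizeof(line), "%s-%s-%s %s %llu %s %llu",
		 "aes", "cbc", "essiv:sha256",
		 "c0ffee00c0ffee00c0ffee00c0ffee00",
		 0ULL, "/dev/sda2", 0ULL);
	puts(line);
	return 0;
}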
@@ -883,14 +1002,71 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
 	return 0;
 }
 
+static void crypt_postsuspend(struct dm_target *ti)
+{
+	struct crypt_config *cc = ti->private;
+
+	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
+}
+
+static int crypt_preresume(struct dm_target *ti)
+{
+	struct crypt_config *cc = ti->private;
+
+	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
+		DMERR("aborting resume - crypt key is not set.");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static void crypt_resume(struct dm_target *ti)
+{
+	struct crypt_config *cc = ti->private;
+
+	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
+}
+
+/* Message interface
+ *	key set <key>
+ *	key wipe
+ */
+static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+	struct crypt_config *cc = ti->private;
+
+	if (argc < 2)
+		goto error;
+
+	if (!strnicmp(argv[0], MESG_STR("key"))) {
+		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
+			DMWARN("not suspended during key manipulation.");
+			return -EINVAL;
+		}
+		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
+			return crypt_set_key(cc, argv[2]);
+		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
+			return crypt_wipe_key(cc);
+	}
+
+error:
+	DMWARN("unrecognised message received.");
+	return -EINVAL;
+}
+
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version= {1, 1, 0},
+	.version= {1, 3, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
 	.map    = crypt_map,
 	.status = crypt_status,
+	.postsuspend = crypt_postsuspend,
+	.preresume = crypt_preresume,
+	.resume = crypt_resume,
+	.message = crypt_message,
 };
 
 static int __init dm_crypt_init(void)
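crypt_message() only honours "key set <hex>" and "key wipe" while the target is suspended, so the key can never change under in-flight I/O, and crypt_preresume() then refuses to resume until a valid key is set again. A userspace sketch of the same dispatch, with strncasecmp() standing in for the kernel's strnicmp():

#include <stdio.h>
#include <string.h>
#include <strings.h>

#define MESG_STR(x) x, sizeof(x)	/* string plus its size, as in the driver */

static int suspended = 1;	/* stand-in for the DM_CRYPT_SUSPENDED bit */

static int crypt_message(unsigned argc, char **argv)
{
	if (argc < 2)
		goto error;

	if (!strncasecmp(argv[0], MESG_STR("key"))) {
		if (!suspended) {
			fprintf(stderr, "not suspended during key manipulation\n");
			return -1;
		}
		if (argc == 3 && !strncasecmp(argv[1], MESG_STR("set"))) {
			printf("would set key to %s\n", argv[2]);
			return 0;
		}
		if (argc == 2 && !strncasecmp(argv[1], MESG_STR("wipe"))) {
			printf("would wipe key\n");
			return 0;
		}
	}

error:
	fprintf(stderr, "unrecognised message received\n");
	return -1;
}

int main(void)
{
	char *wipe[] = { "key", "wipe" };
	char *set[]  = { "key", "set", "c0ffee00" };

	crypt_message(2, wipe);
	crypt_message(3, set);
	return 0;
}

From userspace this is typically driven with something like `dmsetup suspend <dev>`, `dmsetup message <dev> 0 key wipe`, and later `dmsetup message <dev> 0 key set <hexkey>` followed by `dmsetup resume <dev>` (exact invocation may vary by dmsetup version); resuming after a wipe without setting a key fails with -EAGAIN via crypt_preresume().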