Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r--  drivers/md/dm-crypt.c  91
1 file changed, 40 insertions(+), 51 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d8121234c34..7b0fcfc9eaa 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@
struct crypt_io {
struct dm_target *target;
struct bio *base_bio;
- struct bio *first_clone;
struct work_struct work;
atomic_t pending;
int error;
@@ -107,6 +106,8 @@ struct crypt_config {
static struct kmem_cache *_crypt_io_pool;
+static void clone_init(struct crypt_io *, struct bio *);
+
/*
* Different IV generation algorithms:
*
@@ -120,6 +121,9 @@ static struct kmem_cache *_crypt_io_pool;
* benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
* (needed for LRW-32-AES and possible other narrow block modes)
*
+ * null: the initial vector is always zero. Provides compatibility with
+ * obsolete loop_fish2 devices. Do not use for new devices.
+ *
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
*/
@@ -256,6 +260,13 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
return 0;
}
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+ memset(iv, 0, cc->iv_size);
+
+ return 0;
+}
+
static struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
@@ -272,6 +283,10 @@ static struct crypt_iv_operations crypt_iv_benbi_ops = {
.generator = crypt_iv_benbi_gen
};
+static struct crypt_iv_operations crypt_iv_null_ops = {
+ .generator = crypt_iv_null_gen
+};
+
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
struct scatterlist *in, unsigned int length,
@@ -378,36 +393,21 @@ static int crypt_convert(struct crypt_config *cc,
* This should never violate the device limitations
* May return a smaller bio when running out of pages
*/
-static struct bio *
-crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
- struct bio *base_bio, unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
{
+ struct crypt_config *cc = io->target->private;
struct bio *clone;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
unsigned int i;
- if (base_bio) {
- clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
- __bio_clone(clone, base_bio);
- } else
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
-
+ clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
if (!clone)
return NULL;
- clone->bi_destructor = dm_crypt_bio_destructor;
-
- /* if the last bio was not complete, continue where that one ended */
- clone->bi_idx = *bio_vec_idx;
- clone->bi_vcnt = *bio_vec_idx;
- clone->bi_size = 0;
- clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
- /* clone->bi_idx pages have already been allocated */
- size -= clone->bi_idx * PAGE_SIZE;
+ clone_init(io, clone);
- for (i = clone->bi_idx; i < nr_iovecs; i++) {
+ for (i = 0; i < nr_iovecs; i++) {
struct bio_vec *bv = bio_iovec_idx(clone, i);
bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
@@ -419,7 +419,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
* return a partially allocated bio, the caller will then try
* to allocate additional bios while submitting this partial bio
*/
- if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
+ if (i == (MIN_BIO_PAGES - 1))
gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
bv->bv_offset = 0;
@@ -438,12 +438,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
return NULL;
}
- /*
- * Remember the last bio_vec allocated to be able
- * to correctly continue after the splitting.
- */
- *bio_vec_idx = clone->bi_vcnt;
-
return clone;
}
@@ -495,9 +489,6 @@ static void dec_pending(struct crypt_io *io, int error)
if (!atomic_dec_and_test(&io->pending))
return;
- if (io->first_clone)
- bio_put(io->first_clone);
-
bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
mempool_free(io, cc->io_pool);
@@ -562,6 +553,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
clone->bi_rw = io->base_bio->bi_rw;
+ clone->bi_destructor = dm_crypt_bio_destructor;
}
static void process_read(struct crypt_io *io)
@@ -585,7 +577,6 @@ static void process_read(struct crypt_io *io)
}
clone_init(io, clone);
- clone->bi_destructor = dm_crypt_bio_destructor;
clone->bi_idx = 0;
clone->bi_vcnt = bio_segments(base_bio);
clone->bi_size = base_bio->bi_size;
@@ -604,7 +595,6 @@ static void process_write(struct crypt_io *io)
struct convert_context ctx;
unsigned remaining = base_bio->bi_size;
sector_t sector = base_bio->bi_sector - io->target->begin;
- unsigned bvec_idx = 0;
atomic_inc(&io->pending);
@@ -615,14 +605,14 @@ static void process_write(struct crypt_io *io)
* so repeat the whole process until all the data can be handled.
*/
while (remaining) {
- clone = crypt_alloc_buffer(cc, base_bio->bi_size,
- io->first_clone, &bvec_idx);
+ clone = crypt_alloc_buffer(io, remaining);
if (unlikely(!clone)) {
dec_pending(io, -ENOMEM);
return;
}
ctx.bio_out = clone;
+ ctx.idx_out = 0;
if (unlikely(crypt_convert(cc, &ctx) < 0)) {
crypt_free_buffer_pages(cc, clone, clone->bi_size);
@@ -631,31 +621,26 @@ static void process_write(struct crypt_io *io)
return;
}
- clone_init(io, clone);
- clone->bi_sector = cc->start + sector;
-
- if (!io->first_clone) {
- /*
- * hold a reference to the first clone, because it
- * holds the bio_vec array and that can't be freed
- * before all other clones are released
- */
- bio_get(clone);
- io->first_clone = clone;
- }
+ /* crypt_convert should have filled the clone bio */
+ BUG_ON(ctx.idx_out < clone->bi_vcnt);
+ clone->bi_sector = cc->start + sector;
remaining -= clone->bi_size;
sector += bio_sectors(clone);
- /* prevent bio_put of first_clone */
+ /* Grab another reference to the io struct
+ * before we kick off the request */
if (remaining)
atomic_inc(&io->pending);
generic_make_request(clone);
+ /* Do not reference clone after this - it
+ * may be gone already. */
+
/* out of memory -> run queues */
if (remaining)
- congestion_wait(bio_data_dir(clone), HZ/100);
+ congestion_wait(WRITE, HZ/100);
}
}
@@ -832,6 +817,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->iv_gen_ops = &crypt_iv_essiv_ops;
else if (strcmp(ivmode, "benbi") == 0)
cc->iv_gen_ops = &crypt_iv_benbi_ops;
+ else if (strcmp(ivmode, "null") == 0)
+ cc->iv_gen_ops = &crypt_iv_null_ops;
else {
ti->error = "Invalid IV mode";
goto bad2;
@@ -954,10 +941,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
struct crypt_config *cc = ti->private;
struct crypt_io *io;
+ if (bio_barrier(bio))
+ return -EOPNOTSUPP;
+
io = mempool_alloc(cc->io_pool, GFP_NOIO);
io->target = ti;
io->base_bio = bio;
- io->first_clone = NULL;
io->error = io->post_process = 0;
atomic_set(&io->pending, 0);
kcryptd_queue_io(io);
@@ -1057,7 +1046,7 @@ error:
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 3, 0},
+ .version = {1, 5, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
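
The process_write() changes above are the core of this patch: the old first_clone bookkeeping (which pinned the shared bio_vec array) is gone, each clone now owns its pages, and the only shared state is the io->pending count, incremented once per additional in-flight clone and never touched together with the clone after generic_make_request(). The following is a minimal userspace sketch of that counting pattern; the names (sample_io, submit_chunk) and the immediate "completion" are illustrative stand-ins, not dm-crypt code.

#include <stdatomic.h>
#include <stdio.h>

struct sample_io {
	atomic_int pending;     /* one count per outstanding chunk */
	int error;
};

/* Mirrors the role of dec_pending(): remember an error, complete only
 * when the last outstanding reference is dropped. */
static void dec_pending(struct sample_io *io, int error)
{
	if (error)
		io->error = error;
	if (atomic_fetch_sub(&io->pending, 1) != 1)
		return;                 /* other chunks still in flight */
	printf("all chunks done, error=%d\n", io->error);
}

/* Stands in for generic_make_request(clone): after this call the chunk
 * may already have completed and been freed, so it must not be touched. */
static void submit_chunk(struct sample_io *io)
{
	dec_pending(io, 0);             /* simulate immediate completion */
}

int main(void)
{
	struct sample_io io = { .error = 0 };
	unsigned remaining = 3;         /* pretend the bio splits into 3 */

	atomic_init(&io.pending, 1);    /* the atomic_inc() taken at the
					 * top of process_write() */
	while (remaining) {
		remaining--;            /* one chunk handled this pass */
		if (remaining)          /* more to come: keep io alive */
			atomic_fetch_add(&io.pending, 1);
		submit_chunk(&io);      /* never touch the chunk afterwards */
	}
	return 0;
}

The initial count of one corresponds to the atomic_inc(&io->pending) taken before the loop; reaching zero is what lets dec_pending() end the base bio.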
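
crypt_alloc_buffer() above keeps its "may return a smaller bio" contract: pages come from the mempool with a waiting GFP mask only up to MIN_BIO_PAGES, after which __GFP_WAIT is dropped and the first failure simply ends the loop, leaving a partial bio for the caller to submit and retry. The sketch below, again purely illustrative, shows that "take what you can and let the caller loop" policy; malloc() stands in for mempool_alloc(), and since userspace cannot express the blocking/non-blocking switch it is only noted in a comment.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ   4096u
#define MIN_PAGES 16u           /* plays the role of MIN_BIO_PAGES */

/*
 * Allocate up to nr_wanted pages into pages[]; stop at the first failure
 * and return however many were obtained.  In the kernel code the first
 * MIN_BIO_PAGES allocations may block (GFP_NOIO) while later ones are
 * best-effort (__GFP_NOWARN, no __GFP_WAIT); malloc() cannot express
 * that distinction.
 */
static unsigned alloc_partial(void **pages, unsigned nr_wanted)
{
	unsigned i;

	for (i = 0; i < nr_wanted; i++) {
		pages[i] = malloc(PAGE_SZ);
		if (!pages[i])
			break;          /* partial result, caller retries */
	}
	return i;
}

int main(void)
{
	void *pages[32];
	unsigned remaining = 32, got;

	/* Mirror the process_write() loop: keep asking until all of
	 * "remaining" has been covered by (possibly partial) buffers. */
	while (remaining) {
		got = alloc_partial(pages, remaining);
		if (!got)
			return 1;       /* the kernel fails the io, -ENOMEM */
		printf("covered %u of %u pages\n", got, remaining);
		remaining -= got;
		while (got--)
			free(pages[got]);
	}
	return 0;
}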