From b673c3a8192e28f13e2050a4b82c1986be92cc15 Mon Sep 17 00:00:00 2001 From: Kazuo Ito Date: Tue, 21 Oct 2008 17:44:50 +0100 Subject: dm kcopyd: avoid queue shuffle Write throughput to LVM snapshot origin volume is an order of magnitude slower than those to LV without snapshots or snapshot target volumes, especially in the case of sequential writes with O_SYNC on. The following patch originally written by Kevin Jamieson and Jan Blunck and slightly modified for the current RCs by myself tries to improve the performance by modifying the behaviour of kcopyd, so that it pushes back an I/O job to the head of the job queue instead of the tail as process_jobs() currently does when it has to wait for free pages. This way, write requests aren't shuffled to cause extra seeks. I tested the patch against 2.6.27-rc5 and got the following results. The test is a dd command writing to snapshot origin followed by fsync to the file just created/updated. A couple of filesystem benchmarks gave me similar results in case of sequential writes, while random writes didn't suffer much. dd if=/dev/zero of= bs=4096 count=... [conv=notrunc when updating] 1) linux 2.6.27-rc5 without the patch, write to snapshot origin, average throughput (MB/s) 10M 100M 1000M create,dd 511.46 610.72 11.81 create,dd+fsync 7.10 6.77 8.13 update,dd 431.63 917.41 12.75 update,dd+fsync 7.79 7.43 8.12 compared with write throughput to LV without any snapshots, all dd+fsync and 1000 MiB writes perform very poorly. 10M 100M 1000M create,dd 555.03 608.98 123.29 create,dd+fsync 114.27 72.78 76.65 update,dd 152.34 1267.27 124.04 update,dd+fsync 130.56 77.81 77.84 2) linux 2.6.27-rc5 with the patch, write to snapshot origin, average throughput (MB/s) 10M 100M 1000M create,dd 537.06 589.44 46.21 create,dd+fsync 31.63 29.19 29.23 update,dd 487.59 897.65 37.76 update,dd+fsync 34.12 30.07 26.85 Although still not on par with plain LV performance - cannot be avoided because it's copy on write anyway - this simple patch successfully improves throughtput of dd+fsync while not affecting the rest. Signed-off-by: Jan Blunck Signed-off-by: Kazuo Ito Signed-off-by: Alasdair G Kergon Cc: stable@kernel.org --- drivers/md/dm-kcopyd.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 996802b8a45..8f153530d6d 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -268,6 +268,17 @@ static void push(struct list_head *jobs, struct kcopyd_job *job) spin_unlock_irqrestore(&kc->job_lock, flags); } + +static void push_head(struct list_head *jobs, struct kcopyd_job *job) +{ + unsigned long flags; + struct dm_kcopyd_client *kc = job->kc; + + spin_lock_irqsave(&kc->job_lock, flags); + list_add(&job->list, jobs); + spin_unlock_irqrestore(&kc->job_lock, flags); +} + /* * These three functions process 1 item from the corresponding * job list. @@ -398,7 +409,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, * We couldn't service this job ATM, so * push this job back onto the list. */ - push(jobs, job); + push_head(jobs, job); break; } -- cgit v1.2.3 From 7c5f78b9d7f21937e46c26db82976df4b459c95c Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 21 Oct 2008 17:44:51 +0100 Subject: dm snapshot: fix primary_pe race Fix a race condition with primary_pe ref_count handling. put_pending_exception runs under dm_snapshot->lock, it does atomic_dec_and_test on primary_pe->ref_count, and later does atomic_read primary_pe->ref_count. 
__origin_write does atomic_dec_and_test on primary_pe->ref_count without holding dm_snapshot->lock. This opens the following race condition: Assume two CPUs, CPU1 is executing put_pending_exception (and holding dm_snapshot->lock). CPU2 is executing __origin_write in parallel. primary_pe->ref_count == 2. CPU1: if (primary_pe && atomic_dec_and_test(&primary_pe->ref_count)) origin_bios = bio_list_get(&primary_pe->origin_bios); ... decrements primary_pe->ref_count to 1. Doesn't load origin_bios CPU2: if (first && atomic_dec_and_test(&primary_pe->ref_count)) { flush_bios(bio_list_get(&primary_pe->origin_bios)); free_pending_exception(primary_pe); /* If we got here, pe_queue is necessarily empty. */ return r; } ... decrements primary_pe->ref_count to 0, submits pending bios, frees primary_pe. CPU1: if (!primary_pe || primary_pe != pe) free_pending_exception(pe); ... this has no effect. if (primary_pe && !atomic_read(&primary_pe->ref_count)) free_pending_exception(primary_pe); ... sees ref_count == 0 (written by CPU 2), does double free !! This bug can happen only if someone is simultaneously writing to both the origin and the snapshot. If someone is writing only to the origin, __origin_write will submit kcopyd request after it decrements primary_pe->ref_count (so it can't happen that the finished copy races with primary_pe->ref_count decrementation). If someone is writing only to the snapshot, __origin_write isn't invoked at all and the race can't happen. The race happens when someone writes to the snapshot --- this creates pending_exception with primary_pe == NULL and starts copying. Then, someone writes to the same chunk in the snapshot, and __origin_write races with termination of already submitted request in pending_complete (that calls put_pending_exception). This race may be reason for bugs: http://bugzilla.kernel.org/show_bug.cgi?id=11636 https://bugzilla.redhat.com/show_bug.cgi?id=465825 The patch fixes the code to make sure that: 1. If atomic_dec_and_test(&primary_pe->ref_count) returns false, the process must no longer dereference primary_pe (because someone else may free it under us). 2. If atomic_dec_and_test(&primary_pe->ref_count) returns true, the process is responsible for freeing primary_pe. Signed-off-by: Mikulas Patocka Signed-off-by: Alasdair G Kergon Cc: stable@kernel.org --- drivers/md/dm-snap.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 6e5528aecc9..4ed9b7aaadb 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -824,8 +824,10 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) * the bios for the original write to the origin. */ if (primary_pe && - atomic_dec_and_test(&primary_pe->ref_count)) + atomic_dec_and_test(&primary_pe->ref_count)) { origin_bios = bio_list_get(&primary_pe->origin_bios); + free_pending_exception(primary_pe); + } /* * Free the pe if it's not linked to an origin write or if @@ -834,12 +836,6 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) if (!primary_pe || primary_pe != pe) free_pending_exception(pe); - /* - * Free the primary pe if nothing references it. 
- */ - if (primary_pe && !atomic_read(&primary_pe->ref_count)) - free_pending_exception(primary_pe); - return origin_bios; } -- cgit v1.2.3 From f68d4f3d394da5b1f69d855b8513f4256ccc803e Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 21 Oct 2008 17:44:53 +0100 Subject: dm snapshot: drop unused last_percent The last_percent field is unused - remove it. (It dates from when events were triggered as each X% filled up.) Signed-off-by: Mikulas Patocka Signed-off-by: Alasdair G Kergon --- drivers/md/dm-snap.c | 1 - drivers/md/dm-snap.h | 3 --- 2 files changed, 4 deletions(-) diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 4ed9b7aaadb..b2d9d1ac28a 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -600,7 +600,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->valid = 1; s->active = 0; - s->last_percent = 0; init_rwsem(&s->lock); spin_lock_init(&s->pe_lock); s->ti = ti; diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h index 292c15609ae..49c17bf12c2 100644 --- a/drivers/md/dm-snap.h +++ b/drivers/md/dm-snap.h @@ -158,9 +158,6 @@ struct dm_snapshot { /* Used for display of table */ char type; - /* The last percentage we notified */ - int last_percent; - mempool_t *pending_pool; struct exception_table pending; -- cgit v1.2.3 From 7c9e6c17325fab380fbe9c9787680ff7d4a51abd Mon Sep 17 00:00:00 2001 From: Alasdair G Kergon Date: Tue, 21 Oct 2008 17:44:55 +0100 Subject: dm exception store: refactor zero_area Use a separate buffer for writing zeroes to the on-disk snapshot exception store, make the updating of ps->current_area explicit and refactor the code in preparation for the fix in the next patch. No functional change. Signed-off-by: Alasdair G Kergon Signed-off-by: Mikulas Patocka Cc: stable@kernel.org --- drivers/md/dm-exception-store.c | 102 ++++++++++++++++++++++++++-------------- 1 file changed, 68 insertions(+), 34 deletions(-) diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 769ab677f8e..fe6cef8df20 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -104,6 +104,11 @@ struct pstore { */ void *area; + /* + * An area of zeros used to clear the next area. + */ + void *zero_area; + /* * Used to keep track of which metadata area the data in * 'chunk' refers to. @@ -149,6 +154,13 @@ static int alloc_area(struct pstore *ps) if (!ps->area) return r; + ps->zero_area = vmalloc(len); + if (!ps->zero_area) { + vfree(ps->area); + return r; + } + memset(ps->zero_area, 0, len); + return 0; } @@ -156,6 +168,8 @@ static void free_area(struct pstore *ps) { vfree(ps->area); ps->area = NULL; + vfree(ps->zero_area); + ps->zero_area = NULL; } struct mdata_req { @@ -220,25 +234,41 @@ static chunk_t area_location(struct pstore *ps, chunk_t area) * Read or write a metadata area. Remembering to skip the first * chunk which holds the header. 
*/ -static int area_io(struct pstore *ps, chunk_t area, int rw) +static int area_io(struct pstore *ps, int rw) { int r; chunk_t chunk; - chunk = area_location(ps, area); + chunk = area_location(ps, ps->current_area); r = chunk_io(ps, chunk, rw, 0); if (r) return r; - ps->current_area = area; return 0; } -static int zero_area(struct pstore *ps, chunk_t area) +static void zero_memory_area(struct pstore *ps) { memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT); - return area_io(ps, area, WRITE); +} + +static int zero_disk_area(struct pstore *ps, chunk_t area) +{ + struct dm_io_region where = { + .bdev = ps->snap->cow->bdev, + .sector = ps->snap->chunk_size * area_location(ps, area), + .count = ps->snap->chunk_size, + }; + struct dm_io_request io_req = { + .bi_rw = WRITE, + .mem.type = DM_IO_VMA, + .mem.ptr.vma = ps->zero_area, + .client = ps->io_client, + .notify.fn = NULL, + }; + + return dm_io(&io_req, 1, &where, NULL); } static int read_header(struct pstore *ps, int *new_snapshot) @@ -411,15 +441,14 @@ static int insert_exceptions(struct pstore *ps, int *full) static int read_exceptions(struct pstore *ps) { - chunk_t area; int r, full = 1; /* * Keeping reading chunks and inserting exceptions until * we find a partially full area. */ - for (area = 0; full; area++) { - r = area_io(ps, area, READ); + for (ps->current_area = 0; full; ps->current_area++) { + r = area_io(ps, READ); if (r) return r; @@ -428,6 +457,8 @@ static int read_exceptions(struct pstore *ps) return r; } + ps->current_area--; + return 0; } @@ -486,12 +517,13 @@ static int persistent_read_metadata(struct exception_store *store) return r; } - r = zero_area(ps, 0); + ps->current_area = 0; + zero_memory_area(ps); + r = zero_disk_area(ps, 0); if (r) { - DMWARN("zero_area(0) failed"); + DMWARN("zero_disk_area(0) failed"); return r; } - } else { /* * Sanity checks. @@ -551,7 +583,6 @@ static void persistent_commit(struct exception_store *store, void (*callback) (void *, int success), void *callback_context) { - int r; unsigned int i; struct pstore *ps = get_info(store); struct disk_exception de; @@ -572,33 +603,36 @@ static void persistent_commit(struct exception_store *store, cb->context = callback_context; /* - * If there are no more exceptions in flight, or we have - * filled this metadata area we commit the exceptions to - * disk. + * If there are exceptions in flight and we have not yet + * filled this metadata area there's nothing more to do. */ - if (atomic_dec_and_test(&ps->pending_count) || - (ps->current_committed == ps->exceptions_per_area)) { - r = area_io(ps, ps->current_area, WRITE); - if (r) - ps->valid = 0; + if (!atomic_dec_and_test(&ps->pending_count) && + (ps->current_committed != ps->exceptions_per_area)) + return; - /* - * Have we completely filled the current area ? - */ - if (ps->current_committed == ps->exceptions_per_area) { - ps->current_committed = 0; - r = zero_area(ps, ps->current_area + 1); - if (r) - ps->valid = 0; - } + /* + * Commit exceptions to disk. + */ + if (area_io(ps, WRITE)) + ps->valid = 0; - for (i = 0; i < ps->callback_count; i++) { - cb = ps->callbacks + i; - cb->callback(cb->context, r == 0 ? 1 : 0); - } + /* + * Advance to the next area if this one is full. 
+ */ + if (ps->current_committed == ps->exceptions_per_area) { + if (zero_disk_area(ps, ps->current_area + 1)) + ps->valid = 0; + ps->current_committed = 0; + ps->current_area++; + zero_memory_area(ps); + } - ps->callback_count = 0; + for (i = 0; i < ps->callback_count; i++) { + cb = ps->callbacks + i; + cb->callback(cb->context, ps->valid); } + + ps->callback_count = 0; } static void persistent_drop(struct exception_store *store) -- cgit v1.2.3 From 7acedc5b98a2fcff64f00c21110aae7d3ac2f7df Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 21 Oct 2008 17:44:56 +0100 Subject: dm exception store: fix misordered writes We must zero the next chunk on disk *before* writing out the current chunk, not after. Otherwise if the machine crashes at the wrong time, the "end of metadata" marker may be missing. Signed-off-by: Mikulas Patocka Signed-off-by: Alasdair G Kergon Cc: stable@kernel.org --- drivers/md/dm-exception-store.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index fe6cef8df20..6179bf70f98 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -610,18 +610,23 @@ static void persistent_commit(struct exception_store *store, (ps->current_committed != ps->exceptions_per_area)) return; + /* + * If we completely filled the current area, then wipe the next one. + */ + if ((ps->current_committed == ps->exceptions_per_area) && + zero_disk_area(ps, ps->current_area + 1)) + ps->valid = 0; + /* * Commit exceptions to disk. */ - if (area_io(ps, WRITE)) + if (ps->valid && area_io(ps, WRITE)) ps->valid = 0; /* * Advance to the next area if this one is full. */ if (ps->current_committed == ps->exceptions_per_area) { - if (zero_disk_area(ps, ps->current_area + 1)) - ps->valid = 0; ps->current_committed = 0; ps->current_area++; zero_memory_area(ps); -- cgit v1.2.3 From d63a5ce3c0d25c96bdadc78792e5b48b846e899d Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 21 Oct 2008 17:44:57 +0100 Subject: dm: publish array_too_big Move array_too_big to include/linux/device-mapper.h because it is used by targets. Remove the test from dm-raid1 as the number of mirror legs is limited such that it can never fail. (Even for stripes it seems rather unlikely.) 
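For illustration, here is a minimal standalone sketch of how such a guard is used before sizing a structure that ends in a flexible array, the way dm-stripe does below. The struct names are hypothetical; only the macro body matches the dm_array_too_big() this patch adds to include/linux/device-mapper.h.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Same expression as the macro added to include/linux/device-mapper.h. */
#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

struct leg {				/* stands in for struct stripe */
	unsigned long offset;
};

struct set {				/* stands in for struct stripe_c */
	unsigned nr_legs;
	struct leg legs[];		/* trailing array sized at allocation */
};

static struct set *alloc_set(unsigned nr_legs)
{
	struct set *s;
	size_t len;

	/* Refuse counts that would overflow the size calculation below. */
	if (dm_array_too_big(sizeof(*s), sizeof(s->legs[0]), nr_legs))
		return NULL;

	len = sizeof(*s) + nr_legs * sizeof(s->legs[0]);
	s = calloc(1, len);
	if (s)
		s->nr_legs = nr_legs;
	return s;
}

int main(void)
{
	struct set *ok = alloc_set(8);
	struct set *bad = alloc_set(UINT_MAX);

	printf("8 legs:        %s\n", ok ? "allocated" : "rejected");
	printf("UINT_MAX legs: %s\n", bad ? "allocated" : "rejected");
	free(ok);
	free(bad);
	return 0;
}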
Signed-off-by: Mikulas Patocka Signed-off-by: Alasdair G Kergon --- drivers/md/dm-raid1.c | 3 --- drivers/md/dm-stripe.c | 4 ++-- drivers/md/dm.h | 9 --------- include/linux/device-mapper.h | 3 +++ 4 files changed, 5 insertions(+), 14 deletions(-) diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 29913e42c4a..ecfd82169cb 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1315,9 +1315,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, size_t len; struct mirror_set *ms = NULL; - if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors)) - return NULL; - len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); ms = kzalloc(len, GFP_KERNEL); diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index b745d8ac625..287e2458473 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -60,8 +60,8 @@ static inline struct stripe_c *alloc_context(unsigned int stripes) { size_t len; - if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), - stripes)) + if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), + stripes)) return NULL; len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index cd189da2b2f..0ade60cdef4 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -62,15 +62,6 @@ void dm_put_target_type(struct target_type *t); int dm_target_iterate(void (*iter_func)(struct target_type *tt, void *param), void *param); -/*----------------------------------------------------------------- - * Useful inlines. - *---------------------------------------------------------------*/ -static inline int array_too_big(unsigned long fixed, unsigned long obj, - unsigned long num) -{ - return (num > (ULONG_MAX - fixed) / obj); -} - int dm_split_args(int *argc, char ***argvp, char *input); /* diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 08d783592b7..dfb30db475e 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -354,6 +354,9 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); */ #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) +#define dm_array_too_big(fixed, obj, num) \ + ((num) > (UINT_MAX - (fixed)) / (obj)) + static inline sector_t to_sector(unsigned long n) { return (n >> SECTOR_SHIFT); -- cgit v1.2.3 From 586e80e6ee0d137c7d79fbae183bb37bc60ee97e Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 21 Oct 2008 17:44:59 +0100 Subject: dm: remove dm header from targets Change #include "dm.h" to #include in all targets. Targets should not need direct access to internal DM structures. 
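The replacement header is <linux/device-mapper.h>, the public interface that the previous patch extended with dm_array_too_big(), so the typical change in a target is:

-#include "dm.h"
+#include <linux/device-mapper.h>

A couple of files (dm-kcopyd.c, dm-linear.c) keep "dm.h" as well and merely gain the public include, as their hunks below show.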
Signed-off-by: Mikulas Patocka Signed-off-by: Alasdair G Kergon --- drivers/md/dm-crypt.c | 2 +- drivers/md/dm-delay.c | 3 ++- drivers/md/dm-exception-store.c | 1 - drivers/md/dm-io.c | 2 +- drivers/md/dm-kcopyd.c | 1 + drivers/md/dm-linear.c | 2 +- drivers/md/dm-log.c | 2 +- drivers/md/dm-mpath.c | 3 ++- drivers/md/dm-path-selector.c | 3 ++- drivers/md/dm-raid1.c | 3 ++- drivers/md/dm-round-robin.c | 3 ++- drivers/md/dm-snap.h | 2 +- drivers/md/dm-stripe.c | 2 +- drivers/md/dm-zero.c | 2 +- 14 files changed, 18 insertions(+), 13 deletions(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 682ef9e6acd..8939cba6e74 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -23,7 +23,7 @@ #include #include -#include "dm.h" +#include #define DM_MSG_PREFIX "crypt" #define MESG_STR(x) x, sizeof(x) diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index bdd37f881c4..848b381f117 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -13,7 +13,8 @@ #include #include -#include "dm.h" +#include + #include "dm-bio-list.h" #define DM_MSG_PREFIX "delay" diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 6179bf70f98..01590f3e000 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -7,7 +7,6 @@ * This file is released under the GPL. */ -#include "dm.h" #include "dm-snap.h" #include diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 4789c42d9a3..2fd6d445063 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -5,7 +5,7 @@ * This file is released under the GPL. */ -#include "dm.h" +#include #include #include diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 8f153530d6d..3073618269e 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "dm.h" diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 6449bcdf84c..1b29e913675 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -5,12 +5,12 @@ */ #include "dm.h" - #include #include #include #include #include +#include #define DM_MSG_PREFIX "linear" diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 5b48478c79f..a8c0fc79ca7 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -12,7 +12,7 @@ #include #include -#include "dm.h" +#include #define DM_MSG_PREFIX "dirty region log" diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 9bf3460c554..abf6e8cfaed 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -5,7 +5,8 @@ * This file is released under the GPL. */ -#include "dm.h" +#include + #include "dm-path-selector.h" #include "dm-bio-list.h" #include "dm-bio-record.h" diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c index ca1bb636a3e..96ea226155b 100644 --- a/drivers/md/dm-path-selector.c +++ b/drivers/md/dm-path-selector.c @@ -9,7 +9,8 @@ * Path selector registration. */ -#include "dm.h" +#include + #include "dm-path-selector.h" #include diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index ecfd82169cb..f358853af5c 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -4,7 +4,8 @@ * This file is released under the GPL. */ -#include "dm.h" +#include + #include "dm-bio-list.h" #include "dm-bio-record.h" diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c index 391dfa2ad43..cdfbf65b28c 100644 --- a/drivers/md/dm-round-robin.c +++ b/drivers/md/dm-round-robin.c @@ -9,7 +9,8 @@ * Round-robin path selector. 
*/ -#include "dm.h" +#include + #include "dm-path-selector.h" #include diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h index 49c17bf12c2..f07315fe236 100644 --- a/drivers/md/dm-snap.h +++ b/drivers/md/dm-snap.h @@ -9,7 +9,7 @@ #ifndef DM_SNAPSHOT_H #define DM_SNAPSHOT_H -#include "dm.h" +#include #include "dm-bio-list.h" #include #include diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 287e2458473..a2d068dbe9e 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -4,7 +4,7 @@ * This file is released under the GPL. */ -#include "dm.h" +#include #include #include diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c index bdec206c404..cdbf126ec10 100644 --- a/drivers/md/dm-zero.c +++ b/drivers/md/dm-zero.c @@ -4,7 +4,7 @@ * This file is released under the GPL. */ -#include "dm.h" +#include #include #include -- cgit v1.2.3 From b635b00e0e159d858486fd899c4021d1d67757e2 Mon Sep 17 00:00:00 2001 From: Milan Broz Date: Tue, 21 Oct 2008 17:45:00 +0100 Subject: dm crypt: tidy sector Prepare local sector variable (offset) for later patch. Do not update io->sector for still-running I/O. No functional change. Signed-off-by: Milan Broz Signed-off-by: Alasdair G Kergon --- drivers/md/dm-crypt.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 8939cba6e74..ad98ded3008 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -688,7 +688,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, BUG_ON(io->ctx.idx_out < clone->bi_vcnt); clone->bi_sector = cc->start + io->sector; - io->sector += bio_sectors(clone); if (async) kcryptd_queue_io(io); @@ -703,13 +702,14 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) int crypt_finished; unsigned out_of_pages = 0; unsigned remaining = io->base_bio->bi_size; + sector_t sector = io->sector; int r; /* * Prevent io from disappearing until this function completes. */ crypt_inc_pending(io); - crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector); + crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); /* * The allocated buffers can be smaller than the whole bio, @@ -726,6 +726,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) io->ctx.idx_out = 0; remaining -= clone->bi_size; + sector += bio_sectors(clone); crypt_inc_pending(io); r = crypt_convert(cc, &io->ctx); @@ -741,6 +742,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) */ if (unlikely(r < 0)) break; + + io->sector = sector; } /* -- cgit v1.2.3 From 393b47ef23bbcf16890c907d0144b5a8ec4caebf Mon Sep 17 00:00:00 2001 From: Milan Broz Date: Tue, 21 Oct 2008 17:45:02 +0100 Subject: dm crypt: fix async split When writing io, dm-crypt has to allocate a new cloned bio and encrypt the data into newly-allocated pages attached to this bio. In rare cases, because of hw restrictions (e.g. physical segment limit) or memory pressure, sometimes more than one cloned bio has to be used, each processing a different fragment of the original. Currently there is one waitqueue which waits for one fragment to finish and continues processing the next fragment. But when using asynchronous crypto this doesn't work, because several fragments may be processed asynchronously or in parallel and there is only one crypt context that cannot be shared between the bio fragments. The result may be corruption of the data contained in the encrypted bio. 
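To picture the hazard, here is a deliberately simplified, single-threaded model; the names are invented and nothing below is the driver's real code. Per-block IVs are derived from a cursor in the conversion context, so if two in-flight fragments share one context and the async engine finishes their work out of order, blocks get paired with the wrong IVs:

#include <stdio.h>

/* Toy stand-in for a per-request conversion context (not dm-crypt's). */
struct toy_ctx {
	unsigned long next_sector;	/* cursor the IV is derived from */
};

/* "Encrypt" one block: show which IV (sector) the cursor hands it. */
static void process_block(const char *who, struct toy_ctx *ctx,
			  unsigned long block)
{
	printf("%s: block %lu encrypted with IV %lu\n",
	       who, block, ctx->next_sector++);
}

int main(void)
{
	/* One shared context, but the async engine finishes fragment B
	 * (blocks 2-3) before fragment A (blocks 0-1): every block gets
	 * the wrong IV. */
	struct toy_ctx shared = { 0 };

	process_block("shared ", &shared, 2);
	process_block("shared ", &shared, 3);
	process_block("shared ", &shared, 0);
	process_block("shared ", &shared, 1);

	/* Per-fragment contexts, each seeded with its own start sector,
	 * are immune to completion order. */
	struct toy_ctx ctx_a = { 0 }, ctx_b = { 2 };

	process_block("private", &ctx_b, 2);
	process_block("private", &ctx_b, 3);
	process_block("private", &ctx_a, 0);
	process_block("private", &ctx_a, 1);

	return 0;
}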
The patch fixes this by allocating new dm_crypt_io structs (with new crypto contexts) and running them independently. The fragments contains a pointer to the base dm_crypt_io struct to handle reference counting, so the base one is properly deallocated after all the fragments are finished. In a low memory situation, this only uses one additional object from the mempool. If the mempool is empty, the next allocation simple waits for previous fragments to complete. Signed-off-by: Milan Broz Signed-off-by: Alasdair G Kergon --- drivers/md/dm-crypt.c | 41 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index ad98ded3008..046ee516074 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -56,6 +56,7 @@ struct dm_crypt_io { atomic_t pending; int error; sector_t sector; + struct dm_crypt_io *base_io; }; struct dm_crypt_request { @@ -534,6 +535,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti, io->base_bio = bio; io->sector = sector; io->error = 0; + io->base_io = NULL; atomic_set(&io->pending, 0); return io; @@ -547,6 +549,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io) /* * One of the bios was finished. Check for completion of * the whole request and correctly clean up the buffer. + * If base_io is set, wait for the last fragment to complete. */ static void crypt_dec_pending(struct dm_crypt_io *io) { @@ -555,7 +558,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (!atomic_dec_and_test(&io->pending)) return; - bio_endio(io->base_bio, io->error); + if (likely(!io->base_io)) + bio_endio(io->base_bio, io->error); + else { + if (io->error && !io->base_io->error) + io->base_io->error = io->error; + crypt_dec_pending(io->base_io); + } + mempool_free(io, cc->io_pool); } @@ -699,6 +709,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) { struct crypt_config *cc = io->target->private; struct bio *clone; + struct dm_crypt_io *new_io; int crypt_finished; unsigned out_of_pages = 0; unsigned remaining = io->base_bio->bi_size; @@ -753,6 +764,34 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) if (unlikely(out_of_pages)) congestion_wait(WRITE, HZ/100); + /* + * With async crypto it is unsafe to share the crypto context + * between fragments, so switch to a new dm_crypt_io structure. + */ + if (unlikely(!crypt_finished && remaining)) { + new_io = crypt_io_alloc(io->target, io->base_bio, + sector); + crypt_inc_pending(new_io); + crypt_convert_init(cc, &new_io->ctx, NULL, + io->base_bio, sector); + new_io->ctx.idx_in = io->ctx.idx_in; + new_io->ctx.offset_in = io->ctx.offset_in; + + /* + * Fragments after the first use the base_io + * pending count. + */ + if (!io->base_io) + new_io->base_io = io; + else { + new_io->base_io = io->base_io; + crypt_inc_pending(io->base_io); + crypt_dec_pending(io); + } + + io = new_io; + } + if (unlikely(remaining)) wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); } -- cgit v1.2.3 From 0a4a1047a445062793d9004bcb0d52756726fdf5 Mon Sep 17 00:00:00 2001 From: Milan Broz Date: Tue, 21 Oct 2008 17:45:03 +0100 Subject: dm crypt: remove waitqueue Remove waitqueue no longer needed with the async crypto interface. 
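For background, a compact userspace model of the accounting that makes the wait unnecessary. All names below are invented; only the counting mirrors the previous patch: each fragment after the first pins the base dm_crypt_io, so the base completes exactly when the last fragment drops its reference, and nothing has to sleep on a queue.

#include <stdio.h>

/* Toy model of the fragment/base-io accounting (names are made up). */
struct toy_io {
	int pending;		/* stands in for the atomic pending count */
	int error;
	struct toy_io *base;	/* NULL for the base (first) fragment */
};

static void toy_io_get(struct toy_io *io)
{
	io->pending++;
}

static void toy_io_put(struct toy_io *io)
{
	if (--io->pending)
		return;
	if (!io->base) {
		printf("base io complete, error=%d\n", io->error);
		return;
	}
	/* Propagate the first error seen, then drop the ref on the base. */
	if (io->error && !io->base->error)
		io->base->error = io->error;
	toy_io_put(io->base);
}

int main(void)
{
	struct toy_io base = { .pending = 0, .error = 0, .base = NULL };
	struct toy_io frag = { .pending = 0, .error = 0, .base = &base };

	toy_io_get(&base);	/* base fragment submitted              */
	toy_io_get(&frag);	/* a further fragment submitted...      */
	toy_io_get(&base);	/* ...and it pins the base              */

	toy_io_put(&base);	/* base fragment's own I/O completes    */
	toy_io_put(&frag);	/* last fragment completes -> base done */
	return 0;
}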
Signed-off-by: Milan Broz Signed-off-by: Alasdair G Kergon --- drivers/md/dm-crypt.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 046ee516074..ce26c84af06 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -94,7 +94,6 @@ struct crypt_config { struct workqueue_struct *io_queue; struct workqueue_struct *crypt_queue; - wait_queue_head_t writeq; /* * crypto related data @@ -656,10 +655,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io) static void kcryptd_io_write(struct dm_crypt_io *io) { struct bio *clone = io->ctx.bio_out; - struct crypt_config *cc = io->target->private; - generic_make_request(clone); - wake_up(&cc->writeq); } static void kcryptd_io(struct work_struct *work) @@ -791,9 +787,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) io = new_io; } - - if (unlikely(remaining)) - wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); } crypt_dec_pending(io); @@ -1120,7 +1113,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_crypt_queue; } - init_waitqueue_head(&cc->writeq); ti->private = cc; return 0; -- cgit v1.2.3 From f3e1d26ede3fb15c06904d700f1d7b21bba2215e Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Tue, 21 Oct 2008 17:45:04 +0100 Subject: dm: mark split bio as cloned When a bio gets split, mark its fragments with the BIO_CLONED flag. Signed-off-by: Martin K. Petersen Signed-off-by: Alasdair G Kergon --- drivers/md/dm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 327de03a5bd..829d9fc6645 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -669,6 +669,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, clone->bi_size = to_bytes(len); clone->bi_io_vec->bv_offset = offset; clone->bi_io_vec->bv_len = clone->bi_size; + clone->bi_flags |= 1 << BIO_CLONED; return clone; } -- cgit v1.2.3 From 1f965b19437017cea6d3f3f46acdc5acae5fd011 Mon Sep 17 00:00:00 2001 From: Heinz Mauelshagen Date: Tue, 21 Oct 2008 17:45:06 +0100 Subject: dm raid1: separate region_hash interface part1 Separate the region hash code from raid1 so it can be shared by forthcoming targets. Use BUG_ON() for failed async dm_io() calls. Signed-off-by: Heinz Mauelshagen Signed-off-by: Alasdair G Kergon --- drivers/md/Makefile | 2 +- drivers/md/dm-raid1.c | 789 ++++++----------------------------------- drivers/md/dm-region-hash.c | 704 ++++++++++++++++++++++++++++++++++++ include/linux/dm-region-hash.h | 104 ++++++ 4 files changed, 912 insertions(+), 687 deletions(-) create mode 100644 drivers/md/dm-region-hash.c create mode 100644 include/linux/dm-region-hash.h diff --git a/drivers/md/Makefile b/drivers/md/Makefile index f1ef33dfd8c..1c615804ea7 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -34,7 +34,7 @@ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o obj-$(CONFIG_DM_DELAY) += dm-delay.o obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o -obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o +obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o obj-$(CONFIG_DM_ZERO) += dm-zero.o quiet_cmd_unroll = UNROLL $@ diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index f358853af5c..92dcc06832a 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1,118 +1,36 @@ /* * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. 
*/ -#include - #include "dm-bio-list.h" #include "dm-bio-record.h" -#include #include #include #include #include #include -#include -#include #include -#include -#include +#include #include #include #include +#include #define DM_MSG_PREFIX "raid1" + +#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */ #define DM_IO_PAGES 64 +#define DM_KCOPYD_PAGES 64 #define DM_RAID1_HANDLE_ERRORS 0x01 #define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); -/*----------------------------------------------------------------- - * Region hash - * - * The mirror splits itself up into discrete regions. Each - * region can be in one of three states: clean, dirty, - * nosync. There is no need to put clean regions in the hash. - * - * In addition to being present in the hash table a region _may_ - * be present on one of three lists. - * - * clean_regions: Regions on this list have no io pending to - * them, they are in sync, we are no longer interested in them, - * they are dull. rh_update_states() will remove them from the - * hash table. - * - * quiesced_regions: These regions have been spun down, ready - * for recovery. rh_recovery_start() will remove regions from - * this list and hand them to kmirrord, which will schedule the - * recovery io with kcopyd. - * - * recovered_regions: Regions that kcopyd has successfully - * recovered. rh_update_states() will now schedule any delayed - * io, up the recovery_count, and remove the region from the - * hash. - * - * There are 2 locks: - * A rw spin lock 'hash_lock' protects just the hash table, - * this is never held in write mode from interrupt context, - * which I believe means that we only have to disable irqs when - * doing a write lock. - * - * An ordinary spin lock 'region_lock' that protects the three - * lists in the region_hash, with the 'state', 'list' and - * 'bhs_delayed' fields of the regions. This is used from irq - * context, so all other uses will have to suspend local irqs. - *---------------------------------------------------------------*/ -struct mirror_set; -struct region_hash { - struct mirror_set *ms; - uint32_t region_size; - unsigned region_shift; - - /* holds persistent region state */ - struct dm_dirty_log *log; - - /* hash table */ - rwlock_t hash_lock; - mempool_t *region_pool; - unsigned int mask; - unsigned int nr_buckets; - struct list_head *buckets; - - spinlock_t region_lock; - atomic_t recovery_in_flight; - struct semaphore recovery_count; - struct list_head clean_regions; - struct list_head quiesced_regions; - struct list_head recovered_regions; - struct list_head failed_recovered_regions; -}; - -enum { - RH_CLEAN, - RH_DIRTY, - RH_NOSYNC, - RH_RECOVERING -}; - -struct region { - struct region_hash *rh; /* FIXME: can we get rid of this ? */ - region_t key; - int state; - - struct list_head hash_list; - struct list_head list; - - atomic_t pending; - struct bio_list delayed_bios; -}; - - /*----------------------------------------------------------------- * Mirror set structures. 
*---------------------------------------------------------------*/ @@ -133,8 +51,7 @@ struct mirror { struct mirror_set { struct dm_target *ti; struct list_head list; - struct region_hash rh; - struct dm_kcopyd_client *kcopyd_client; + uint64_t features; spinlock_t lock; /* protects the lists */ @@ -142,6 +59,8 @@ struct mirror_set { struct bio_list writes; struct bio_list failures; + struct dm_region_hash *rh; + struct dm_kcopyd_client *kcopyd_client; struct dm_io_client *io_client; mempool_t *read_record_pool; @@ -160,25 +79,14 @@ struct mirror_set { struct work_struct trigger_event; - unsigned int nr_mirrors; + unsigned nr_mirrors; struct mirror mirror[0]; }; -/* - * Conversion fns - */ -static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio) +static void wakeup_mirrord(void *context) { - return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift; -} + struct mirror_set *ms = context; -static inline sector_t region_to_sector(struct region_hash *rh, region_t region) -{ - return region << rh->region_shift; -} - -static void wake(struct mirror_set *ms) -{ queue_work(ms->kmirrord_wq, &ms->kmirrord_work); } @@ -187,7 +95,7 @@ static void delayed_wake_fn(unsigned long data) struct mirror_set *ms = (struct mirror_set *) data; clear_bit(0, &ms->timer_pending); - wake(ms); + wakeup_mirrord(ms); } static void delayed_wake(struct mirror_set *ms) @@ -201,473 +109,34 @@ static void delayed_wake(struct mirror_set *ms) add_timer(&ms->timer); } -/* FIXME move this */ -static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); - -#define MIN_REGIONS 64 -#define MAX_RECOVERY 1 -static int rh_init(struct region_hash *rh, struct mirror_set *ms, - struct dm_dirty_log *log, uint32_t region_size, - region_t nr_regions) -{ - unsigned int nr_buckets, max_buckets; - size_t i; - - /* - * Calculate a suitable number of buckets for our hash - * table. 
- */ - max_buckets = nr_regions >> 6; - for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1) - ; - nr_buckets >>= 1; - - rh->ms = ms; - rh->log = log; - rh->region_size = region_size; - rh->region_shift = ffs(region_size) - 1; - rwlock_init(&rh->hash_lock); - rh->mask = nr_buckets - 1; - rh->nr_buckets = nr_buckets; - - rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets)); - if (!rh->buckets) { - DMERR("unable to allocate region hash memory"); - return -ENOMEM; - } - - for (i = 0; i < nr_buckets; i++) - INIT_LIST_HEAD(rh->buckets + i); - - spin_lock_init(&rh->region_lock); - sema_init(&rh->recovery_count, 0); - atomic_set(&rh->recovery_in_flight, 0); - INIT_LIST_HEAD(&rh->clean_regions); - INIT_LIST_HEAD(&rh->quiesced_regions); - INIT_LIST_HEAD(&rh->recovered_regions); - INIT_LIST_HEAD(&rh->failed_recovered_regions); - - rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, - sizeof(struct region)); - if (!rh->region_pool) { - vfree(rh->buckets); - rh->buckets = NULL; - return -ENOMEM; - } - - return 0; -} - -static void rh_exit(struct region_hash *rh) -{ - unsigned int h; - struct region *reg, *nreg; - - BUG_ON(!list_empty(&rh->quiesced_regions)); - for (h = 0; h < rh->nr_buckets; h++) { - list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) { - BUG_ON(atomic_read(®->pending)); - mempool_free(reg, rh->region_pool); - } - } - - if (rh->log) - dm_dirty_log_destroy(rh->log); - if (rh->region_pool) - mempool_destroy(rh->region_pool); - vfree(rh->buckets); -} - -#define RH_HASH_MULT 2654435387U - -static inline unsigned int rh_hash(struct region_hash *rh, region_t region) -{ - return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask; -} - -static struct region *__rh_lookup(struct region_hash *rh, region_t region) -{ - struct region *reg; - - list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list) - if (reg->key == region) - return reg; - - return NULL; -} - -static void __rh_insert(struct region_hash *rh, struct region *reg) -{ - unsigned int h = rh_hash(rh, reg->key); - list_add(®->hash_list, rh->buckets + h); -} - -static struct region *__rh_alloc(struct region_hash *rh, region_t region) -{ - struct region *reg, *nreg; - - read_unlock(&rh->hash_lock); - nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); - if (unlikely(!nreg)) - nreg = kmalloc(sizeof(struct region), GFP_NOIO); - nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? 
- RH_CLEAN : RH_NOSYNC; - nreg->rh = rh; - nreg->key = region; - - INIT_LIST_HEAD(&nreg->list); - - atomic_set(&nreg->pending, 0); - bio_list_init(&nreg->delayed_bios); - write_lock_irq(&rh->hash_lock); - - reg = __rh_lookup(rh, region); - if (reg) - /* we lost the race */ - mempool_free(nreg, rh->region_pool); - - else { - __rh_insert(rh, nreg); - if (nreg->state == RH_CLEAN) { - spin_lock(&rh->region_lock); - list_add(&nreg->list, &rh->clean_regions); - spin_unlock(&rh->region_lock); - } - reg = nreg; - } - write_unlock_irq(&rh->hash_lock); - read_lock(&rh->hash_lock); - - return reg; -} - -static inline struct region *__rh_find(struct region_hash *rh, region_t region) -{ - struct region *reg; - - reg = __rh_lookup(rh, region); - if (!reg) - reg = __rh_alloc(rh, region); - - return reg; -} - -static int rh_state(struct region_hash *rh, region_t region, int may_block) -{ - int r; - struct region *reg; - - read_lock(&rh->hash_lock); - reg = __rh_lookup(rh, region); - read_unlock(&rh->hash_lock); - - if (reg) - return reg->state; - - /* - * The region wasn't in the hash, so we fall back to the - * dirty log. - */ - r = rh->log->type->in_sync(rh->log, region, may_block); - - /* - * Any error from the dirty log (eg. -EWOULDBLOCK) gets - * taken as a RH_NOSYNC - */ - return r == 1 ? RH_CLEAN : RH_NOSYNC; -} - -static inline int rh_in_sync(struct region_hash *rh, - region_t region, int may_block) -{ - int state = rh_state(rh, region, may_block); - return state == RH_CLEAN || state == RH_DIRTY; -} - -static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list) -{ - struct bio *bio; - - while ((bio = bio_list_pop(bio_list))) { - queue_bio(ms, bio, WRITE); - } -} - -static void complete_resync_work(struct region *reg, int success) -{ - struct region_hash *rh = reg->rh; - - rh->log->type->set_region_sync(rh->log, reg->key, success); - - /* - * Dispatch the bios before we call 'wake_up_all'. - * This is important because if we are suspending, - * we want to know that recovery is complete and - * the work queue is flushed. If we wake_up_all - * before we dispatch_bios (queue bios and call wake()), - * then we risk suspending before the work queue - * has been properly flushed. - */ - dispatch_bios(rh->ms, ®->delayed_bios); - if (atomic_dec_and_test(&rh->recovery_in_flight)) - wake_up_all(&_kmirrord_recovery_stopped); - up(&rh->recovery_count); -} - -static void rh_update_states(struct region_hash *rh) -{ - struct region *reg, *next; - - LIST_HEAD(clean); - LIST_HEAD(recovered); - LIST_HEAD(failed_recovered); - - /* - * Quickly grab the lists. - */ - write_lock_irq(&rh->hash_lock); - spin_lock(&rh->region_lock); - if (!list_empty(&rh->clean_regions)) { - list_splice_init(&rh->clean_regions, &clean); - - list_for_each_entry(reg, &clean, list) - list_del(®->hash_list); - } - - if (!list_empty(&rh->recovered_regions)) { - list_splice_init(&rh->recovered_regions, &recovered); - - list_for_each_entry (reg, &recovered, list) - list_del(®->hash_list); - } - - if (!list_empty(&rh->failed_recovered_regions)) { - list_splice_init(&rh->failed_recovered_regions, - &failed_recovered); - - list_for_each_entry(reg, &failed_recovered, list) - list_del(®->hash_list); - } - - spin_unlock(&rh->region_lock); - write_unlock_irq(&rh->hash_lock); - - /* - * All the regions on the recovered and clean lists have - * now been pulled out of the system, so no need to do - * any more locking. 
- */ - list_for_each_entry_safe (reg, next, &recovered, list) { - rh->log->type->clear_region(rh->log, reg->key); - complete_resync_work(reg, 1); - mempool_free(reg, rh->region_pool); - } - - list_for_each_entry_safe(reg, next, &failed_recovered, list) { - complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1); - mempool_free(reg, rh->region_pool); - } - - list_for_each_entry_safe(reg, next, &clean, list) { - rh->log->type->clear_region(rh->log, reg->key); - mempool_free(reg, rh->region_pool); - } - - rh->log->type->flush(rh->log); -} - -static void rh_inc(struct region_hash *rh, region_t region) -{ - struct region *reg; - - read_lock(&rh->hash_lock); - reg = __rh_find(rh, region); - - spin_lock_irq(&rh->region_lock); - atomic_inc(®->pending); - - if (reg->state == RH_CLEAN) { - reg->state = RH_DIRTY; - list_del_init(®->list); /* take off the clean list */ - spin_unlock_irq(&rh->region_lock); - - rh->log->type->mark_region(rh->log, reg->key); - } else - spin_unlock_irq(&rh->region_lock); - - - read_unlock(&rh->hash_lock); -} - -static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios) +static void wakeup_all_recovery_waiters(void *context) { - struct bio *bio; - - for (bio = bios->head; bio; bio = bio->bi_next) - rh_inc(rh, bio_to_region(rh, bio)); + wake_up_all(&_kmirrord_recovery_stopped); } -static void rh_dec(struct region_hash *rh, region_t region) +static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) { unsigned long flags; - struct region *reg; int should_wake = 0; + struct bio_list *bl; - read_lock(&rh->hash_lock); - reg = __rh_lookup(rh, region); - read_unlock(&rh->hash_lock); - - spin_lock_irqsave(&rh->region_lock, flags); - if (atomic_dec_and_test(®->pending)) { - /* - * There is no pending I/O for this region. - * We can move the region to corresponding list for next action. - * At this point, the region is not yet connected to any list. - * - * If the state is RH_NOSYNC, the region should be kept off - * from clean list. - * The hash entry for RH_NOSYNC will remain in memory - * until the region is recovered or the map is reloaded. - */ - - /* do nothing for RH_NOSYNC */ - if (reg->state == RH_RECOVERING) { - list_add_tail(®->list, &rh->quiesced_regions); - } else if (reg->state == RH_DIRTY) { - reg->state = RH_CLEAN; - list_add(®->list, &rh->clean_regions); - } - should_wake = 1; - } - spin_unlock_irqrestore(&rh->region_lock, flags); + bl = (rw == WRITE) ? &ms->writes : &ms->reads; + spin_lock_irqsave(&ms->lock, flags); + should_wake = !(bl->head); + bio_list_add(bl, bio); + spin_unlock_irqrestore(&ms->lock, flags); if (should_wake) - wake(rh->ms); + wakeup_mirrord(ms); } -/* - * Starts quiescing a region in preparation for recovery. - */ -static int __rh_recovery_prepare(struct region_hash *rh) +static void dispatch_bios(void *context, struct bio_list *bio_list) { - int r; - struct region *reg; - region_t region; - - /* - * Ask the dirty log what's next. - */ - r = rh->log->type->get_resync_work(rh->log, ®ion); - if (r <= 0) - return r; - - /* - * Get this region, and start it quiescing by setting the - * recovering flag. - */ - read_lock(&rh->hash_lock); - reg = __rh_find(rh, region); - read_unlock(&rh->hash_lock); - - spin_lock_irq(&rh->region_lock); - reg->state = RH_RECOVERING; - - /* Already quiesced ? 
*/ - if (atomic_read(®->pending)) - list_del_init(®->list); - else - list_move(®->list, &rh->quiesced_regions); - - spin_unlock_irq(&rh->region_lock); - - return 1; -} - -static void rh_recovery_prepare(struct region_hash *rh) -{ - /* Extra reference to avoid race with rh_stop_recovery */ - atomic_inc(&rh->recovery_in_flight); - - while (!down_trylock(&rh->recovery_count)) { - atomic_inc(&rh->recovery_in_flight); - if (__rh_recovery_prepare(rh) <= 0) { - atomic_dec(&rh->recovery_in_flight); - up(&rh->recovery_count); - break; - } - } - - /* Drop the extra reference */ - if (atomic_dec_and_test(&rh->recovery_in_flight)) - wake_up_all(&_kmirrord_recovery_stopped); -} - -/* - * Returns any quiesced regions. - */ -static struct region *rh_recovery_start(struct region_hash *rh) -{ - struct region *reg = NULL; - - spin_lock_irq(&rh->region_lock); - if (!list_empty(&rh->quiesced_regions)) { - reg = list_entry(rh->quiesced_regions.next, - struct region, list); - list_del_init(®->list); /* remove from the quiesced list */ - } - spin_unlock_irq(&rh->region_lock); - - return reg; -} - -static void rh_recovery_end(struct region *reg, int success) -{ - struct region_hash *rh = reg->rh; - - spin_lock_irq(&rh->region_lock); - if (success) - list_add(®->list, ®->rh->recovered_regions); - else { - reg->state = RH_NOSYNC; - list_add(®->list, ®->rh->failed_recovered_regions); - } - spin_unlock_irq(&rh->region_lock); - - wake(rh->ms); -} - -static int rh_flush(struct region_hash *rh) -{ - return rh->log->type->flush(rh->log); -} - -static void rh_delay(struct region_hash *rh, struct bio *bio) -{ - struct region *reg; - - read_lock(&rh->hash_lock); - reg = __rh_find(rh, bio_to_region(rh, bio)); - bio_list_add(®->delayed_bios, bio); - read_unlock(&rh->hash_lock); -} - -static void rh_stop_recovery(struct region_hash *rh) -{ - int i; - - /* wait for any recovering regions */ - for (i = 0; i < MAX_RECOVERY; i++) - down(&rh->recovery_count); -} - -static void rh_start_recovery(struct region_hash *rh) -{ - int i; - - for (i = 0; i < MAX_RECOVERY; i++) - up(&rh->recovery_count); + struct mirror_set *ms = context; + struct bio *bio; - wake(rh->ms); + while ((bio = bio_list_pop(bio_list))) + queue_bio(ms, bio, WRITE); } #define MIN_READ_RECORDS 20 @@ -777,8 +246,8 @@ out: static void recovery_complete(int read_err, unsigned long write_err, void *context) { - struct region *reg = (struct region *)context; - struct mirror_set *ms = reg->rh->ms; + struct dm_region *reg = context; + struct mirror_set *ms = dm_rh_region_context(reg); int m, bit = 0; if (read_err) { @@ -804,31 +273,33 @@ static void recovery_complete(int read_err, unsigned long write_err, } } - rh_recovery_end(reg, !(read_err || write_err)); + dm_rh_recovery_end(reg, !(read_err || write_err)); } -static int recover(struct mirror_set *ms, struct region *reg) +static int recover(struct mirror_set *ms, struct dm_region *reg) { int r; - unsigned int i; + unsigned i; struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest; struct mirror *m; unsigned long flags = 0; + region_t key = dm_rh_get_region_key(reg); + sector_t region_size = dm_rh_get_region_size(ms->rh); /* fill in the source */ m = get_default_mirror(ms); from.bdev = m->dev->bdev; - from.sector = m->offset + region_to_sector(reg->rh, reg->key); - if (reg->key == (ms->nr_regions - 1)) { + from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); + if (key == (ms->nr_regions - 1)) { /* * The final region may be smaller than * region_size. 
*/ - from.count = ms->ti->len & (reg->rh->region_size - 1); + from.count = ms->ti->len & (region_size - 1); if (!from.count) - from.count = reg->rh->region_size; + from.count = region_size; } else - from.count = reg->rh->region_size; + from.count = region_size; /* fill in the destinations */ for (i = 0, dest = to; i < ms->nr_mirrors; i++) { @@ -837,7 +308,7 @@ static int recover(struct mirror_set *ms, struct region *reg) m = ms->mirror + i; dest->bdev = m->dev->bdev; - dest->sector = m->offset + region_to_sector(reg->rh, reg->key); + dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key); dest->count = from.count; dest++; } @@ -854,22 +325,22 @@ static int recover(struct mirror_set *ms, struct region *reg) static void do_recovery(struct mirror_set *ms) { + struct dm_region *reg; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); int r; - struct region *reg; - struct dm_dirty_log *log = ms->rh.log; /* * Start quiescing some regions. */ - rh_recovery_prepare(&ms->rh); + dm_rh_recovery_prepare(ms->rh); /* * Copy any already quiesced regions. */ - while ((reg = rh_recovery_start(&ms->rh))) { + while ((reg = dm_rh_recovery_start(ms->rh))) { r = recover(ms, reg); if (r) - rh_recovery_end(reg, 0); + dm_rh_recovery_end(reg, 0); } /* @@ -910,9 +381,10 @@ static int default_ok(struct mirror *m) static int mirror_available(struct mirror_set *ms, struct bio *bio) { - region_t region = bio_to_region(&ms->rh, bio); + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + region_t region = dm_rh_bio_to_region(ms->rh, bio); - if (ms->rh.log->type->in_sync(ms->rh.log, region, 0)) + if (log->type->in_sync(log, region, 0)) return choose_mirror(ms, bio->bi_sector) ? 1 : 0; return 0; @@ -986,7 +458,14 @@ static void read_async_bio(struct mirror *m, struct bio *bio) map_region(&io, m, bio); bio_set_m(bio, m); - (void) dm_io(&io_req, 1, &io, NULL); + BUG_ON(dm_io(&io_req, 1, &io, NULL)); +} + +static inline int region_in_sync(struct mirror_set *ms, region_t region, + int may_block) +{ + int state = dm_rh_get_state(ms->rh, region, may_block); + return state == DM_RH_CLEAN || state == DM_RH_DIRTY; } static void do_reads(struct mirror_set *ms, struct bio_list *reads) @@ -996,13 +475,13 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) struct mirror *m; while ((bio = bio_list_pop(reads))) { - region = bio_to_region(&ms->rh, bio); + region = dm_rh_bio_to_region(ms->rh, bio); m = get_default_mirror(ms); /* * We can only read balance if the region is in sync. */ - if (likely(rh_in_sync(&ms->rh, region, 1))) + if (likely(region_in_sync(ms, region, 1))) m = choose_mirror(ms, bio->bi_sector); else if (m && atomic_read(&m->error_count)) m = NULL; @@ -1025,57 +504,6 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) * NOSYNC: increment pending, just write to the default mirror *---------------------------------------------------------------*/ -/* __bio_mark_nosync - * @ms - * @bio - * @done - * @error - * - * The bio was written on some mirror(s) but failed on other mirror(s). - * We can successfully endio the bio but should avoid the region being - * marked clean by setting the state RH_NOSYNC. - * - * This function is _not_ safe in interrupt context! 
- */ -static void __bio_mark_nosync(struct mirror_set *ms, - struct bio *bio, unsigned done, int error) -{ - unsigned long flags; - struct region_hash *rh = &ms->rh; - struct dm_dirty_log *log = ms->rh.log; - struct region *reg; - region_t region = bio_to_region(rh, bio); - int recovering = 0; - - /* We must inform the log that the sync count has changed. */ - log->type->set_region_sync(log, region, 0); - ms->in_sync = 0; - - read_lock(&rh->hash_lock); - reg = __rh_find(rh, region); - read_unlock(&rh->hash_lock); - - /* region hash entry should exist because write was in-flight */ - BUG_ON(!reg); - BUG_ON(!list_empty(®->list)); - - spin_lock_irqsave(&rh->region_lock, flags); - /* - * Possible cases: - * 1) RH_DIRTY - * 2) RH_NOSYNC: was dirty, other preceeding writes failed - * 3) RH_RECOVERING: flushing pending writes - * Either case, the region should have not been connected to list. - */ - recovering = (reg->state == RH_RECOVERING); - reg->state = RH_NOSYNC; - BUG_ON(!list_empty(®->list)); - spin_unlock_irqrestore(&rh->region_lock, flags); - - bio_endio(bio, error); - if (recovering) - complete_resync_work(reg, 0); -} static void write_callback(unsigned long error, void *context) { @@ -1120,7 +548,7 @@ static void write_callback(unsigned long error, void *context) bio_list_add(&ms->failures, bio); spin_unlock_irqrestore(&ms->lock, flags); if (should_wake) - wake(ms); + wakeup_mirrord(ms); return; } out: @@ -1150,7 +578,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio) */ bio_set_m(bio, get_default_mirror(ms)); - (void) dm_io(&io_req, ms->nr_mirrors, io, NULL); + BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL)); } static void do_writes(struct mirror_set *ms, struct bio_list *writes) @@ -1170,18 +598,19 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) bio_list_init(&recover); while ((bio = bio_list_pop(writes))) { - state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1); + state = dm_rh_get_state(ms->rh, + dm_rh_bio_to_region(ms->rh, bio), 1); switch (state) { - case RH_CLEAN: - case RH_DIRTY: + case DM_RH_CLEAN: + case DM_RH_DIRTY: this_list = &sync; break; - case RH_NOSYNC: + case DM_RH_NOSYNC: this_list = &nosync; break; - case RH_RECOVERING: + case DM_RH_RECOVERING: this_list = &recover; break; } @@ -1194,9 +623,9 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) * be written to (writes to recover regions are going to * be delayed). */ - rh_inc_pending(&ms->rh, &sync); - rh_inc_pending(&ms->rh, &nosync); - ms->log_failure = rh_flush(&ms->rh) ? 1 : 0; + dm_rh_inc_pending(ms->rh, &sync); + dm_rh_inc_pending(ms->rh, &nosync); + ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0; /* * Dispatch io. 
@@ -1205,13 +634,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) spin_lock_irq(&ms->lock); bio_list_merge(&ms->failures, &sync); spin_unlock_irq(&ms->lock); - wake(ms); + wakeup_mirrord(ms); } else while ((bio = bio_list_pop(&sync))) do_write(ms, bio); while ((bio = bio_list_pop(&recover))) - rh_delay(&ms->rh, bio); + dm_rh_delay(ms->rh, bio); while ((bio = bio_list_pop(&nosync))) { map_bio(get_default_mirror(ms), bio); @@ -1228,7 +657,8 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures) if (!ms->log_failure) { while ((bio = bio_list_pop(failures))) - __bio_mark_nosync(ms, bio, bio->bi_size, 0); + ms->in_sync = 0; + dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0); return; } @@ -1281,8 +711,8 @@ static void trigger_event(struct work_struct *work) *---------------------------------------------------------------*/ static void do_mirror(struct work_struct *work) { - struct mirror_set *ms =container_of(work, struct mirror_set, - kmirrord_work); + struct mirror_set *ms = container_of(work, struct mirror_set, + kmirrord_work); struct bio_list reads, writes, failures; unsigned long flags; @@ -1295,7 +725,7 @@ static void do_mirror(struct work_struct *work) bio_list_init(&ms->failures); spin_unlock_irqrestore(&ms->lock, flags); - rh_update_states(&ms->rh); + dm_rh_update_states(ms->rh, errors_handled(ms)); do_recovery(ms); do_reads(ms, &reads); do_writes(ms, &writes); @@ -1304,7 +734,6 @@ static void do_mirror(struct work_struct *work) dm_table_unplug_all(ms->ti->table); } - /*----------------------------------------------------------------- * Target functions *---------------------------------------------------------------*/ @@ -1351,7 +780,11 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, return NULL; } - if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { + ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord, + wakeup_all_recovery_waiters, + ms->ti->begin, MAX_RECOVERY, + dl, region_size, ms->nr_regions); + if (IS_ERR(ms->rh)) { ti->error = "Error creating dirty region hash"; dm_io_client_destroy(ms->io_client); mempool_destroy(ms->read_record_pool); @@ -1369,7 +802,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti, dm_put_device(ti, ms->mirror[m].dev); dm_io_client_destroy(ms->io_client); - rh_exit(&ms->rh); + dm_region_hash_destroy(ms->rh); mempool_destroy(ms->read_record_pool); kfree(ms); } @@ -1409,10 +842,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, * Create dirty log: log_type #log_params */ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, - unsigned int argc, char **argv, - unsigned int *args_used) + unsigned argc, char **argv, + unsigned *args_used) { - unsigned int param_count; + unsigned param_count; struct dm_dirty_log *dl; if (argc < 2) { @@ -1543,7 +976,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ti->private = ms; - ti->split_io = ms->rh.region_size; + ti->split_io = dm_rh_get_region_size(ms->rh); ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); if (!ms->kmirrord_wq) { @@ -1578,11 +1011,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto err_destroy_wq; } - r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); + r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client); if (r) goto err_destroy_wq; - wake(ms); + wakeup_mirrord(ms); return 0; err_destroy_wq: @@ -1603,22 +1036,6 @@ static void mirror_dtr(struct dm_target 
*ti) free_context(ms, ti, ms->nr_mirrors); } -static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) -{ - unsigned long flags; - int should_wake = 0; - struct bio_list *bl; - - bl = (rw == WRITE) ? &ms->writes : &ms->reads; - spin_lock_irqsave(&ms->lock, flags); - should_wake = !(bl->head); - bio_list_add(bl, bio); - spin_unlock_irqrestore(&ms->lock, flags); - - if (should_wake) - wake(ms); -} - /* * Mirror mapping function */ @@ -1629,16 +1046,16 @@ static int mirror_map(struct dm_target *ti, struct bio *bio, struct mirror *m; struct mirror_set *ms = ti->private; struct dm_raid1_read_record *read_record = NULL; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); if (rw == WRITE) { /* Save region for mirror_end_io() handler */ - map_context->ll = bio_to_region(&ms->rh, bio); + map_context->ll = dm_rh_bio_to_region(ms->rh, bio); queue_bio(ms, bio, rw); return DM_MAPIO_SUBMITTED; } - r = ms->rh.log->type->in_sync(ms->rh.log, - bio_to_region(&ms->rh, bio), 0); + r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0); if (r < 0 && r != -EWOULDBLOCK) return r; @@ -1686,7 +1103,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, * We need to dec pending if this was a write. */ if (rw == WRITE) { - rh_dec(&ms->rh, map_context->ll); + dm_rh_dec(ms->rh, map_context->ll); return error; } @@ -1742,7 +1159,7 @@ out: static void mirror_presuspend(struct dm_target *ti) { struct mirror_set *ms = (struct mirror_set *) ti->private; - struct dm_dirty_log *log = ms->rh.log; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); atomic_set(&ms->suspend, 1); @@ -1750,10 +1167,10 @@ static void mirror_presuspend(struct dm_target *ti) * We must finish up all the work that we've * generated (i.e. recovery work). */ - rh_stop_recovery(&ms->rh); + dm_rh_stop_recovery(ms->rh); wait_event(_kmirrord_recovery_stopped, - !atomic_read(&ms->rh.recovery_in_flight)); + !dm_rh_recovery_in_flight(ms->rh)); if (log->type->presuspend && log->type->presuspend(log)) /* FIXME: need better error handling */ @@ -1771,7 +1188,7 @@ static void mirror_presuspend(struct dm_target *ti) static void mirror_postsuspend(struct dm_target *ti) { struct mirror_set *ms = ti->private; - struct dm_dirty_log *log = ms->rh.log; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); if (log->type->postsuspend && log->type->postsuspend(log)) /* FIXME: need better error handling */ @@ -1781,13 +1198,13 @@ static void mirror_postsuspend(struct dm_target *ti) static void mirror_resume(struct dm_target *ti) { struct mirror_set *ms = ti->private; - struct dm_dirty_log *log = ms->rh.log; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); atomic_set(&ms->suspend, 0); if (log->type->resume && log->type->resume(log)) /* FIXME: need better error handling */ DMWARN("log resume failed"); - rh_start_recovery(&ms->rh); + dm_rh_start_recovery(ms->rh); } /* @@ -1819,7 +1236,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type, { unsigned int m, sz = 0; struct mirror_set *ms = (struct mirror_set *) ti->private; - struct dm_dirty_log *log = ms->rh.log; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); char buffer[ms->nr_mirrors + 1]; switch (type) { @@ -1832,15 +1249,15 @@ static int mirror_status(struct dm_target *ti, status_type_t type, buffer[m] = '\0'; DMEMIT("%llu/%llu 1 %s ", - (unsigned long long)log->type->get_sync_count(ms->rh.log), + (unsigned long long)log->type->get_sync_count(log), (unsigned long long)ms->nr_regions, buffer); - sz += log->type->status(ms->rh.log, type, result+sz, 
maxlen-sz); + sz += log->type->status(log, type, result+sz, maxlen-sz); break; case STATUSTYPE_TABLE: - sz = log->type->status(ms->rh.log, type, result, maxlen); + sz = log->type->status(log, type, result, maxlen); DMEMIT("%d", ms->nr_mirrors); for (m = 0; m < ms->nr_mirrors; m++) diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c new file mode 100644 index 00000000000..59f8d9df9e1 --- /dev/null +++ b/drivers/md/dm-region-hash.c @@ -0,0 +1,704 @@ +/* + * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ + +#include +#include + +#include +#include +#include +#include + +#include "dm.h" +#include "dm-bio-list.h" + +#define DM_MSG_PREFIX "region hash" + +/*----------------------------------------------------------------- + * Region hash + * + * The mirror splits itself up into discrete regions. Each + * region can be in one of three states: clean, dirty, + * nosync. There is no need to put clean regions in the hash. + * + * In addition to being present in the hash table a region _may_ + * be present on one of three lists. + * + * clean_regions: Regions on this list have no io pending to + * them, they are in sync, we are no longer interested in them, + * they are dull. dm_rh_update_states() will remove them from the + * hash table. + * + * quiesced_regions: These regions have been spun down, ready + * for recovery. rh_recovery_start() will remove regions from + * this list and hand them to kmirrord, which will schedule the + * recovery io with kcopyd. + * + * recovered_regions: Regions that kcopyd has successfully + * recovered. dm_rh_update_states() will now schedule any delayed + * io, up the recovery_count, and remove the region from the + * hash. + * + * There are 2 locks: + * A rw spin lock 'hash_lock' protects just the hash table, + * this is never held in write mode from interrupt context, + * which I believe means that we only have to disable irqs when + * doing a write lock. + * + * An ordinary spin lock 'region_lock' that protects the three + * lists in the region_hash, with the 'state', 'list' and + * 'delayed_bios' fields of the regions. This is used from irq + * context, so all other uses will have to suspend local irqs. + *---------------------------------------------------------------*/ +struct dm_region_hash { + uint32_t region_size; + unsigned region_shift; + + /* holds persistent region state */ + struct dm_dirty_log *log; + + /* hash table */ + rwlock_t hash_lock; + mempool_t *region_pool; + unsigned mask; + unsigned nr_buckets; + unsigned prime; + unsigned shift; + struct list_head *buckets; + + unsigned max_recovery; /* Max # of regions to recover in parallel */ + + spinlock_t region_lock; + atomic_t recovery_in_flight; + struct semaphore recovery_count; + struct list_head clean_regions; + struct list_head quiesced_regions; + struct list_head recovered_regions; + struct list_head failed_recovered_regions; + + void *context; + sector_t target_begin; + + /* Callback function to schedule bios writes */ + void (*dispatch_bios)(void *context, struct bio_list *bios); + + /* Callback function to wakeup callers worker thread. */ + void (*wakeup_workers)(void *context); + + /* Callback function to wakeup callers recovery waiters. */ + void (*wakeup_all_recovery_waiters)(void *context); +}; + +struct dm_region { + struct dm_region_hash *rh; /* FIXME: can we get rid of this ? 
*/ + region_t key; + int state; + + struct list_head hash_list; + struct list_head list; + + atomic_t pending; + struct bio_list delayed_bios; +}; + +/* + * Conversion fns + */ +static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) +{ + return sector >> rh->region_shift; +} + +sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region) +{ + return region << rh->region_shift; +} +EXPORT_SYMBOL_GPL(dm_rh_region_to_sector); + +region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) +{ + return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); +} +EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); + +void *dm_rh_region_context(struct dm_region *reg) +{ + return reg->rh->context; +} +EXPORT_SYMBOL_GPL(dm_rh_region_context); + +region_t dm_rh_get_region_key(struct dm_region *reg) +{ + return reg->key; +} +EXPORT_SYMBOL_GPL(dm_rh_get_region_key); + +sector_t dm_rh_get_region_size(struct dm_region_hash *rh) +{ + return rh->region_size; +} +EXPORT_SYMBOL_GPL(dm_rh_get_region_size); + +/* + * FIXME: shall we pass in a structure instead of all these args to + * dm_region_hash_create()???? + */ +#define RH_HASH_MULT 2654435387U +#define RH_HASH_SHIFT 12 + +#define MIN_REGIONS 64 +struct dm_region_hash *dm_region_hash_create( + void *context, void (*dispatch_bios)(void *context, + struct bio_list *bios), + void (*wakeup_workers)(void *context), + void (*wakeup_all_recovery_waiters)(void *context), + sector_t target_begin, unsigned max_recovery, + struct dm_dirty_log *log, uint32_t region_size, + region_t nr_regions) +{ + struct dm_region_hash *rh; + unsigned nr_buckets, max_buckets; + size_t i; + + /* + * Calculate a suitable number of buckets for our hash + * table. + */ + max_buckets = nr_regions >> 6; + for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1) + ; + nr_buckets >>= 1; + + rh = kmalloc(sizeof(*rh), GFP_KERNEL); + if (!rh) { + DMERR("unable to allocate region hash memory"); + return ERR_PTR(-ENOMEM); + } + + rh->context = context; + rh->dispatch_bios = dispatch_bios; + rh->wakeup_workers = wakeup_workers; + rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters; + rh->target_begin = target_begin; + rh->max_recovery = max_recovery; + rh->log = log; + rh->region_size = region_size; + rh->region_shift = ffs(region_size) - 1; + rwlock_init(&rh->hash_lock); + rh->mask = nr_buckets - 1; + rh->nr_buckets = nr_buckets; + + rh->shift = RH_HASH_SHIFT; + rh->prime = RH_HASH_MULT; + + rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets)); + if (!rh->buckets) { + DMERR("unable to allocate region hash bucket memory"); + kfree(rh); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < nr_buckets; i++) + INIT_LIST_HEAD(rh->buckets + i); + + spin_lock_init(&rh->region_lock); + sema_init(&rh->recovery_count, 0); + atomic_set(&rh->recovery_in_flight, 0); + INIT_LIST_HEAD(&rh->clean_regions); + INIT_LIST_HEAD(&rh->quiesced_regions); + INIT_LIST_HEAD(&rh->recovered_regions); + INIT_LIST_HEAD(&rh->failed_recovered_regions); + + rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, + sizeof(struct dm_region)); + if (!rh->region_pool) { + vfree(rh->buckets); + kfree(rh); + rh = ERR_PTR(-ENOMEM); + } + + return rh; +} +EXPORT_SYMBOL_GPL(dm_region_hash_create); + +void dm_region_hash_destroy(struct dm_region_hash *rh) +{ + unsigned h; + struct dm_region *reg, *nreg; + + BUG_ON(!list_empty(&rh->quiesced_regions)); + for (h = 0; h < rh->nr_buckets; h++) { + list_for_each_entry_safe(reg, nreg, rh->buckets + h, + hash_list) { + 
BUG_ON(atomic_read(®->pending)); + mempool_free(reg, rh->region_pool); + } + } + + if (rh->log) + dm_dirty_log_destroy(rh->log); + + if (rh->region_pool) + mempool_destroy(rh->region_pool); + + vfree(rh->buckets); + kfree(rh); +} +EXPORT_SYMBOL_GPL(dm_region_hash_destroy); + +struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh) +{ + return rh->log; +} +EXPORT_SYMBOL_GPL(dm_rh_dirty_log); + +static unsigned rh_hash(struct dm_region_hash *rh, region_t region) +{ + return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask; +} + +static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region) +{ + struct dm_region *reg; + struct list_head *bucket = rh->buckets + rh_hash(rh, region); + + list_for_each_entry(reg, bucket, hash_list) + if (reg->key == region) + return reg; + + return NULL; +} + +static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg) +{ + list_add(®->hash_list, rh->buckets + rh_hash(rh, reg->key)); +} + +static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region) +{ + struct dm_region *reg, *nreg; + + nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); + if (unlikely(!nreg)) + nreg = kmalloc(sizeof(*nreg), GFP_NOIO); + + nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? + DM_RH_CLEAN : DM_RH_NOSYNC; + nreg->rh = rh; + nreg->key = region; + INIT_LIST_HEAD(&nreg->list); + atomic_set(&nreg->pending, 0); + bio_list_init(&nreg->delayed_bios); + + write_lock_irq(&rh->hash_lock); + reg = __rh_lookup(rh, region); + if (reg) + /* We lost the race. */ + mempool_free(nreg, rh->region_pool); + else { + __rh_insert(rh, nreg); + if (nreg->state == DM_RH_CLEAN) { + spin_lock(&rh->region_lock); + list_add(&nreg->list, &rh->clean_regions); + spin_unlock(&rh->region_lock); + } + + reg = nreg; + } + write_unlock_irq(&rh->hash_lock); + + return reg; +} + +static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region) +{ + struct dm_region *reg; + + reg = __rh_lookup(rh, region); + if (!reg) { + read_unlock(&rh->hash_lock); + reg = __rh_alloc(rh, region); + read_lock(&rh->hash_lock); + } + + return reg; +} + +int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block) +{ + int r; + struct dm_region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_lookup(rh, region); + read_unlock(&rh->hash_lock); + + if (reg) + return reg->state; + + /* + * The region wasn't in the hash, so we fall back to the + * dirty log. + */ + r = rh->log->type->in_sync(rh->log, region, may_block); + + /* + * Any error from the dirty log (eg. -EWOULDBLOCK) gets + * taken as a DM_RH_NOSYNC + */ + return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC; +} +EXPORT_SYMBOL_GPL(dm_rh_get_state); + +static void complete_resync_work(struct dm_region *reg, int success) +{ + struct dm_region_hash *rh = reg->rh; + + rh->log->type->set_region_sync(rh->log, reg->key, success); + + /* + * Dispatch the bios before we call 'wake_up_all'. + * This is important because if we are suspending, + * we want to know that recovery is complete and + * the work queue is flushed. If we wake_up_all + * before we dispatch_bios (queue bios and call wake()), + * then we risk suspending before the work queue + * has been properly flushed. + */ + rh->dispatch_bios(rh->context, ®->delayed_bios); + if (atomic_dec_and_test(&rh->recovery_in_flight)) + rh->wakeup_all_recovery_waiters(rh->context); + up(&rh->recovery_count); +} + +/* dm_rh_mark_nosync + * @ms + * @bio + * @done + * @error + * + * The bio was written on some mirror(s) but failed on other mirror(s). 
+ * We can successfully endio the bio but should avoid the region being + * marked clean by setting the state DM_RH_NOSYNC. + * + * This function is _not_ safe in interrupt context! + */ +void dm_rh_mark_nosync(struct dm_region_hash *rh, + struct bio *bio, unsigned done, int error) +{ + unsigned long flags; + struct dm_dirty_log *log = rh->log; + struct dm_region *reg; + region_t region = dm_rh_bio_to_region(rh, bio); + int recovering = 0; + + /* We must inform the log that the sync count has changed. */ + log->type->set_region_sync(log, region, 0); + + read_lock(&rh->hash_lock); + reg = __rh_find(rh, region); + read_unlock(&rh->hash_lock); + + /* region hash entry should exist because write was in-flight */ + BUG_ON(!reg); + BUG_ON(!list_empty(®->list)); + + spin_lock_irqsave(&rh->region_lock, flags); + /* + * Possible cases: + * 1) DM_RH_DIRTY + * 2) DM_RH_NOSYNC: was dirty, other preceeding writes failed + * 3) DM_RH_RECOVERING: flushing pending writes + * Either case, the region should have not been connected to list. + */ + recovering = (reg->state == DM_RH_RECOVERING); + reg->state = DM_RH_NOSYNC; + BUG_ON(!list_empty(®->list)); + spin_unlock_irqrestore(&rh->region_lock, flags); + + bio_endio(bio, error); + if (recovering) + complete_resync_work(reg, 0); +} +EXPORT_SYMBOL_GPL(dm_rh_mark_nosync); + +void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled) +{ + struct dm_region *reg, *next; + + LIST_HEAD(clean); + LIST_HEAD(recovered); + LIST_HEAD(failed_recovered); + + /* + * Quickly grab the lists. + */ + write_lock_irq(&rh->hash_lock); + spin_lock(&rh->region_lock); + if (!list_empty(&rh->clean_regions)) { + list_splice_init(&rh->clean_regions, &clean); + + list_for_each_entry(reg, &clean, list) + list_del(®->hash_list); + } + + if (!list_empty(&rh->recovered_regions)) { + list_splice_init(&rh->recovered_regions, &recovered); + + list_for_each_entry(reg, &recovered, list) + list_del(®->hash_list); + } + + if (!list_empty(&rh->failed_recovered_regions)) { + list_splice_init(&rh->failed_recovered_regions, + &failed_recovered); + + list_for_each_entry(reg, &failed_recovered, list) + list_del(®->hash_list); + } + + spin_unlock(&rh->region_lock); + write_unlock_irq(&rh->hash_lock); + + /* + * All the regions on the recovered and clean lists have + * now been pulled out of the system, so no need to do + * any more locking. + */ + list_for_each_entry_safe(reg, next, &recovered, list) { + rh->log->type->clear_region(rh->log, reg->key); + complete_resync_work(reg, 1); + mempool_free(reg, rh->region_pool); + } + + list_for_each_entry_safe(reg, next, &failed_recovered, list) { + complete_resync_work(reg, errors_handled ? 
0 : 1); + mempool_free(reg, rh->region_pool); + } + + list_for_each_entry_safe(reg, next, &clean, list) { + rh->log->type->clear_region(rh->log, reg->key); + mempool_free(reg, rh->region_pool); + } + + rh->log->type->flush(rh->log); +} +EXPORT_SYMBOL_GPL(dm_rh_update_states); + +static void rh_inc(struct dm_region_hash *rh, region_t region) +{ + struct dm_region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_find(rh, region); + + spin_lock_irq(&rh->region_lock); + atomic_inc(®->pending); + + if (reg->state == DM_RH_CLEAN) { + reg->state = DM_RH_DIRTY; + list_del_init(®->list); /* take off the clean list */ + spin_unlock_irq(&rh->region_lock); + + rh->log->type->mark_region(rh->log, reg->key); + } else + spin_unlock_irq(&rh->region_lock); + + + read_unlock(&rh->hash_lock); +} + +void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) +{ + struct bio *bio; + + for (bio = bios->head; bio; bio = bio->bi_next) + rh_inc(rh, dm_rh_bio_to_region(rh, bio)); +} +EXPORT_SYMBOL_GPL(dm_rh_inc_pending); + +void dm_rh_dec(struct dm_region_hash *rh, region_t region) +{ + unsigned long flags; + struct dm_region *reg; + int should_wake = 0; + + read_lock(&rh->hash_lock); + reg = __rh_lookup(rh, region); + read_unlock(&rh->hash_lock); + + spin_lock_irqsave(&rh->region_lock, flags); + if (atomic_dec_and_test(®->pending)) { + /* + * There is no pending I/O for this region. + * We can move the region to corresponding list for next action. + * At this point, the region is not yet connected to any list. + * + * If the state is DM_RH_NOSYNC, the region should be kept off + * from clean list. + * The hash entry for DM_RH_NOSYNC will remain in memory + * until the region is recovered or the map is reloaded. + */ + + /* do nothing for DM_RH_NOSYNC */ + if (reg->state == DM_RH_RECOVERING) { + list_add_tail(®->list, &rh->quiesced_regions); + } else if (reg->state == DM_RH_DIRTY) { + reg->state = DM_RH_CLEAN; + list_add(®->list, &rh->clean_regions); + } + should_wake = 1; + } + spin_unlock_irqrestore(&rh->region_lock, flags); + + if (should_wake) + rh->wakeup_workers(rh->context); +} +EXPORT_SYMBOL_GPL(dm_rh_dec); + +/* + * Starts quiescing a region in preparation for recovery. + */ +static int __rh_recovery_prepare(struct dm_region_hash *rh) +{ + int r; + region_t region; + struct dm_region *reg; + + /* + * Ask the dirty log what's next. + */ + r = rh->log->type->get_resync_work(rh->log, ®ion); + if (r <= 0) + return r; + + /* + * Get this region, and start it quiescing by setting the + * recovering flag. + */ + read_lock(&rh->hash_lock); + reg = __rh_find(rh, region); + read_unlock(&rh->hash_lock); + + spin_lock_irq(&rh->region_lock); + reg->state = DM_RH_RECOVERING; + + /* Already quiesced ? */ + if (atomic_read(®->pending)) + list_del_init(®->list); + else + list_move(®->list, &rh->quiesced_regions); + + spin_unlock_irq(&rh->region_lock); + + return 1; +} + +void dm_rh_recovery_prepare(struct dm_region_hash *rh) +{ + /* Extra reference to avoid race with dm_rh_stop_recovery */ + atomic_inc(&rh->recovery_in_flight); + + while (!down_trylock(&rh->recovery_count)) { + atomic_inc(&rh->recovery_in_flight); + if (__rh_recovery_prepare(rh) <= 0) { + atomic_dec(&rh->recovery_in_flight); + up(&rh->recovery_count); + break; + } + } + + /* Drop the extra reference */ + if (atomic_dec_and_test(&rh->recovery_in_flight)) + rh->wakeup_all_recovery_waiters(rh->context); +} +EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare); + +/* + * Returns any quiesced regions. 
+ */ +struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh) +{ + struct dm_region *reg = NULL; + + spin_lock_irq(&rh->region_lock); + if (!list_empty(&rh->quiesced_regions)) { + reg = list_entry(rh->quiesced_regions.next, + struct dm_region, list); + list_del_init(®->list); /* remove from the quiesced list */ + } + spin_unlock_irq(&rh->region_lock); + + return reg; +} +EXPORT_SYMBOL_GPL(dm_rh_recovery_start); + +void dm_rh_recovery_end(struct dm_region *reg, int success) +{ + struct dm_region_hash *rh = reg->rh; + + spin_lock_irq(&rh->region_lock); + if (success) + list_add(®->list, ®->rh->recovered_regions); + else { + reg->state = DM_RH_NOSYNC; + list_add(®->list, ®->rh->failed_recovered_regions); + } + spin_unlock_irq(&rh->region_lock); + + rh->wakeup_workers(rh->context); +} +EXPORT_SYMBOL_GPL(dm_rh_recovery_end); + +/* Return recovery in flight count. */ +int dm_rh_recovery_in_flight(struct dm_region_hash *rh) +{ + return atomic_read(&rh->recovery_in_flight); +} +EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight); + +int dm_rh_flush(struct dm_region_hash *rh) +{ + return rh->log->type->flush(rh->log); +} +EXPORT_SYMBOL_GPL(dm_rh_flush); + +void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio) +{ + struct dm_region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio)); + bio_list_add(®->delayed_bios, bio); + read_unlock(&rh->hash_lock); +} +EXPORT_SYMBOL_GPL(dm_rh_delay); + +void dm_rh_stop_recovery(struct dm_region_hash *rh) +{ + int i; + + /* wait for any recovering regions */ + for (i = 0; i < rh->max_recovery; i++) + down(&rh->recovery_count); +} +EXPORT_SYMBOL_GPL(dm_rh_stop_recovery); + +void dm_rh_start_recovery(struct dm_region_hash *rh) +{ + int i; + + for (i = 0; i < rh->max_recovery; i++) + up(&rh->recovery_count); + + rh->wakeup_workers(rh->context); +} +EXPORT_SYMBOL_GPL(dm_rh_start_recovery); + +MODULE_DESCRIPTION(DM_NAME " region hash"); +MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen "); +MODULE_LICENSE("GPL"); diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h new file mode 100644 index 00000000000..a9e652a4137 --- /dev/null +++ b/include/linux/dm-region-hash.h @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * Device-Mapper dirty region hash interface. + * + * This file is released under the GPL. + */ + +#ifndef DM_REGION_HASH_H +#define DM_REGION_HASH_H + +#include + +/*----------------------------------------------------------------- + * Region hash + *----------------------------------------------------------------*/ +struct dm_region_hash; +struct dm_region; + +/* + * States a region can have. + */ +enum dm_rh_region_states { + DM_RH_CLEAN = 0x01, /* No writes in flight. */ + DM_RH_DIRTY = 0x02, /* Writes in flight. */ + DM_RH_NOSYNC = 0x04, /* Out of sync. */ + DM_RH_RECOVERING = 0x08, /* Under resynchronization. */ +}; + +/* + * Region hash create/destroy. + */ +struct bio_list; +struct dm_region_hash *dm_region_hash_create( + void *context, void (*dispatch_bios)(void *context, + struct bio_list *bios), + void (*wakeup_workers)(void *context), + void (*wakeup_all_recovery_waiters)(void *context), + sector_t target_begin, unsigned max_recovery, + struct dm_dirty_log *log, uint32_t region_size, + region_t nr_regions); +void dm_region_hash_destroy(struct dm_region_hash *rh); + +struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh); + +/* + * Conversion functions. 
+ */ +region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio); +sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region); +void *dm_rh_region_context(struct dm_region *reg); + +/* + * Get region size and key (ie. number of the region). + */ +sector_t dm_rh_get_region_size(struct dm_region_hash *rh); +region_t dm_rh_get_region_key(struct dm_region *reg); + +/* + * Get/set/update region state (and dirty log). + * + */ +int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block); +void dm_rh_set_state(struct dm_region_hash *rh, region_t region, + enum dm_rh_region_states state, int may_block); + +/* Non-zero errors_handled leaves the state of the region NOSYNC */ +void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled); + +/* Flush the region hash and dirty log. */ +int dm_rh_flush(struct dm_region_hash *rh); + +/* Inc/dec pending count on regions. */ +void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios); +void dm_rh_dec(struct dm_region_hash *rh, region_t region); + +/* Delay bios on regions. */ +void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); + +void dm_rh_mark_nosync(struct dm_region_hash *rh, + struct bio *bio, unsigned done, int error); + +/* + * Region recovery control. + */ + +/* Prepare some regions for recovery by starting to quiesce them. */ +void dm_rh_recovery_prepare(struct dm_region_hash *rh); + +/* Try fetching a quiesced region for recovery. */ +struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh); + +/* Report recovery end on a region. */ +void dm_rh_recovery_end(struct dm_region *reg, int error); + +/* Returns number of regions with recovery work outstanding. */ +int dm_rh_recovery_in_flight(struct dm_region_hash *rh); + +/* Start/stop recovery. */ +void dm_rh_start_recovery(struct dm_region_hash *rh); +void dm_rh_stop_recovery(struct dm_region_hash *rh); + +#endif /* DM_REGION_HASH_H */ -- cgit v1.2.3 From f431d9666fd6e69fbaf305cebc7278d1428950c2 Mon Sep 17 00:00:00 2001 From: Kiyoshi Ueda Date: Tue, 21 Oct 2008 17:45:07 +0100 Subject: dm: remove unused flush_all This patch removes the DM_WQ_FLUSH_ALL state that is unnecessary. The dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL) in dm_suspend() is never invoked because: - 'goto flush_and_out' is the same as 'goto out' because the 'goto flush_and_out' is called only when '!noflush' - If r is non-zero, then the code above will invoke 'goto out' and skip this code. No functional change. 
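To make the reasoning concrete, here is a condensed sketch of the pre-patch dm_suspend() control flow. It is heavily abbreviated and not the verbatim kernel function; __do_the_suspend() is a placeholder standing in for the real suspend work:

	/* Condensed sketch of dm_suspend() before this patch; not verbatim. */
	static int dm_suspend_sketch(struct mapped_device *md, unsigned suspend_flags)
	{
		int r = 0;
		int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;

		if (!noflush) {
			md->suspended_bdev = bdget_disk(md->disk, 0);
			if (!md->suspended_bdev) {
				r = -ENOMEM;
				goto flush_and_out;	/* reached only when !noflush */
			}
		}

		r = __do_the_suspend(md);		/* placeholder for the real work */
		if (r)
			goto out;			/* later failures jump straight to out */

	flush_and_out:
		if (r && noflush)			/* false on both paths that reach here */
			dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
	out:
		if (r && md->suspended_bdev) {
			bdput(md->suspended_bdev);
			md->suspended_bdev = NULL;
		}
		return r;
	}

The goto flush_and_out is taken only when !noflush, and the fall-through path reaches the label with r == 0, so the 'r && noflush' test can never be true and the DM_WQ_FLUSH_ALL flush is dead code.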
Signed-off-by: Kiyoshi Ueda Signed-off-by: Jun'ichi Nomura Signed-off-by: Milan Broz Signed-off-by: Alasdair G Kergon --- drivers/md/dm.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 829d9fc6645..6938bfeb5e2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -76,7 +76,6 @@ union map_info *dm_get_mapinfo(struct bio *bio) */ struct dm_wq_req { enum { - DM_WQ_FLUSH_ALL, DM_WQ_FLUSH_DEFERRED, } type; struct work_struct work; @@ -1395,9 +1394,6 @@ static void dm_wq_work(struct work_struct *work) down_write(&md->io_lock); switch (req->type) { - case DM_WQ_FLUSH_ALL: - __merge_pushback_list(md); - /* pass through */ case DM_WQ_FLUSH_DEFERRED: __flush_deferred_io(md); break; @@ -1527,7 +1523,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) if (!md->suspended_bdev) { DMWARN("bdget failed in dm_suspend"); r = -ENOMEM; - goto flush_and_out; + goto out; } /* @@ -1578,14 +1574,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) set_bit(DMF_SUSPENDED, &md->flags); -flush_and_out: - if (r && noflush) - /* - * Because there may be already I/Os in the pushback list, - * flush them before return. - */ - dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL); - out: if (r && md->suspended_bdev) { bdput(md->suspended_bdev); -- cgit v1.2.3 From 51157b4ab47e1376c2b93cb854b14b637efeaff2 Mon Sep 17 00:00:00 2001 From: Kiyoshi Ueda Date: Tue, 21 Oct 2008 17:45:08 +0100 Subject: dm: tidy local_init This patch tidies local_init() in preparation for request-based dm. No functional change. Signed-off-by: Kiyoshi Ueda Signed-off-by: Jun'ichi Nomura Signed-off-by: Alasdair G Kergon --- drivers/md/dm.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6938bfeb5e2..d1d0cd0f575 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -150,40 +150,40 @@ static struct kmem_cache *_tio_cache; static int __init local_init(void) { - int r; + int r = -ENOMEM; /* allocate a slab for the dm_ios */ _io_cache = KMEM_CACHE(dm_io, 0); if (!_io_cache) - return -ENOMEM; + return r; /* allocate a slab for the target ios */ _tio_cache = KMEM_CACHE(dm_target_io, 0); - if (!_tio_cache) { - kmem_cache_destroy(_io_cache); - return -ENOMEM; - } + if (!_tio_cache) + goto out_free_io_cache; r = dm_uevent_init(); - if (r) { - kmem_cache_destroy(_tio_cache); - kmem_cache_destroy(_io_cache); - return r; - } + if (r) + goto out_free_tio_cache; _major = major; r = register_blkdev(_major, _name); - if (r < 0) { - kmem_cache_destroy(_tio_cache); - kmem_cache_destroy(_io_cache); - dm_uevent_exit(); - return r; - } + if (r < 0) + goto out_uevent_exit; if (!_major) _major = r; return 0; + +out_uevent_exit: + dm_uevent_exit(); +out_free_tio_cache: + kmem_cache_destroy(_tio_cache); +out_free_io_cache: + kmem_cache_destroy(_io_cache); + + return r; } static void local_exit(void) -- cgit v1.2.3
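The local_init() rework in the patch above is a standard instance of the kernel's goto-based unwind idiom: initialize the return code to the most common failure value, acquire resources in order, and on any failure jump to a label that releases only what has already been acquired, in reverse order. Below is a self-contained userspace illustration of the same shape; the names and the malloc()/fake_register() stand-ins are purely illustrative and not taken from the kernel.

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Stands in for register_blkdev(); fails so the unwind path is exercised. */
	static int fake_register(void)
	{
		return -EBUSY;
	}

	/* Same shape as the reworked local_init(): one failure code set up front,
	 * one label per acquired resource, released in reverse order. */
	static int init_example(void)
	{
		int r = -ENOMEM;
		void *io_cache, *tio_cache;

		io_cache = malloc(64);		/* analogous to KMEM_CACHE(dm_io, 0) */
		if (!io_cache)
			return r;

		tio_cache = malloc(64);		/* analogous to KMEM_CACHE(dm_target_io, 0) */
		if (!tio_cache)
			goto out_free_io_cache;

		r = fake_register();		/* analogous to register_blkdev(_major, _name) */
		if (r < 0)
			goto out_free_tio_cache;

		return 0;

	out_free_tio_cache:
		free(tio_cache);
	out_free_io_cache:
		free(io_cache);
		return r;
	}

	int main(void)
	{
		printf("init_example() returned %d\n", init_example());
		return 0;
	}

Compared with the old version, which repeated the full cleanup sequence at every error site, the label-per-resource layout keeps each release statement in exactly one place, so adding a new initialization step only requires adding one new label.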