From 8d2d4011f1398d984819c65043abb559c451a3c8 Mon Sep 17 00:00:00 2001
From: Artem Bityutskiy
Date: Sun, 22 Jul 2007 22:32:51 +0300
Subject: UBI: add more prints

I hit these situations and found that print messages were lacking. Add
more prints when erase problems occur.

Signed-off-by: Artem Bityutskiy
---
 drivers/mtd/ubi/wl.c | 1 +
 1 file changed, 1 insertion(+)
(limited to 'drivers/mtd/ubi/wl.c')

diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index a5a9b8d8730..12b25e5b7b8 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1093,6 +1093,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
         return err;
     }

+    ubi_err("failed to erase PEB %d, error %d", pnum, err);
     kfree(wl_wrk);
     kmem_cache_free(wl_entries_slab, e);

--
cgit v1.2.3

From 33818bbb84cd371b63ed8849cc5264d24c8b3aa2 Mon Sep 17 00:00:00 2001
From: Artem Bityutskiy
Date: Tue, 28 Aug 2007 21:29:32 +0300
Subject: UBI: allocate memory with GFP_NOFS

Use the GFP_NOFS flag when allocating memory on the I/O path, because
otherwise we may deadlock a filesystem that works on top of us. We
observed such deadlocks with UBIFS. Example:
FS takes a lock -> UBI -> kmalloc() -> VFS writeback -> FS takes the
same lock again.

Signed-off-by: Artem Bityutskiy
---
 drivers/mtd/ubi/wl.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
(limited to 'drivers/mtd/ubi/wl.c')

diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 12b25e5b7b8..248ff9e477e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -508,7 +508,7 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
     ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
                dtype == UBI_UNKNOWN);

-    pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL);
+    pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
     if (!pe)
         return -ENOMEM;

@@ -645,7 +645,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int tortur
     if (err > 0)
         return -EINVAL;

-    ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+    ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
     if (!ec_hdr)
         return -ENOMEM;

@@ -768,7 +768,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
     dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
            e->pnum, e->ec, torture);

-    wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
+    wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
     if (!wl_wrk)
         return -ENOMEM;

@@ -802,7 +802,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
     if (cancel)
         return 0;

-    vid_hdr = ubi_zalloc_vid_hdr(ubi);
+    vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
     if (!vid_hdr)
         return -ENOMEM;

@@ -1028,7 +1028,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
     ubi->wl_scheduled = 1;
     spin_unlock(&ubi->wl_lock);

-    wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
+    wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
     if (!wrk) {
         err = -ENOMEM;
         goto out_cancel;
     }

@@ -1631,7 +1631,7 @@ static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
     long long read_ec;
     struct ubi_ec_hdr *ec_hdr;

-    ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+    ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
     if (!ec_hdr)
         return -ENOMEM;

--
cgit v1.2.3
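The deadlock chain in the message above generalizes: any allocation
reachable while a filesystem mounted on top of UBI holds one of its
locks must avoid GFP_KERNEL, whose reclaim path may re-enter that
filesystem's writeback. A minimal sketch of the rule this patch
applies; the helper name and work structure below are illustrative,
not taken from the driver:

#include <linux/list.h>
#include <linux/slab.h>

/* Stand-in for a work item queued on the UBI I/O path. */
struct io_work_sketch {
    struct list_head list;
    int pnum;
};

static struct io_work_sketch *io_path_alloc_work(void)
{
    /*
     * GFP_NOFS may still sleep and reclaim memory, but it forbids the
     * allocator from calling back into filesystem code, so the
     * "FS takes a lock -> UBI -> kmalloc() -> VFS writeback -> FS
     * takes the same lock" cycle cannot form.
     */
    return kmalloc(sizeof(struct io_work_sketch), GFP_NOFS);
}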
From e88d6e10e5c848fd5be8f89e09e3bce2570886b7 Mon Sep 17 00:00:00 2001
From: Artem Bityutskiy
Date: Wed, 29 Aug 2007 14:51:52 +0300
Subject: UBI: do not use vmalloc on I/O path

For the same reason as in the previous patch: vmalloc() on the I/O path
causes deadlocks if a filesystem with writeback support works on top of
UBI. So pre-allocate the needed buffers when attaching the MTD device.
We also need mutexes to protect the buffers, but they do not cause much
contention because they are used in the recovery, torture, and WL copy
routines, which are seldom called.

Signed-off-by: Artem Bityutskiy
---
 drivers/mtd/ubi/wl.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'drivers/mtd/ubi/wl.c')

diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 248ff9e477e..6e5315bf5e1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -208,7 +208,7 @@ struct ubi_work {
 };

 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec);
+static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
                                      struct rb_root *root);
 #else
@@ -1625,7 +1625,7 @@ void ubi_wl_close(struct ubi_device *ubi)
  * is equivalent to @ec, %1 if not, and a negative error code if an error
  * occurred.
  */
-static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
+static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
 {
     int err;
     long long read_ec;
--
cgit v1.2.3
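The buffers this patch introduces trade allocation for serialization:
each buffer is created once at attach time, when blocking in vmalloc()
is harmless, and I/O-path users then take a mutex instead of
allocating. A sketch of that scheme under assumed names (the hunks
above do not show the real struct ubi_device fields, so peb_buf and
buf_mutex are placeholders):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>

struct peb_buf_sketch {
    struct mutex buf_mutex; /* serializes users of @peb_buf */
    void *peb_buf;          /* one PEB-sized scratch buffer */
};

/* Runs at attach time, before any filesystem is mounted on top. */
static int attach_alloc_buf(struct peb_buf_sketch *b, size_t peb_size)
{
    b->peb_buf = vmalloc(peb_size);
    if (!b->peb_buf)
        return -ENOMEM;
    mutex_init(&b->buf_mutex);
    return 0;
}

/* Runs on the I/O path (recovery, torture, WL copy): no allocation,
 * only contention on the mutex, which these rare callers tolerate. */
static void io_path_use_buf(struct peb_buf_sketch *b)
{
    mutex_lock(&b->buf_mutex);
    /* ... read or write a whole PEB through b->peb_buf ... */
    mutex_unlock(&b->buf_mutex);
}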
From 5abde384e350e44e9d0584238b9ee327f2062f93 Mon Sep 17 00:00:00 2001
From: Artem Bityutskiy
Date: Thu, 13 Sep 2007 14:48:20 +0300
Subject: UBI: remove useless inlines

Signed-off-by: Artem Bityutskiy
---
 drivers/mtd/ubi/wl.c | 119 ++++++++++++++++-----------------------------
 1 file changed, 38 insertions(+), 81 deletions(-)
(limited to 'drivers/mtd/ubi/wl.c')

diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6e5315bf5e1..a4f1bf33164 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -219,17 +219,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 /* Slab cache for wear-leveling entries */
 static struct kmem_cache *wl_entries_slab;

-/**
- * tree_empty - a helper function to check if an RB-tree is empty.
- * @root: the root of the tree
- *
- * This function returns non-zero if the RB-tree is empty and zero if not.
- */
-static inline int tree_empty(struct rb_root *root)
-{
-    return root->rb_node == NULL;
-}
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
@@ -266,45 +255,6 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
     rb_insert_color(&e->rb, root);
 }

-/*
- * Helper functions to add and delete wear-leveling entries from different
- * trees.
- */
-
-static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
-{
-    wl_tree_add(e, &ubi->free);
-}
-static inline void used_tree_add(struct ubi_device *ubi,
-                                 struct ubi_wl_entry *e)
-{
-    wl_tree_add(e, &ubi->used);
-}
-static inline void scrub_tree_add(struct ubi_device *ubi,
-                                  struct ubi_wl_entry *e)
-{
-    wl_tree_add(e, &ubi->scrub);
-}
-static inline void free_tree_del(struct ubi_device *ubi,
-                                 struct ubi_wl_entry *e)
-{
-    paranoid_check_in_wl_tree(e, &ubi->free);
-    rb_erase(&e->rb, &ubi->free);
-}
-static inline void used_tree_del(struct ubi_device *ubi,
-                                 struct ubi_wl_entry *e)
-{
-    paranoid_check_in_wl_tree(e, &ubi->used);
-    rb_erase(&e->rb, &ubi->used);
-}
-static inline void scrub_tree_del(struct ubi_device *ubi,
-                                  struct ubi_wl_entry *e)
-{
-    paranoid_check_in_wl_tree(e, &ubi->scrub);
-    rb_erase(&e->rb, &ubi->scrub);
-}
-
 /**
  * do_work - do one pending work.
  * @ubi: UBI device description object
@@ -358,7 +308,7 @@ static int produce_free_peb(struct ubi_device *ubi)
     int err;

     spin_lock(&ubi->wl_lock);
-    while (tree_empty(&ubi->free)) {
+    while (!ubi->free.rb_node) {
         spin_unlock(&ubi->wl_lock);

         dbg_wl("do one work synchronously");
@@ -514,7 +464,7 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)

 retry:
     spin_lock(&ubi->wl_lock);
-    if (tree_empty(&ubi->free)) {
+    if (!ubi->free.rb_node) {
         if (ubi->works_count == 0) {
             ubi_assert(list_empty(&ubi->works));
             ubi_err("no free eraseblocks");
@@ -585,7 +535,8 @@ retry:
      * Move the physical eraseblock to the protection trees where it will
      * be protected from being moved for some time.
      */
-    free_tree_del(ubi, e);
+    paranoid_check_in_wl_tree(e, &ubi->free);
+    rb_erase(&e->rb, &ubi->free);
     prot_tree_add(ubi, e, pe, protect);

     dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
@@ -704,7 +655,7 @@ static void check_protection_over(struct ubi_device *ubi)
      */
     while (1) {
         spin_lock(&ubi->wl_lock);
-        if (tree_empty(&ubi->prot.aec)) {
+        if (!ubi->prot.aec.rb_node) {
             spin_unlock(&ubi->wl_lock);
             break;
         }
@@ -721,7 +672,7 @@ static void check_protection_over(struct ubi_device *ubi)
                pe->e->pnum, ubi->abs_ec, pe->abs_ec);
         rb_erase(&pe->rb_aec, &ubi->prot.aec);
         rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
-        used_tree_add(ubi, pe->e);
+        wl_tree_add(pe->e, &ubi->used);
         spin_unlock(&ubi->wl_lock);

         kfree(pe);
@@ -812,8 +763,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
      * Only one WL worker at a time is supported at this implementation, so
      * make sure a PEB is not being moved already.
      */
-    if (ubi->move_to || tree_empty(&ubi->free) ||
-        (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) {
+    if (ubi->move_to || !ubi->free.rb_node ||
+        (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
         /*
          * Only one WL worker at a time is supported at this
          * implementation, so if a LEB is already being moved, cancel.
@@ -828,14 +779,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
          * triggered again.
          */
         dbg_wl("cancel WL, a list is empty: free %d, used %d",
-               tree_empty(&ubi->free), tree_empty(&ubi->used));
+               !ubi->free.rb_node, !ubi->used.rb_node);
         ubi->wl_scheduled = 0;
         spin_unlock(&ubi->wl_lock);
         ubi_free_vid_hdr(ubi, vid_hdr);
         return 0;
     }

-    if (tree_empty(&ubi->scrub)) {
+    if (!ubi->scrub.rb_node) {
         /*
          * Now pick the least worn-out used physical eraseblock and a
          * highly worn-out free physical eraseblock. If the erase
@@ -852,17 +803,20 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
             ubi_free_vid_hdr(ubi, vid_hdr);
             return 0;
         }
-        used_tree_del(ubi, e1);
+        paranoid_check_in_wl_tree(e1, &ubi->used);
+        rb_erase(&e1->rb, &ubi->used);
         dbg_wl("move PEB %d EC %d to PEB %d EC %d",
                e1->pnum, e1->ec, e2->pnum, e2->ec);
     } else {
         e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
         e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-        scrub_tree_del(ubi, e1);
+        paranoid_check_in_wl_tree(e1, &ubi->scrub);
+        rb_erase(&e1->rb, &ubi->scrub);
         dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
     }

-    free_tree_del(ubi, e2);
+    paranoid_check_in_wl_tree(e2, &ubi->free);
+    rb_erase(&e2->rb, &ubi->free);
     ubi_assert(!ubi->move_from && !ubi->move_to);
     ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
     ubi->move_from = e1;
@@ -908,7 +862,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
     ubi_free_vid_hdr(ubi, vid_hdr);
     spin_lock(&ubi->wl_lock);
     if (!ubi->move_to_put)
-        used_tree_add(ubi, e2);
+        wl_tree_add(e2, &ubi->used);
     else
         put = 1;
     ubi->move_from = ubi->move_to = NULL;
@@ -953,7 +907,7 @@ error:
     if (ubi->move_from_put)
         put = 1;
     else
-        used_tree_add(ubi, e1);
+        wl_tree_add(e1, &ubi->used);
     ubi->move_from = ubi->move_to = NULL;
     ubi->move_from_put = ubi->move_to_put = 0;
     spin_unlock(&ubi->wl_lock);
@@ -1005,8 +959,8 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
      * If the ubi->scrub tree is not empty, scrubbing is needed, and the
      * the WL worker has to be scheduled anyway.
      */
-    if (tree_empty(&ubi->scrub)) {
-        if (tree_empty(&ubi->used) || tree_empty(&ubi->free))
+    if (!ubi->scrub.rb_node) {
+        if (!ubi->used.rb_node || !ubi->free.rb_node)
             /* No physical eraseblocks - no deal */
             goto out_unlock;

@@ -1079,7 +1033,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,

         spin_lock(&ubi->wl_lock);
         ubi->abs_ec += 1;
-        free_tree_add(ubi, e);
+        wl_tree_add(e, &ubi->free);
         spin_unlock(&ubi->wl_lock);

         /*
@@ -1212,11 +1166,13 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
         spin_unlock(&ubi->wl_lock);
         return 0;
     } else {
-        if (in_wl_tree(e, &ubi->used))
-            used_tree_del(ubi, e);
-        else if (in_wl_tree(e, &ubi->scrub))
-            scrub_tree_del(ubi, e);
-        else
+        if (in_wl_tree(e, &ubi->used)) {
+            paranoid_check_in_wl_tree(e, &ubi->used);
+            rb_erase(&e->rb, &ubi->used);
+        } else if (in_wl_tree(e, &ubi->scrub)) {
+            paranoid_check_in_wl_tree(e, &ubi->scrub);
+            rb_erase(&e->rb, &ubi->scrub);
+        } else
             prot_tree_del(ubi, e->pnum);
     }
     spin_unlock(&ubi->wl_lock);
@@ -1224,7 +1180,7 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
     err = schedule_erase(ubi, e, torture);
     if (err) {
         spin_lock(&ubi->wl_lock);
-        used_tree_add(ubi, e);
+        wl_tree_add(e, &ubi->used);
         spin_unlock(&ubi->wl_lock);
     }

@@ -1268,12 +1224,13 @@ retry:
         goto retry;
     }

-    if (in_wl_tree(e, &ubi->used))
-        used_tree_del(ubi, e);
-    else
+    if (in_wl_tree(e, &ubi->used)) {
+        paranoid_check_in_wl_tree(e, &ubi->used);
+        rb_erase(&e->rb, &ubi->used);
+    } else
         prot_tree_del(ubi, pnum);

-    scrub_tree_add(ubi, e);
+    wl_tree_add(e, &ubi->scrub);
     spin_unlock(&ubi->wl_lock);

     /*
@@ -1489,7 +1446,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
         e->pnum = seb->pnum;
         e->ec = seb->ec;
         ubi_assert(e->ec >= 0);
-        free_tree_add(ubi, e);
+        wl_tree_add(e, &ubi->free);
         ubi->lookuptbl[e->pnum] = e;
     }

@@ -1523,16 +1480,16 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
             if (!seb->scrub) {
                 dbg_wl("add PEB %d EC %d to the used tree",
                        e->pnum, e->ec);
-                used_tree_add(ubi, e);
+                wl_tree_add(e, &ubi->used);
             } else {
                 dbg_wl("add PEB %d EC %d to the scrub tree",
                        e->pnum, e->ec);
-                scrub_tree_add(ubi, e);
+                wl_tree_add(e, &ubi->scrub);
             }
         }
     }

-    if (WL_RESERVED_PEBS > ubi->avail_pebs) {
+    if (ubi->avail_pebs < WL_RESERVED_PEBS) {
         ubi_err("no enough physical eraseblocks (%d, need %d)",
             ubi->avail_pebs, WL_RESERVED_PEBS);
         goto out_free;
--
cgit v1.2.3