Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug     |   6
-rw-r--r--  lib/Kconfig.kmemcheck |  91
-rw-r--r--  lib/dec_and_lock.c    |   3
-rw-r--r--  lib/genalloc.c        |   1
-rw-r--r--  lib/hexdump.c         |  15
-rw-r--r--  lib/kobject.c         |   7
-rw-r--r--  lib/radix-tree.c      | 110
-rw-r--r--  lib/rbtree.c          |  34
8 files changed, 191 insertions(+), 76 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 116a35051be..6b0c2d8a212 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -300,7 +300,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
 
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
-	depends on DEBUG_KERNEL && SLAB
+	depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -312,7 +312,7 @@ config DEBUG_SLAB_LEAK
 
 config SLUB_DEBUG_ON
 	bool "SLUB debugging on by default"
-	depends on SLUB && SLUB_DEBUG
+	depends on SLUB && SLUB_DEBUG && !KMEMCHECK
 	default n
 	help
 	  Boot with debugging on by default. SLUB boots by default with
@@ -996,3 +996,5 @@ config DMA_API_DEBUG
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.kmemcheck"
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
new file mode 100644
index 00000000000..603c81b6654
--- /dev/null
+++ b/lib/Kconfig.kmemcheck
@@ -0,0 +1,91 @@
+config HAVE_ARCH_KMEMCHECK
+	bool
+
+menuconfig KMEMCHECK
+	bool "kmemcheck: trap use of uninitialized memory"
+	depends on DEBUG_KERNEL
+	depends on !X86_USE_3DNOW
+	depends on SLUB || SLAB
+	depends on !CC_OPTIMIZE_FOR_SIZE
+	depends on !FUNCTION_TRACER
+	select FRAME_POINTER
+	select STACKTRACE
+	default n
+	help
+	  This option enables tracing of dynamically allocated kernel memory
+	  to see if memory is used before it has been given an initial value.
+	  Be aware that this requires half of your memory for bookkeeping and
+	  will insert extra code at *every* read and write to tracked memory
+	  thus slow down the kernel code (but user code is unaffected).
+
+	  The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
+	  or enable kmemcheck at boot-time. If the kernel is started with
+	  kmemcheck=0, the large memory and CPU overhead is not incurred.
+
+choice
+	prompt "kmemcheck: default mode at boot"
+	depends on KMEMCHECK
+	default KMEMCHECK_ONESHOT_BY_DEFAULT
+	help
+	  This option controls the default behaviour of kmemcheck when the
+	  kernel boots and no kmemcheck= parameter is given.
+
+config KMEMCHECK_DISABLED_BY_DEFAULT
+	bool "disabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ENABLED_BY_DEFAULT
+	bool "enabled"
+	depends on KMEMCHECK
+
+config KMEMCHECK_ONESHOT_BY_DEFAULT
+	bool "one-shot"
+	depends on KMEMCHECK
+	help
+	  In one-shot mode, only the first error detected is reported before
+	  kmemcheck is disabled.
+
+endchoice
+
+config KMEMCHECK_QUEUE_SIZE
+	int "kmemcheck: error queue size"
+	depends on KMEMCHECK
+	default 64
+	help
+	  Select the maximum number of errors to store in the queue. Since
+	  errors can occur virtually anywhere and in any context, we need a
+	  temporary storage area which is guarantueed not to generate any
+	  other faults. The queue will be emptied as soon as a tasklet may
+	  be scheduled. If the queue is full, new error reports will be
+	  lost.
+
+config KMEMCHECK_SHADOW_COPY_SHIFT
+	int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
+	depends on KMEMCHECK
+	range 2 8
+	default 5
+	help
+	  Select the number of shadow bytes to save along with each entry of
+	  the queue. These bytes indicate what parts of an allocation are
+	  initialized, uninitialized, etc. and will be displayed when an
+	  error is detected to help the debugging of a particular problem.
+
+config KMEMCHECK_PARTIAL_OK
+	bool "kmemcheck: allow partially uninitialized memory"
+	depends on KMEMCHECK
+	default y
+	help
+	  This option works around certain GCC optimizations that produce
+	  32-bit reads from 16-bit variables where the upper 16 bits are
+	  thrown away afterwards. This may of course also hide some real
+	  bugs.
+
+config KMEMCHECK_BITOPS_OK
+	bool "kmemcheck: allow bit-field manipulation"
+	depends on KMEMCHECK
+	default n
+	help
+	  This option silences warnings that would be generated for bit-field
+	  accesses where not all the bits are initialized at the same time.
+	  This may also hide some real bugs.
+
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index a65c3145554..e73822aa6e9 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -19,11 +19,10 @@
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
-#ifdef CONFIG_SMP
 	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
 	if (atomic_add_unless(atomic, -1, 1))
 		return 0;
-#endif
+
 	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
diff --git a/lib/genalloc.c b/lib/genalloc.c
index f6d276db2d5..eed2bdb865e 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -85,7 +85,6 @@ void gen_pool_destroy(struct gen_pool *pool)
 	int bit, end_bit;
 
 
-	write_lock(&pool->lock);
 	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 		list_del(&chunk->next_chunk);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index f07c0db81d2..39af2560f76 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -65,7 +65,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%16.16llx ", (unsigned long long)*(ptr8 + j));
+				"%s%16.16llx", j ? " " : "",
+				(unsigned long long)*(ptr8 + j));
 		ascii_column = 17 * ngroups + 2;
 		break;
 	}
@@ -76,7 +77,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%8.8x ", *(ptr4 + j));
+				"%s%8.8x", j ? " " : "", *(ptr4 + j));
 		ascii_column = 9 * ngroups + 2;
 		break;
 	}
@@ -87,19 +88,21 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 		for (j = 0; j < ngroups; j++)
 			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%4.4x ", *(ptr2 + j));
+				"%s%4.4x", j ? " " : "", *(ptr2 + j));
 		ascii_column = 5 * ngroups + 2;
 		break;
 	}
 
 	default:
-		for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
-		     j++) {
+		for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
 			ch = ptr[j];
 			linebuf[lx++] = hex_asc_hi(ch);
 			linebuf[lx++] = hex_asc_lo(ch);
 			linebuf[lx++] = ' ';
 		}
+		if (j)
+			lx--;
+
 		ascii_column = 3 * rowsize + 2;
 		break;
 	}
@@ -108,7 +111,7 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
 
 	while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
 		linebuf[lx++] = ' ';
-	for (j = 0; (j < rowsize) && (j < len) && (lx + 2) < linebuflen; j++)
+	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
 		linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ?
 			ptr[j] : '.';
 nil:
diff --git a/lib/kobject.c b/lib/kobject.c
index bacf6fe4f7a..b512b746d2a 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -793,11 +793,16 @@ static struct kset *kset_create(const char *name,
 				struct kobject *parent_kobj)
 {
 	struct kset *kset;
+	int retval;
 
 	kset = kzalloc(sizeof(*kset), GFP_KERNEL);
 	if (!kset)
 		return NULL;
-	kobject_set_name(&kset->kobj, name);
+	retval = kobject_set_name(&kset->kobj, name);
+	if (retval) {
+		kfree(kset);
+		return NULL;
+	}
 	kset->uevent_ops = uevent_ops;
 	kset->kobj.parent = parent_kobj;
 
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 4bb42a0344e..23abbd93cae 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -351,20 +351,12 @@ int radix_tree_insert(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_insert);
 
-/**
- * radix_tree_lookup_slot - lookup a slot in a radix tree
- * @root: radix tree root
- * @index: index key
- *
- * Returns: the slot corresponding to the position @index in the
- * radix tree @root. This is useful for update-if-exists operations.
- *
- * This function can be called under rcu_read_lock iff the slot is not
- * modified by radix_tree_replace_slot, otherwise it must be called
- * exclusive from other writers. Any dereference of the slot must be done
- * using radix_tree_deref_slot.
+/*
+ * is_slot == 1 : search for the slot.
+ * is_slot == 0 : search for the node.
  */
-void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+static void *radix_tree_lookup_element(struct radix_tree_root *root,
+				unsigned long index, int is_slot)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *node, **slot;
@@ -376,7 +368,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 	if (!radix_tree_is_indirect_ptr(node)) {
 		if (index > 0)
 			return NULL;
-		return (void **)&root->rnode;
+		return is_slot ? (void *)&root->rnode : node;
 	}
 	node = radix_tree_indirect_to_ptr(node);
 
@@ -397,7 +389,25 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 		height--;
 	} while (height > 0);
 
-	return (void **)slot;
+	return is_slot ? (void *)slot:node;
+}
+
+/**
+ * radix_tree_lookup_slot - lookup a slot in a radix tree
+ * @root: radix tree root
+ * @index: index key
+ *
+ * Returns: the slot corresponding to the position @index in the
+ * radix tree @root. This is useful for update-if-exists operations.
+ *
+ * This function can be called under rcu_read_lock iff the slot is not
+ * modified by radix_tree_replace_slot, otherwise it must be called
+ * exclusive from other writers. Any dereference of the slot must be done
+ * using radix_tree_deref_slot.
+ */
+void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+{
+	return (void **)radix_tree_lookup_element(root, index, 1);
 }
 EXPORT_SYMBOL(radix_tree_lookup_slot);
 
@@ -415,38 +425,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
  */
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
-	unsigned int height, shift;
-	struct radix_tree_node *node, **slot;
-
-	node = rcu_dereference(root->rnode);
-	if (node == NULL)
-		return NULL;
-
-	if (!radix_tree_is_indirect_ptr(node)) {
-		if (index > 0)
-			return NULL;
-		return node;
-	}
-	node = radix_tree_indirect_to_ptr(node);
-
-	height = node->height;
-	if (index > radix_tree_maxindex(height))
-		return NULL;
-
-	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-
-	do {
-		slot = (struct radix_tree_node **)
-			(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
-		node = rcu_dereference(*slot);
-		if (node == NULL)
-			return NULL;
-
-		shift -= RADIX_TREE_MAP_SHIFT;
-		height--;
-	} while (height > 0);
-
-	return node;
+	return radix_tree_lookup_element(root, index, 0);
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
@@ -666,6 +645,43 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_next_hole);
 
+/**
+ * radix_tree_prev_hole - find the prev hole (not-present entry)
+ * @root: tree root
+ * @index: index key
+ * @max_scan: maximum range to search
+ *
+ * Search backwards in the range [max(index-max_scan+1, 0), index]
+ * for the first hole.
+ *
+ * Returns: the index of the hole if found, otherwise returns an index
+ * outside of the set specified (in which case 'index - return >= max_scan'
+ * will be true). In rare cases of wrap-around, LONG_MAX will be returned.
+ *
+ * radix_tree_next_hole may be called under rcu_read_lock. However, like
+ * radix_tree_gang_lookup, this will not atomically search a snapshot of
+ * the tree at a single point in time. For example, if a hole is created
+ * at index 10, then subsequently a hole is created at index 5,
+ * radix_tree_prev_hole covering both indexes may return 5 if called under
+ * rcu_read_lock.
+ */
+unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+				   unsigned long index, unsigned long max_scan)
+{
+	unsigned long i;
+
+	for (i = 0; i < max_scan; i++) {
+		if (!radix_tree_lookup(root, index))
+			break;
+		index--;
+		if (index == LONG_MAX)
+			break;
+	}
+
+	return index;
+}
+EXPORT_SYMBOL(radix_tree_prev_hole);
+
 static unsigned int
 __lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
 	unsigned int max_items, unsigned long *next_index)
diff --git a/lib/rbtree.c b/lib/rbtree.c
index f653659e0bc..e2aa3be2985 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -231,34 +231,34 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
 		node = node->rb_right;
 		while ((left = node->rb_left) != NULL)
 			node = left;
+
+		if (rb_parent(old)) {
+			if (rb_parent(old)->rb_left == old)
+				rb_parent(old)->rb_left = node;
+			else
+				rb_parent(old)->rb_right = node;
+		} else
+			root->rb_node = node;
+
 		child = node->rb_right;
 		parent = rb_parent(node);
 		color = rb_color(node);
 
-		if (child)
-			rb_set_parent(child, parent);
 		if (parent == old) {
-			parent->rb_right = child;
 			parent = node;
-		} else
+		} else {
+			if (child)
+				rb_set_parent(child, parent);
 			parent->rb_left = child;
 
+			node->rb_right = old->rb_right;
+			rb_set_parent(old->rb_right, node);
+		}
+
 		node->rb_parent_color = old->rb_parent_color;
-		node->rb_right = old->rb_right;
 		node->rb_left = old->rb_left;
-
-		if (rb_parent(old))
-		{
-			if (rb_parent(old)->rb_left == old)
-				rb_parent(old)->rb_left = node;
-			else
-				rb_parent(old)->rb_right = node;
-		} else
-			root->rb_node = node;
-
 		rb_set_parent(old->rb_left, node);
-		if (old->rb_right)
-			rb_set_parent(old->rb_right, node);
+
 		goto color;
 	}
 
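The lib/dec_and_lock.c hunk above removes the CONFIG_SMP guard, so the lockless "decrement unless the count would drop to zero" fast path is now always tried before falling back to the spinlock. The sketch below is an illustration of that pattern only, not the kernel implementation: a userspace analogue built on C11 atomics and a pthread mutex, with every function name invented for the example.

/* Userspace sketch of the _atomic_dec_and_lock pattern; illustrative only. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Decrement *counter unless that would drop it to 0; true if we decremented. */
static bool dec_unless_last(atomic_int *counter)
{
	int old = atomic_load(counter);

	while (old > 1) {
		/* On failure, old is refreshed with the current value and we retry. */
		if (atomic_compare_exchange_weak(counter, &old, old - 1))
			return true;
	}
	return false;
}

/* Returns true with the lock held iff the count reached zero. */
static bool dec_and_lock(atomic_int *counter, pthread_mutex_t *lock)
{
	/* Fast path: drop a reference without touching the lock. */
	if (dec_unless_last(counter))
		return false;

	/* Slow path: take the lock, then decrement and test for zero. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(counter, 1) == 1)
		return true;	/* caller unlocks after its cleanup */
	pthread_mutex_unlock(lock);
	return false;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	atomic_int refs = 2;

	printf("first put hit zero? %d\n", dec_and_lock(&refs, &lock)); /* 0: fast path */
	if (dec_and_lock(&refs, &lock)) {	/* last reference */
		printf("last put, freeing under lock\n");
		pthread_mutex_unlock(&lock);
	}
	return 0;
}

Build with -pthread; the two calls exercise the fast path and the locked slow path in turn.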
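The hexdump.c hunks switch every grouped-output case from appending a space after each group to printing the separator before every group except the first, so the last group no longer leaves a stray blank before the ASCII column. A minimal userspace sketch of that formatting idiom follows; it is illustrative only, not lib/hexdump.c, and the helper name is made up.

/* Print 32-bit groups with a separator before every group except the first. */
#include <stdio.h>

static void dump_u32_groups(const void *buf, size_t len, char *out, size_t outlen)
{
	const unsigned int *ptr4 = buf;
	size_t ngroups = len / 4;
	int lx = 0;

	for (size_t j = 0; j < ngroups && (size_t)lx < outlen; j++)
		lx += snprintf(out + lx, outlen - lx,
			       "%s%8.8x", j ? " " : "", ptr4[j]);
}

int main(void)
{
	unsigned int data[] = { 0xdeadbeef, 0x00c0ffee, 0x12345678 };
	char line[80] = "";

	dump_u32_groups(data, sizeof(data), line, sizeof(line));
	printf("[%s]\n", line);	/* no trailing blank before ']' */
	return 0;
}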