From acc6be5405b90c9f0fb0eb8a74ec4d4b7b5bf48f Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Tue, 20 May 2008 11:15:43 +0200
Subject: x86: add save_stack_trace_bp() for tracing from a specific stack frame

This will help kmemcheck (and possibly other debugging tools) since we
can now simply pass regs->bp to the stack tracer instead of specifying
the number of stack frames to skip, which is unreliable if gcc decides
to inline functions, etc.

Note that this makes the API incomplete for other architectures, but I
expect that those can be updated lazily, e.g. when they need it.

Cc: Arjan van de Ven
Signed-off-by: Vegard Nossum
---
 include/linux/stacktrace.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux')

diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 1a8cecc4f38..551f6c7d504 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -11,6 +11,7 @@ struct stack_trace {
 };

 extern void save_stack_trace(struct stack_trace *trace);
+extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp);
 extern void save_stack_trace_tsk(struct task_struct *tsk,
				struct stack_trace *trace);
--
cgit v1.2.3

From b618ad31bb2020db6a36929122e5554e33210d47 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Fri, 13 Jun 2008 15:31:11 +0200
Subject: stacktrace: add forward-declaration struct task_struct

This is needed if the header is to be free-standing.

Signed-off-by: Vegard Nossum
---
 include/linux/stacktrace.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 551f6c7d504..51efbef38fb 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -4,6 +4,8 @@ struct task_struct;

 #ifdef CONFIG_STACKTRACE
+struct task_struct;
+
 struct stack_trace {
	unsigned int nr_entries, max_entries;
	unsigned long *entries;
--
cgit v1.2.3

From 8eae985f08138758e06503588f5f1196269bc415 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Fri, 9 May 2008 20:32:44 +0200
Subject: slab: move struct kmem_cache to headers

Move the SLAB struct kmem_cache definition to <linux/slab_def.h>, like
with SLUB, so kmemcheck can access ->ctor and ->flags.

Cc: Ingo Molnar
Cc: Christoph Lameter
Cc: Andrew Morton
Signed-off-by: Pekka Enberg
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum
---
 include/linux/slab_def.h | 81 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 81 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 713f841ecaa..850d057500d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,87 @@
 #include <linux/compiler.h>
 #include <linux/kmemtrace.h>

+/*
+ * struct kmem_cache
+ *
+ * manages a cache.
+ */
+
+struct kmem_cache {
+/* 1) per-cpu data, touched during every alloc/free */
+	struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
+	unsigned int batchcount;
+	unsigned int limit;
+	unsigned int shared;
+
+	unsigned int buffer_size;
+	u32 reciprocal_buffer_size;
+/* 3) touched by every alloc & free from the backend */
+
+	unsigned int flags;		/* constant flags */
+	unsigned int num;		/* # of objs per slab */
+
+/* 4) cache_grow/shrink */
+	/* order of pgs per slab (2^n) */
+	unsigned int gfporder;
+
+	/* force GFP flags, e.g. GFP_DMA */
+	gfp_t gfpflags;
+	size_t colour;			/* cache colouring range */
+	unsigned int colour_off;	/* colour offset */
+	struct kmem_cache *slabp_cache;
+	unsigned int slab_size;
+	unsigned int dflags;		/* dynamic flags */
+
+	/* constructor func */
+	void (*ctor)(void *obj);
+
+/* 5) cache creation/removal */
+	const char *name;
+	struct list_head next;
+
+/* 6) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+	unsigned long num_active;
+	unsigned long num_allocations;
+	unsigned long high_mark;
+	unsigned long grown;
+	unsigned long reaped;
+	unsigned long errors;
+	unsigned long max_freeable;
+	unsigned long node_allocs;
+	unsigned long node_frees;
+	unsigned long node_overflow;
+	atomic_t allochit;
+	atomic_t allocmiss;
+	atomic_t freehit;
+	atomic_t freemiss;
+
+	/*
+	 * If debugging is enabled, then the allocator can add additional
+	 * fields and/or padding to every object. buffer_size contains the total
+	 * object size including these internal fields, the following two
+	 * variables contain the offset to the user object and its size.
+	 */
+	int obj_offset;
+	int obj_size;
+#endif /* CONFIG_DEBUG_SLAB */
+
+	/*
+	 * We put nodelists[] at the end of kmem_cache, because we want to size
+	 * this array to nr_node_ids slots instead of MAX_NUMNODES
+	 * (see kmem_cache_init())
+	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+	 * is statically defined, so we reserve the max number of nodes.
+	 */
+	struct kmem_list3 *nodelists[MAX_NUMNODES];
+	/*
+	 * Do not add fields after nodelists[]
+	 */
+};
+
 /* Size description struct for general caches. */
 struct cache_sizes {
	size_t cs_size;
--
cgit v1.2.3

From 7c692cbade8b8884f1c20500393bcc7cd6d24ef8 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Wed, 21 May 2008 22:53:13 +0200
Subject: tasklets: new tasklet scheduling function

Rationale: kmemcheck needs to be able to schedule a tasklet without
touching any dynamically allocated memory _at_ _all_ (since that would
lead to a recursive page fault). This tasklet is used for writing the
error reports to the kernel log.

The new scheduling function avoids touching any other tasklets by
inserting the new tasklet as the head of the "tasklet_hi" list instead
of on the tail.

Also don't wake up the softirq thread lest the scheduler access some
tracked memory and we go down with a recursive page fault.

In this case, we'd better just wait for the maximum time of 1/HZ for
the message to appear.

Signed-off-by: Vegard Nossum
---
 include/linux/interrupt.h | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index ff374ceface..dd574d51bca 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -466,6 +466,20 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
		__tasklet_hi_schedule(t);
 }

+extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
+
+/*
+ * This version avoids touching any other tasklets. Needed for kmemcheck
+ * in order not to take any page faults while enqueueing this tasklet;
+ * consider VERY carefully whether you really need this or
+ * tasklet_hi_schedule()...
+ */
+static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		__tasklet_hi_schedule_first(t);
+}
+
 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 {
--
cgit v1.2.3
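[Editorial illustration -- not part of the series: a minimal sketch of how a
client such as kmemcheck might use the new primitive. The tasklet, handler,
and call-site names below are invented for the example.]

	#include <linux/interrupt.h>

	static void error_report_func(unsigned long data)
	{
		/* drain a statically allocated error queue into the log */
	}

	static DECLARE_TASKLET(error_report_tasklet, error_report_func, 0);

	/*
	 * Called from a context that must not touch dynamically allocated
	 * memory (e.g. a page fault handler): enqueue at the head of the
	 * tasklet_hi list without waking ksoftirqd.
	 */
	static void queue_error_report(void)
	{
		tasklet_hi_schedule_first(&error_report_tasklet);
	}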
From dfec072ecd35ba6ecad2d51dde325253ac9a2936 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Fri, 4 Apr 2008 00:51:41 +0200
Subject: kmemcheck: add the kmemcheck core

General description: kmemcheck is a patch to the Linux kernel that
detects use of uninitialized memory. It does this by trapping every
read and write to memory that was allocated dynamically (e.g. using
kmalloc()). If a memory address is read that has not previously been
written to, a message is printed to the kernel log.

Thanks to Andi Kleen for the set_memory_4k() solution.

Andrew Morton suggested documenting the shadow member of struct page.

Signed-off-by: Vegard Nossum
Signed-off-by: Pekka Enberg
[export kmemcheck_mark_initialized]
[build fix for setup_max_cpus]
Signed-off-by: Ingo Molnar
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum
---
 include/linux/kmemcheck.h | 17 +++++++++++++++
 include/linux/mm_types.h  |  8 ++++++++
 2 files changed, 25 insertions(+)
 create mode 100644 include/linux/kmemcheck.h

(limited to 'include/linux')

diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
new file mode 100644
index 00000000000..39480c91b2f
--- /dev/null
+++ b/include/linux/kmemcheck.h
@@ -0,0 +1,17 @@
+#ifndef LINUX_KMEMCHECK_H
+#define LINUX_KMEMCHECK_H
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KMEMCHECK
+extern int kmemcheck_enabled;
+
+int kmemcheck_show_addr(unsigned long address);
+int kmemcheck_hide_addr(unsigned long address);
+#else
+#define kmemcheck_enabled 0
+
+#endif /* CONFIG_KMEMCHECK */
+
+#endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0e80e26ecf2..0042090a4d7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -98,6 +98,14 @@ struct page {
 #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
 #endif
+
+#ifdef CONFIG_KMEMCHECK
+	/*
+	 * kmemcheck wants to track the status of each byte in a page; this
+	 * is a pointer to such a status block. NULL if not tracked.
+	 */
+	void *shadow;
+#endif
 };

 /*
--
cgit v1.2.3
From 2dff440525f8faba8836e9f05297b76f23b4af30 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Sat, 31 May 2008 15:56:17 +0200
Subject: kmemcheck: add mm functions

With kmemcheck enabled, the slab allocator needs to do this:

1. Tell kmemcheck to allocate the shadow memory which stores the status
   of each byte in the allocation proper, e.g. whether it is initialized
   or uninitialized.

2. Tell kmemcheck which parts of memory should be marked uninitialized.
   There are actually a few more states, such as "not yet allocated" and
   "recently freed".

If a slab cache is set up using the SLAB_NOTRACK flag, it will never
return memory that can take page faults because of kmemcheck.

If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can
still request memory with the __GFP_NOTRACK flag. This does not prevent
the page faults from occurring, however; it merely marks the object in
question as initialized so that no warnings will ever be produced for
this object.

In addition to (and in contrast to) __GFP_NOTRACK, the
__GFP_NOTRACK_FALSE_POSITIVE flag indicates that the allocation should
not be tracked _because_ it would produce a false positive. Their values
are identical, but need not be so in the future (for example, we could
now enable/disable false positives with a config option).

Parts of this patch were contributed by Pekka Enberg but merged for
atomicity.

Signed-off-by: Vegard Nossum
Signed-off-by: Pekka Enberg
Signed-off-by: Ingo Molnar
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum
---
 include/linux/gfp.h       |  9 ++++++++-
 include/linux/kmemcheck.h | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/slab.h      |  7 +++++++
 3 files changed, 62 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0bbc15f5453..daeaa8fe1bb 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -51,8 +51,15 @@ struct vm_area_struct;
 #define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
 #define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
+#define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */

-#define __GFP_BITS_SHIFT 21	/* Room for 21 __GFP_FOO bits */
+/*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+ * allocations that simply cannot be supported (e.g. page tables).
+ */
+#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+#define __GFP_BITS_SHIFT 22	/* Room for 22 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

 /* This equals 0, but use constants in case they ever change */
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 39480c91b2f..5b65f4ebead 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -7,11 +7,58 @@
 #ifdef CONFIG_KMEMCHECK
 extern int kmemcheck_enabled;

+/* The slab-related functions. */
+void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
+			    struct page *page, int order);
+void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order);
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+			  size_t size);
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
+
+void kmemcheck_show_pages(struct page *p, unsigned int n);
+void kmemcheck_hide_pages(struct page *p, unsigned int n);
+
+bool kmemcheck_page_is_tracked(struct page *p);
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n);
+void kmemcheck_mark_uninitialized(void *address, unsigned int n);
+void kmemcheck_mark_initialized(void *address, unsigned int n);
+void kmemcheck_mark_freed(void *address, unsigned int n);
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+
 int kmemcheck_show_addr(unsigned long address);
 int kmemcheck_hide_addr(unsigned long address);
 #else
 #define kmemcheck_enabled 0

+static inline void
+kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
+		       struct page *page, int order)
+{
+}
+
+static inline void
+kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+{
+}
+
+static inline void
+kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+		     size_t size)
+{
+}
+
+static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
+				       size_t size)
+{
+}
+
+static inline bool kmemcheck_page_is_tracked(struct page *p)
+{
+	return false;
+}
 #endif /* CONFIG_KMEMCHECK */

 #endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 48803064ced..e339fcf17cd 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -64,6 +64,13 @@
 #define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

+/* Don't track use of uninitialized memory */
+#ifdef CONFIG_KMEMCHECK
+# define SLAB_NOTRACK		0x01000000UL
+#else
+# define SLAB_NOTRACK		0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_TEMPORARY	SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
--
cgit v1.2.3
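[Editorial illustration -- not part of the series: how the annotation
mechanisms above are meant to be used by callers. The cache and object
names are invented.]

	/* A cache whose objects kmemcheck must never track: */
	struct kmem_cache *cache = kmem_cache_create("my_cache",
			sizeof(struct my_obj), 0, SLAB_NOTRACK, NULL);

	/* A single untracked allocation from an ordinary cache: */
	void *obj = kmalloc(size, GFP_KERNEL | __GFP_NOTRACK);

	/* An allocation known to trigger a kmemcheck false positive: */
	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);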
From d7002857dee6e9a3ce1f78d23f37caba106b29c5 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Sun, 20 Jul 2008 10:44:54 +0200
Subject: kmemcheck: add DMA hooks

This patch hooks into the DMA API to prevent the false positives that
would otherwise be reported when the CPU reads memory that is also
written directly by devices.
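[Editorial illustration -- not part of the series: this header patch only
adds the !CONFIG_KMEMCHECK stubs; the actual hook is expected to sit in the
architecture's DMA-mapping path, roughly as sketched below. The wrapper name
is invented.]

	static dma_addr_t my_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction dir)
	{
		/*
		 * The device may write to this buffer at any time, so CPU
		 * reads from it must not be flagged as uninitialized.
		 */
		kmemcheck_mark_initialized(ptr, size);
		return dma_map_single(dev, ptr, size, dir);
	}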
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum
---
 include/linux/kmemcheck.h | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 5b65f4ebead..71f21ae33d1 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -59,6 +59,22 @@ static inline bool kmemcheck_page_is_tracked(struct page *p)
 {
	return false;
 }
+
+static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+}
 #endif /* CONFIG_KMEMCHECK */

 #endif /* LINUX_KMEMCHECK_H */
--
cgit v1.2.3

From b1eeab67682a5e397aecf172046b3a8bd4808ae4 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Tue, 25 Nov 2008 16:55:53 +0100
Subject: kmemcheck: add hooks for the page allocator

This adds support for tracking the initializedness of memory that
was allocated with the page allocator. Highmem requests are not
tracked.

Cc: Dave Hansen
Acked-by: Pekka Enberg
[build fix for !CONFIG_KMEMCHECK]
Signed-off-by: Ingo Molnar
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum
---
 include/linux/gfp.h       |  5 +++++
 include/linux/kmemcheck.h | 35 +++++++++++++++++++++++++++++------
 2 files changed, 34 insertions(+), 6 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index daeaa8fe1bb..3885e7f7556 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -51,7 +51,12 @@ struct vm_area_struct;
 #define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
 #define __GFP_MOVABLE	((__force gfp_t)0x100000u)  /* Page is movable */
+
+#ifdef CONFIG_KMEMCHECK
 #define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK	((__force gfp_t)0)
+#endif

 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 71f21ae33d1..093d23969b1 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -8,13 +8,15 @@ extern int kmemcheck_enabled;

 /* The slab-related functions. */
-void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-			    struct page *page, int order);
-void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order);
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
 void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size);
 void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+			       gfp_t gfpflags);
+
 void kmemcheck_show_pages(struct page *p, unsigned int n);
 void kmemcheck_hide_pages(struct page *p, unsigned int n);

@@ -27,6 +29,7 @@ void kmemcheck_mark_freed(void *address, unsigned int n);

 void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
 void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

 int kmemcheck_show_addr(unsigned long address);
 int kmemcheck_hide_addr(unsigned long address);
@@ -34,13 +37,12 @@ int kmemcheck_hide_addr(unsigned long address);
 #define kmemcheck_enabled 0

 static inline void
-kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node,
-		       struct page *page, int order)
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 {
 }

 static inline void
-kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order)
+kmemcheck_free_shadow(struct page *page, int order)
 {
 }

@@ -55,6 +57,11 @@ static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
 {
 }

+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+	unsigned int order, gfp_t gfpflags)
+{
+}
+
 static inline bool kmemcheck_page_is_tracked(struct page *p)
 {
	return false;
@@ -75,6 +82,22 @@ static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
 {
 }

 static inline void kmemcheck_mark_freed(void *address, unsigned int n)
 {
 }
+
+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+						      unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+						    unsigned int n)
+{
+}
+
 #endif /* CONFIG_KMEMCHECK */

 #endif /* LINUX_KMEMCHECK_H */
--
cgit v1.2.3
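[Editorial illustration -- not part of the series: with the page allocator
hooked, order-n page allocations can opt out of tracking the same way slab
allocations do.]

	/* e.g. pages handed straight to a device; don't warn on CPU reads */
	struct page *pages = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 2);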
From fc7d0c9f2122e8bf58deaf1252b0e750df5b0e91 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Sat, 30 Aug 2008 12:16:05 +0200
Subject: kmemcheck: introduce bitfield API

Add the bitfield API which can be used to annotate bitfields in structs
and get rid of false positive reports.

According to Al Viro, the syntax we were using (putting #ifdef inside
macro arguments) was not valid C. He also suggested using begin/end
markers instead, which is what we do now.
[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum
---
 include/linux/kmemcheck.h | 50 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 093d23969b1..47b39b7c7e8 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -33,6 +33,7 @@ void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

 int kmemcheck_show_addr(unsigned long address);
 int kmemcheck_hide_addr(unsigned long address);
+
 #else
 #define kmemcheck_enabled 0

@@ -100,4 +101,53 @@ static inline void kmemcheck_mark_initialized_pages(struct page *p,

 #endif /* CONFIG_KMEMCHECK */

+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ *	struct a {
+ *		int x:8, y:8;
+ *	};
+ *
+ * then this should be rewritten as
+ *
+ *	struct a {
+ *		kmemcheck_bitfield_begin(flags);
+ *		int x:8, y:8;
+ *		kmemcheck_bitfield_end(flags);
+ *	};
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ *	struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ *	kmemcheck_annotate_bitfield(a, flags);
+ *
+ * Note: We provide the same definitions for both kmemcheck and non-
+ * kmemcheck kernels. This makes it harder to introduce accidental errors. It
+ * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
+ */
+#define kmemcheck_bitfield_begin(name)	\
+	int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name)	\
+	int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name)				\
+	do if (ptr) {							\
+		int _n = (long) &((ptr)->name##_end)			\
+			- (long) &((ptr)->name##_begin);		\
+		BUILD_BUG_ON(_n < 0);					\
+									\
+		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
+	} while (0)
+
+#define kmemcheck_annotate_variable(var)				\
+	do {								\
+		kmemcheck_mark_initialized(&(var), sizeof(var));	\
+	} while (0)
+
 #endif /* LINUX_KMEMCHECK_H */
--
cgit v1.2.3

From fe55f6d5c0cfec4a710ef6ff63f162b99d5f7842 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Sat, 30 Aug 2008 12:16:35 +0200
Subject: net: use kmemcheck bitfields API for skbuff

Signed-off-by: Vegard Nossum
---
 include/linux/skbuff.h | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5fd389162f0..ed6537fc5b4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -15,6 +15,7 @@
 #define _LINUX_SKBUFF_H

 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/compiler.h>
 #include <linux/time.h>
 #include <linux/cache.h>
@@ -346,6 +347,7 @@ struct sk_buff {
		};
	};
	__u32			priority;
+	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
@@ -356,6 +358,7 @@ struct sk_buff {
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
+	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
@@ -375,6 +378,8 @@ struct sk_buff {
	__u16			tc_verd;	/* traffic control verdict */
 #endif
 #endif
+
+	kmemcheck_bitfield_begin(flags2);
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
 #endif
@@ -382,6 +387,8 @@ struct sk_buff {
	__u8			do_not_encrypt:1;
	__u8			requeue:1;
 #endif
+	kmemcheck_bitfield_end(flags2);
+
	/* 0/13/14 bit hole */

 #ifdef CONFIG_NET_DMA
--
cgit v1.2.3

From c53bd2e1949ddbe06fe2a6079c0658d58ce25edb Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Thu, 26 Feb 2009 14:05:59 +0100
Subject: c2port: annotate bitfield for kmemcheck

This silences a false positive warning with kmemcheck.

Signed-off-by: Vegard Nossum
---
 include/linux/c2port.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/c2port.h b/include/linux/c2port.h
index 7b5a2388ba6..2a5cd867c36 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -10,6 +10,7 @@
  */

 #include <linux/device.h>
+#include <linux/kmemcheck.h>

 #define C2PORT_NAME_LEN	32

@@ -20,8 +21,10 @@
 /* Main struct */
 struct c2port_ops;
 struct c2port_device {
+	kmemcheck_bitfield_begin(flags);
	unsigned int access:1;
	unsigned int flash_access:1;
+	kmemcheck_bitfield_end(flags);
	int id;
	char name[C2PORT_NAME_LEN];
--
cgit v1.2.3
From 1744a21d57d9c60136461adb6afa85e51b3e94d9 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Sat, 28 Feb 2009 08:29:44 +0100
Subject: trace: annotate bitfields in struct ring_buffer_event

This gets rid of a heap of false-positive warnings from the tracer code
due to the use of bitfields.

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum
---
 include/linux/ring_buffer.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 8670f1575fe..29f8599e6be 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -1,6 +1,7 @@
 #ifndef _LINUX_RING_BUFFER_H
 #define _LINUX_RING_BUFFER_H

+#include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/seq_file.h>

@@ -11,7 +12,10 @@ struct ring_buffer_iter;
  * Don't refer to this struct directly, use functions below.
  */
 struct ring_buffer_event {
+	kmemcheck_bitfield_begin(bitfield);
	u32		type_len:5, time_delta:27;
+	kmemcheck_bitfield_end(bitfield);
+
	u32		array[];
 };
--
cgit v1.2.3

From 3446a8aa7ebcbc0a799e5e8fc4f2da0738d6bc21 Mon Sep 17 00:00:00 2001
From: Vegard Nossum
Date: Sat, 16 May 2009 11:22:14 +0200
Subject: fs: introduce __getname_gfp()

The purpose of this change is to allow __getname() users to pass a
custom GFP mask to kmem_cache_alloc(). This is needed for annotating a
certain kmemcheck false positive.

Cc: Al Viro
Signed-off-by: Vegard Nossum
---
 include/linux/fs.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/fs.h b/include/linux/fs.h
index ede84fa7da5..6d12174fbe1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1919,8 +1919,9 @@ extern void __init vfs_caches_init(unsigned long);

 extern struct kmem_cache *names_cachep;

-#define __getname()	kmem_cache_alloc(names_cachep, GFP_KERNEL)
-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+#define __getname_gfp(gfp)	kmem_cache_alloc(names_cachep, (gfp))
+#define __getname()		__getname_gfp(GFP_KERNEL)
+#define __putname(name)		kmem_cache_free(names_cachep, (void *)(name))

 #ifndef CONFIG_AUDITSYSCALL
 #define putname(name)   __putname(name)
 #else
--
cgit v1.2.3
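[Editorial illustration -- not part of the series: the intended consumer of
__getname_gfp() is a path-lookup call site that triggers a kmemcheck false
positive; it would look roughly like this. Which call site the follow-up
patch actually annotated is not shown here.]

	char *tmp = __getname_gfp(GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
	if (unlikely(!tmp))
		return ERR_PTR(-ENOMEM);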