From 7c756e6e19e71f0327760d8955f7077118ebb2b1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Jun 2009 15:13:50 +0900 Subject: percpu: implement optional weak percpu definitions Some archs (alpha and s390) need to use weak definitions for percpu variables in modules so that the compiler generates external references for them. This patch implements weak percpu definitions which arch can enable by defining ARCH_NEEDS_WEAK_PER_CPU in arch percpu header file. This weak definition adds the following two restrictions on percpu variable definitions. 1. percpu symbols must be unique whether static or not 2. percpu variables can't be defined inside a function To ensure that these restrictions are observed in generic code, config option DEBUG_FORCE_WEAK_PER_CPU enables weak percpu definitions for all cases. This patch is inspired by Ivan Kokshaysky's alpha percpu patch. [ Impact: stricter rules for percpu variables, one more debug config option ] Signed-off-by: Tejun Heo Cc: Ingo Molnar Cc: David Howells Cc: Ivan Kokshaysky --- lib/Kconfig.debug | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 23067ab1a73..77e0d8b1b7c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -777,6 +777,21 @@ config DEBUG_BLOCK_EXT_DEVT Say N if you are unsure. +config DEBUG_FORCE_WEAK_PER_CPU + bool "Force weak per-cpu definitions" + depends on DEBUG_KERNEL + help + s390 and alpha require percpu variables in modules to be + defined weak to work around addressing range issue which + puts the following two restrictions on percpu variable + definitions. + + 1. percpu symbols must be unique whether static or not + 2. percpu variables can't be defined inside a function + + To ensure that generic code follows the above rules, this + option forces all percpu variables to be defined as weak. + config LKDTM tristate "Linux Kernel Dump Test Tool Module" depends on DEBUG_KERNEL -- cgit v1.2.3 From d2e3ee9b29f5de5b01e611b04e6fb29760589b01 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 17 Jul 2009 09:09:36 +1000 Subject: kernel: fix is_single_threaded - Fix the comment, is_single_threaded(p) actually means that nobody shares ->mm with p. I think this helper should be renamed, and it should not have arguments. With or without this patch it must not be used unless p == current, otherwise we can't safely use p->signal or p->mm. - "if (atomic_read(&p->signal->count) != 1)" is not right when we have a zombie group leader, use signal->live instead. - Add PF_KTHREAD check to skip kernel threads which may borrow p->mm, otherwise we can return the wrong "false". - Use for_each_process() instead of do_each_thread(), all threads must use the same ->mm. - Use down_write(mm->mmap_sem) + rcu_read_lock() instead of tasklist_lock to iterate over the process list. If there is another CLONE_VM process it can't pass exit_mm() which takes the same mm->mmap_sem. We can miss a freshly forked CLONE_VM task, but this doesn't matter because we must see its parent and return false. 
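As a rough usage sketch (the call site below is hypothetical and not part of this series), the helper is only meaningful when applied to the current task, as noted above:

/* Hypothetical call site, for illustration only: bail out unless no other
 * thread or CLONE_VM task shares current->mm.
 */
static int do_something_unshared(void)
{
	if (!is_single_threaded(current))
		return -EAGAIN;

	/* at this point current->mm and current->signal are not shared */
	return 0;
}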
Signed-off-by: Oleg Nesterov Cc: David Howells Cc: James Morris Cc: Roland McGrath Cc: Stephen Smalley Signed-off-by: Andrew Morton Signed-off-by: James Morris --- lib/is_single_threaded.c | 62 ++++++++++++++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 26 deletions(-) (limited to 'lib') diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c index f1ed2fe76c6..2762516e0a5 100644 --- a/lib/is_single_threaded.c +++ b/lib/is_single_threaded.c @@ -12,34 +12,44 @@ #include -/** - * is_single_threaded - Determine if a thread group is single-threaded or not - * @p: A task in the thread group in question - * - * This returns true if the thread group to which a task belongs is single - * threaded, false if it is not. +/* + * Returns true if the task does not share ->mm with another thread/process. */ -bool is_single_threaded(struct task_struct *p) +bool is_single_threaded(struct task_struct *task) { - struct task_struct *g, *t; - struct mm_struct *mm = p->mm; - - if (atomic_read(&p->signal->count) != 1) - goto no; - - if (atomic_read(&p->mm->mm_users) != 1) { - read_lock(&tasklist_lock); - do_each_thread(g, t) { - if (t->mm == mm && t != p) - goto no_unlock; - } while_each_thread(g, t); - read_unlock(&tasklist_lock); - } + struct mm_struct *mm = task->mm; + struct task_struct *p, *t; + bool ret; + + might_sleep(); + + if (atomic_read(&task->signal->live) != 1) + return false; - return true; + if (atomic_read(&mm->mm_users) == 1) + return true; + + ret = false; + down_write(&mm->mmap_sem); + rcu_read_lock(); + for_each_process(p) { + if (unlikely(p->flags & PF_KTHREAD)) + continue; + if (unlikely(p == task->group_leader)) + continue; + + t = p; + do { + if (unlikely(t->mm == mm)) + goto found; + if (likely(t->mm)) + break; + } while_each_thread(p, t); + } + ret = true; +found: + rcu_read_unlock(); + up_write(&mm->mmap_sem); -no_unlock: - read_unlock(&tasklist_lock); -no: - return false; + return ret; } -- cgit v1.2.3 From 5bb459bb45d1ad3c177485dcf0af01580aa31125 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 10 Jul 2009 03:48:23 +0200 Subject: kernel: rename is_single_threaded(task) to current_is_single_threaded(void) - is_single_threaded(task) is not safe unless task == current, we can't use task->signal or task->mm. - it doesn't make sense unless task == current, the task can fork right after the check. Rename it to current_is_single_threaded() and kill the argument. Signed-off-by: Oleg Nesterov Acked-by: David Howells Signed-off-by: James Morris --- lib/is_single_threaded.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c index 2762516e0a5..434010980bd 100644 --- a/lib/is_single_threaded.c +++ b/lib/is_single_threaded.c @@ -15,8 +15,9 @@ /* * Returns true if the task does not share ->mm with another thread/process. */ -bool is_single_threaded(struct task_struct *task) +bool current_is_single_threaded(void) { + struct task_struct *task = current; struct mm_struct *mm = task->mm; struct task_struct *p, *t; bool ret; -- cgit v1.2.3 From 967cc5371113f9806b39a2ebb2174af2883d96fe Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Jul 2009 23:28:49 +0200 Subject: kernel: is_current_single_threaded: don't use ->mmap_sem is_current_single_threaded() can safely miss a freshly forked CLONE_VM task, but in this case it must not miss its parent. That is why we take mm->mmap_sem for writing to make sure a thread/task with the same ->mm can't pass exit_mm() and disappear. 
However we can avoid ->mmap_sem and rely on rcu/barriers: - if we do not see the exiting parent on thread/process list we see the result of list_del_rcu(), in this case we must also see the result of list_add_rcu() which does wmb(). - if we do see the parent but its ->mm == NULL, we need rmb() to make sure we can't miss the child. Signed-off-by: Oleg Nesterov Acked-by: David Howells Signed-off-by: James Morris --- lib/is_single_threaded.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c index 434010980bd..bd2bea96336 100644 --- a/lib/is_single_threaded.c +++ b/lib/is_single_threaded.c @@ -22,8 +22,6 @@ bool current_is_single_threaded(void) struct task_struct *p, *t; bool ret; - might_sleep(); - if (atomic_read(&task->signal->live) != 1) return false; @@ -31,7 +29,6 @@ bool current_is_single_threaded(void) return true; ret = false; - down_write(&mm->mmap_sem); rcu_read_lock(); for_each_process(p) { if (unlikely(p->flags & PF_KTHREAD)) @@ -45,12 +42,17 @@ bool current_is_single_threaded(void) goto found; if (likely(t->mm)) break; + /* + * t->mm == NULL. Make sure next_thread/next_task + * will see other CLONE_VM tasks which might be + * forked before exiting. + */ + smp_rmb(); } while_each_thread(p, t); } ret = true; found: rcu_read_unlock(); - up_write(&mm->mmap_sem); return ret; } -- cgit v1.2.3 From 3885123da8335dc6b67387e5e626acbffc56f664 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 10 Jul 2009 10:04:50 +0900 Subject: swiotlb: remove unused swiotlb_alloc_boot() Nobody uses swiotlb_alloc_boot(). Signed-off-by: FUJITA Tomonori Acked-by: Becky Bruce --- lib/swiotlb.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index bffe6d7ef9d..9edfdd442ed 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -114,11 +114,6 @@ setup_io_tlb_npages(char *str) __setup("swiotlb=", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */ -void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) -{ - return alloc_bootmem_low_pages(size); -} - void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs) { return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); @@ -189,7 +184,7 @@ swiotlb_init_with_default_size(size_t default_size) /* * Get IO TLB memory from the low pages */ - io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs); + io_tlb_start = alloc_bootmem_low_pages(bytes); if (!io_tlb_start) panic("Cannot allocate SWIOTLB buffer"); io_tlb_end = io_tlb_start + bytes; -- cgit v1.2.3 From bb52196be37ce154ddc50b1f39496146d181cbe7 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 10 Jul 2009 10:04:51 +0900 Subject: swiotlb: remove unused swiotlb_alloc() Nobody uses swiotlb_alloc(). Signed-off-by: FUJITA Tomonori Acked-by: Becky Bruce --- lib/swiotlb.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 9edfdd442ed..3c4c21cdf43 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -114,11 +114,6 @@ setup_io_tlb_npages(char *str) __setup("swiotlb=", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? 
*/ -void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs) -{ - return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); -} - dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) { return paddr; @@ -240,7 +235,8 @@ swiotlb_late_init_with_default_size(size_t default_size) bytes = io_tlb_nslabs << IO_TLB_SHIFT; while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { - io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs); + io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, + order); if (io_tlb_start) break; order--; -- cgit v1.2.3 From cf56e3f2e8a8d5b7bc719980869b0e7985c256f3 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 10 Jul 2009 10:04:52 +0900 Subject: swiotlb: remove swiotlb_arch_range_needs_mapping Nobody uses swiotlb_arch_range_needs_mapping(). Signed-off-by: FUJITA Tomonori Acked-by: Becky Bruce --- lib/swiotlb.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 3c4c21cdf43..dc1cd1f5369 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -141,11 +141,6 @@ int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev, return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); } -int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) -{ - return 0; -} - static void swiotlb_print_info(unsigned long bytes) { phys_addr_t pstart, pend; @@ -312,11 +307,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) return swiotlb_arch_address_needs_mapping(hwdev, addr, size); } -static inline int range_needs_mapping(phys_addr_t paddr, size_t size) -{ - return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size); -} - static int is_swiotlb_buffer(char *addr) { return addr >= io_tlb_start && addr < io_tlb_end; @@ -646,8 +636,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, * we can safely return the device addr and not worry about bounce * buffering it. */ - if (!address_needs_mapping(dev, dev_addr, size) && - !range_needs_mapping(phys, size)) + if (!address_needs_mapping(dev, dev_addr, size) && !swiotlb_force) return dev_addr; /* @@ -810,7 +799,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, phys_addr_t paddr = sg_phys(sg); dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr); - if (range_needs_mapping(paddr, sg->length) || + if (swiotlb_force || address_needs_mapping(hwdev, dev_addr, sg->length)) { void *map = map_single(hwdev, sg_phys(sg), sg->length, dir); -- cgit v1.2.3 From 02ca646e73f3cb9be69e339841b94edae675e248 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Thu, 23 Jul 2009 11:18:49 +0900 Subject: swiotlb: remove unnecessary swiotlb_bus_to_virt swiotlb_bus_to_virt is unncessary; we can use swiotlb_bus_to_phys and phys_to_virt instead. 
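For illustration, the helper being removed was equivalent to the following open-coded sequence, which the callers now use directly (a sketch only; the function name is made up):

/* Equivalent of the removed swiotlb_bus_to_virt(): go through the
 * physical address explicitly.
 */
static void *example_bus_to_virt(struct device *hwdev, dma_addr_t dev_addr)
{
	phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr);

	return phys_to_virt(paddr);
}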
Signed-off-by: FUJITA Tomonori Acked-by: Becky Bruce --- lib/swiotlb.c | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index dc1cd1f5369..cba3db809fb 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -124,17 +124,13 @@ phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) return baddr; } +/* Note that this doesn't work with highmem page */ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, volatile void *address) { return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); } -void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address) -{ - return phys_to_virt(swiotlb_bus_to_phys(hwdev, address)); -} - int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) { @@ -307,9 +303,10 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) return swiotlb_arch_address_needs_mapping(hwdev, addr, size); } -static int is_swiotlb_buffer(char *addr) +static int is_swiotlb_buffer(phys_addr_t paddr) { - return addr >= io_tlb_start && addr < io_tlb_end; + return paddr >= virt_to_phys(io_tlb_start) && + paddr < virt_to_phys(io_tlb_end); } /* @@ -582,11 +579,13 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent); void swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dma_handle) + dma_addr_t dev_addr) { + phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr); + WARN_ON(irqs_disabled()); - if (!is_swiotlb_buffer(vaddr)) - free_pages((unsigned long) vaddr, get_order(size)); + if (!is_swiotlb_buffer(paddr)) + free_pages((unsigned long)vaddr, get_order(size)); else /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); @@ -671,19 +670,25 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir) { - char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); + phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr); BUG_ON(dir == DMA_NONE); - if (is_swiotlb_buffer(dma_addr)) { - do_unmap_single(hwdev, dma_addr, size, dir); + if (is_swiotlb_buffer(paddr)) { + do_unmap_single(hwdev, phys_to_virt(paddr), size, dir); return; } if (dir != DMA_FROM_DEVICE) return; - dma_mark_clean(dma_addr, size); + /* + * phys_to_virt doesn't work with hihgmem page but we could + * call dma_mark_clean() with hihgmem page here. However, we + * are fine since dma_mark_clean() is null on POWERPC. We can + * make dma_mark_clean() take a physical address if necessary. 
+ */ + dma_mark_clean(phys_to_virt(paddr), size); } void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, @@ -708,19 +713,19 @@ static void swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir, int target) { - char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); + phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr); BUG_ON(dir == DMA_NONE); - if (is_swiotlb_buffer(dma_addr)) { - sync_single(hwdev, dma_addr, size, dir, target); + if (is_swiotlb_buffer(paddr)) { + sync_single(hwdev, phys_to_virt(paddr), size, dir, target); return; } if (dir != DMA_FROM_DEVICE) return; - dma_mark_clean(dma_addr, size); + dma_mark_clean(phys_to_virt(paddr), size); } void -- cgit v1.2.3 From b9394647ac88faad9db0f9e92eac4db434faded6 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 10 Jul 2009 10:04:58 +0900 Subject: swiotlb: use dma_capable() This converts swiotlb to use dma_capable() instead of swiotlb_arch_address_needs_mapping() and is_buffer_dma_capable(). Signed-off-by: FUJITA Tomonori Acked-by: Becky Bruce --- lib/swiotlb.c | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index cba3db809fb..a012d93792b 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -131,12 +131,6 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); } -int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev, - dma_addr_t addr, size_t size) -{ - return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); -} - static void swiotlb_print_info(unsigned long bytes) { phys_addr_t pstart, pend; @@ -297,12 +291,6 @@ cleanup1: return -ENOMEM; } -static inline int -address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) -{ - return swiotlb_arch_address_needs_mapping(hwdev, addr, size); -} - static int is_swiotlb_buffer(phys_addr_t paddr) { return paddr >= virt_to_phys(io_tlb_start) && @@ -539,9 +527,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_mask = hwdev->coherent_dma_mask; ret = (void *)__get_free_pages(flags, order); - if (ret && - !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret), - size)) { + if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) { /* * The allocated memory isn't reachable by the device. */ @@ -563,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, dev_addr = swiotlb_virt_to_bus(hwdev, ret); /* Confirm address can be DMA'd by device */ - if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) { + if (dev_addr + size > dma_mask) { printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", (unsigned long long)dma_mask, (unsigned long long)dev_addr); @@ -635,7 +621,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, * we can safely return the device addr and not worry about bounce * buffering it. 
*/ - if (!address_needs_mapping(dev, dev_addr, size) && !swiotlb_force) + if (dma_capable(dev, dev_addr, size) && !swiotlb_force) return dev_addr; /* @@ -652,7 +638,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, /* * Ensure that the address returned is DMA'ble */ - if (address_needs_mapping(dev, dev_addr, size)) + if (!dma_capable(dev, dev_addr, size)) panic("map_single: bounce buffer is not DMA'ble"); return dev_addr; @@ -805,7 +791,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr); if (swiotlb_force || - address_needs_mapping(hwdev, dev_addr, sg->length)) { + !dma_capable(hwdev, dev_addr, sg->length)) { void *map = map_single(hwdev, sg_phys(sg), sg->length, dir); if (!map) { -- cgit v1.2.3 From 862d196b27bd30a5ab8109d5cdb37980d81b1fe9 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 10 Jul 2009 10:05:02 +0900 Subject: swiotlb: use phys_to_dma and dma_to_phys This converts swiotlb to use phys_to_dma and dma_to_phys instead of swiotlb_phys_to_bus() and swiotlb_bus_to_phys(). swiotlb_phys_to_bus() and swiotlb_bus_to_phys() are not necessary so this patch also removes them. Signed-off-by: FUJITA Tomonori Acked-by: Becky Bruce --- lib/swiotlb.c | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index a012d93792b..9e2fe3e1804 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -114,21 +114,11 @@ setup_io_tlb_npages(char *str) __setup("swiotlb=", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */ -dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) -{ - return paddr; -} - -phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) -{ - return baddr; -} - /* Note that this doesn't work with highmem page */ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, volatile void *address) { - return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); + return phys_to_dma(hwdev, virt_to_phys(address)); } static void swiotlb_print_info(unsigned long bytes) @@ -567,7 +557,7 @@ void swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dev_addr) { - phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr); + phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); WARN_ON(irqs_disabled()); if (!is_swiotlb_buffer(paddr)) @@ -612,7 +602,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, struct dma_attrs *attrs) { phys_addr_t phys = page_to_phys(page) + offset; - dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys); + dma_addr_t dev_addr = phys_to_dma(dev, phys); void *map; BUG_ON(dir == DMA_NONE); @@ -656,7 +646,7 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page); static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir) { - phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr); + phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); BUG_ON(dir == DMA_NONE); @@ -699,7 +689,7 @@ static void swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir, int target) { - phys_addr_t paddr = swiotlb_bus_to_phys(hwdev, dev_addr); + phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); BUG_ON(dir == DMA_NONE); @@ -788,7 +778,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, for_each_sg(sgl, sg, nelems, i) { phys_addr_t paddr = sg_phys(sg); - dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr); + dma_addr_t dev_addr = 
phys_to_dma(hwdev, paddr); if (swiotlb_force || !dma_capable(hwdev, dev_addr, sg->length)) { -- cgit v1.2.3 From bbdc16f58ed84523e8991f103dceb586e9053e04 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 15 Jul 2009 15:25:22 +0000 Subject: kmemleak: Allow kmemleak to be built on powerpc Very lightly tested, doesn't crash the kernel. Signed-off-by: Michael Ellerman Acked-by: Catalin Marinas Signed-off-by: Benjamin Herrenschmidt --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 12327b2bb78..d5ca9a5a31c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -338,7 +338,7 @@ config SLUB_STATS config DEBUG_KMEMLEAK bool "Kernel memory leak detector" - depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \ + depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM || PPC) && \ !MEMORY_HOTPLUG select DEBUG_FS if SYSFS select STACKTRACE if STACKTRACE_SUPPORT -- cgit v1.2.3 From c7084b35eb1a4d3353a501508baf9d3d82822c93 Mon Sep 17 00:00:00 2001 From: Casey Dahlin Date: Thu, 20 Aug 2009 16:27:56 -0700 Subject: lib/swiotlb.c: Fix strange panic message selection logic when swiotlb fills up swiotlb_full() in lib/swiotlb.c throws one of two panic messages based on whether the direction of transfer is from the device or to the device. The logic around this is somewhat weird in the case of bidirectional transfers. It appears to want to throw both in succession, but since its a panic only the first makes it. This patch adds a third, separate error for DMA_BIDIRECTIONAL to make things a bit clearer. Signed-off-by: Casey Dahlin Cc: FUJITA Tomonori Cc: Becky Bruce [ further fixed the error message ] Signed-off-by: Andrew Morton LKML-Reference: <200908202327.n7KNRuqK001504@imap1.linux-foundation.org> Signed-off-by: Ingo Molnar --- lib/swiotlb.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 9e2fe3e1804..ac25cd28e80 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -581,12 +581,15 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at " "device %s\n", size, dev ? dev_name(dev) : "?"); - if (size > io_tlb_overflow && do_panic) { - if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) - panic("DMA: Memory would be corrupted\n"); - if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) - panic("DMA: Random memory would be DMAed\n"); - } + if (size <= io_tlb_overflow || !do_panic) + return; + + if (dir == DMA_BIDIRECTIONAL) + panic("DMA: Random memory could be DMA accessed\n"); + if (dir == DMA_FROM_DEVICE) + panic("DMA: Random memory could be DMA written\n"); + if (dir == DMA_TO_DEVICE) + panic("DMA: Random memory could be DMA read\n"); } /* -- cgit v1.2.3 From f41d911f8c49a5d65c86504c19e8204bb605c4fd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 22 Aug 2009 13:56:52 -0700 Subject: rcu: Merge preemptable-RCU functionality into hierarchical RCU Create a kernel/rcutree_plugin.h file that contains definitions for preemptable RCU (or, under the #else branch of the #ifdef, empty definitions for the classic non-preemptable semantics). These definitions fit into plugins defined in kernel/rcutree.c for this purpose. This variant of preemptable RCU uses a new algorithm whose read-side expense is roughly that of classic hierarchical RCU under CONFIG_PREEMPT. 
This new algorithm's update-side expense is similar to that of classic hierarchical RCU, and, in absence of read-side preemption or blocking, is exactly that of classic hierarchical RCU. Perhaps more important, this new algorithm has a much simpler implementation, saving well over 1,000 lines of code compared to mainline's implementation of preemptable RCU, which will hopefully be retired in favor of this new algorithm. The simplifications are obtained by maintaining per-task nesting state for running tasks, and using a simple lock-protected algorithm to handle accounting when tasks block within RCU read-side critical sections, making use of lessons learned while creating numerous user-level RCU implementations over the past 18 months. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josht@linux.vnet.ibm.com Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org LKML-Reference: <12509746134003-git-send-email-> Signed-off-by: Ingo Molnar --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 12327b2bb78..f87fb0c8f92 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -725,7 +725,7 @@ config RCU_TORTURE_TEST_RUNNABLE config RCU_CPU_STALL_DETECTOR bool "Check for stalled CPUs delaying RCU grace periods" - depends on CLASSIC_RCU || TREE_RCU + depends on CLASSIC_RCU || TREE_RCU || TREE_PREEMPT_RCU default n help This option causes RCU to printk information on which -- cgit v1.2.3 From 6b3ef48adf847f7adf11c870e3ffacac150f1564 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 22 Aug 2009 13:56:53 -0700 Subject: rcu: Remove CONFIG_PREEMPT_RCU Now that CONFIG_TREE_PREEMPT_RCU is in place, there is no further need for CONFIG_PREEMPT_RCU. Remove it, along with whatever subtle bugs it may (or may not) contain. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: akpm@linux-foundation.org Cc: mathieu.desnoyers@polymtl.ca Cc: josht@linux.vnet.ibm.com Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org LKML-Reference: <125097461396-git-send-email-> Signed-off-by: Ingo Molnar --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f87fb0c8f92..82fbc49728d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -725,7 +725,7 @@ config RCU_TORTURE_TEST_RUNNABLE config RCU_CPU_STALL_DETECTOR bool "Check for stalled CPUs delaying RCU grace periods" - depends on CLASSIC_RCU || TREE_RCU || TREE_PREEMPT_RCU + depends on TREE_RCU || TREE_PREEMPT_RCU default n help This option causes RCU to printk information on which -- cgit v1.2.3 From 8a27f7c90ffcb791eed7574922b51fb60b08fc89 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 17 Aug 2009 12:29:44 +0000 Subject: lib/vsprintf.c: Add "%pI6c" - print pointer as compressed ipv6 address Signed-off-by: Joe Perches Tested-by: Jens Rosenboom Signed-off-by: David S. 
Miller --- lib/vsprintf.c | 199 +++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 152 insertions(+), 47 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 756ccafa9ce..cb8a112030b 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -25,6 +25,7 @@ #include #include #include +#include #include /* for PAGE_SIZE */ #include @@ -630,60 +631,156 @@ static char *resource_string(char *buf, char *end, struct resource *res, } static char *mac_address_string(char *buf, char *end, u8 *addr, - struct printf_spec spec) + struct printf_spec spec, const char *fmt) { - char mac_addr[6 * 3]; /* (6 * 2 hex digits), 5 colons and trailing zero */ + char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; char *p = mac_addr; int i; for (i = 0; i < 6; i++) { p = pack_hex_byte(p, addr[i]); - if (!(spec.flags & SPECIAL) && i != 5) + if (fmt[0] == 'M' && i != 5) *p++ = ':'; } *p = '\0'; - spec.flags &= ~SPECIAL; return string(buf, end, mac_addr, spec); } -static char *ip6_addr_string(char *buf, char *end, u8 *addr, - struct printf_spec spec) +static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) +{ + int i; + + for (i = 0; i < 4; i++) { + char temp[3]; /* hold each IP quad in reverse order */ + int digits = put_dec_trunc(temp, addr[i]) - temp; + if (leading_zeros) { + if (digits < 3) + *p++ = '0'; + if (digits < 2) + *p++ = '0'; + } + /* reverse the digits in the quad */ + while (digits--) + *p++ = temp[digits]; + if (i < 3) + *p++ = '.'; + } + + *p = '\0'; + return p; +} + +static char *ip6_compressed_string(char *p, const struct in6_addr *addr) { - char ip6_addr[8 * 5]; /* (8 * 4 hex digits), 7 colons and trailing zero */ - char *p = ip6_addr; int i; + int j; + int range; + unsigned char zerolength[8]; + int longest = 1; + int colonpos = -1; + u16 word; + u8 hi; + u8 lo; + bool needcolon = false; + bool useIPv4 = ipv6_addr_v4mapped(addr) || ipv6_addr_is_isatap(addr); + + memset(zerolength, 0, sizeof(zerolength)); + + if (useIPv4) + range = 6; + else + range = 8; + + /* find position of longest 0 run */ + for (i = 0; i < range; i++) { + for (j = i; j < range; j++) { + if (addr->s6_addr16[j] != 0) + break; + zerolength[i]++; + } + } + for (i = 0; i < range; i++) { + if (zerolength[i] > longest) { + longest = zerolength[i]; + colonpos = i; + } + } + + /* emit address */ + for (i = 0; i < range; i++) { + if (i == colonpos) { + if (needcolon || i == 0) + *p++ = ':'; + *p++ = ':'; + needcolon = false; + i += longest - 1; + continue; + } + if (needcolon) { + *p++ = ':'; + needcolon = false; + } + /* hex u16 without leading 0s */ + word = ntohs(addr->s6_addr16[i]); + hi = word >> 8; + lo = word & 0xff; + if (hi) { + if (hi > 0x0f) + p = pack_hex_byte(p, hi); + else + *p++ = hex_asc_lo(hi); + } + if (hi || lo > 0x0f) + p = pack_hex_byte(p, lo); + else + *p++ = hex_asc_lo(lo); + needcolon = true; + } + + if (useIPv4) { + if (needcolon) + *p++ = ':'; + p = ip4_string(p, &addr->s6_addr[12], false); + } + *p = '\0'; + return p; +} + +static char *ip6_string(char *p, const struct in6_addr *addr, const char *fmt) +{ + int i; for (i = 0; i < 8; i++) { - p = pack_hex_byte(p, addr[2 * i]); - p = pack_hex_byte(p, addr[2 * i + 1]); - if (!(spec.flags & SPECIAL) && i != 7) + p = pack_hex_byte(p, addr->s6_addr[2 * i]); + p = pack_hex_byte(p, addr->s6_addr[2 * i + 1]); + if (fmt[0] == 'I' && i != 7) *p++ = ':'; } + *p = '\0'; - spec.flags &= ~SPECIAL; + return p; +} + +static char *ip6_addr_string(char *buf, char *end, const u8 *addr, + struct printf_spec spec, const char *fmt) 
+{ + char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")]; + + if (fmt[0] == 'I' && fmt[2] == 'c') + ip6_compressed_string(ip6_addr, (const struct in6_addr *)addr); + else + ip6_string(ip6_addr, (const struct in6_addr *)addr, fmt); return string(buf, end, ip6_addr, spec); } -static char *ip4_addr_string(char *buf, char *end, u8 *addr, - struct printf_spec spec) +static char *ip4_addr_string(char *buf, char *end, const u8 *addr, + struct printf_spec spec, const char *fmt) { - char ip4_addr[4 * 4]; /* (4 * 3 decimal digits), 3 dots and trailing zero */ - char temp[3]; /* hold each IP quad in reverse order */ - char *p = ip4_addr; - int i, digits; + char ip4_addr[sizeof("255.255.255.255")]; - for (i = 0; i < 4; i++) { - digits = put_dec_trunc(temp, addr[i]) - temp; - /* reverse the digits in the quad */ - while (digits--) - *p++ = temp[digits]; - if (i != 3) - *p++ = '.'; - } - *p = '\0'; - spec.flags &= ~SPECIAL; + ip4_string(ip4_addr, addr, fmt[0] == 'i'); return string(buf, end, ip4_addr, spec); } @@ -702,11 +799,15 @@ static char *ip4_addr_string(char *buf, char *end, u8 *addr, * addresses (not the name nor the flags) * - 'M' For a 6-byte MAC address, it prints the address in the * usual colon-separated hex notation - * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way (dot-separated - * decimal for v4 and colon separated network-order 16 bit hex for v6) - * - 'i' [46] for 'raw' IPv4/IPv6 addresses, IPv6 omits the colons, IPv4 is - * currently the same - * + * - 'm' For a 6-byte MAC address, it prints the hex address without colons + * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way + * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) + * IPv6 uses colon separated network-order 16 bit hex with leading 0's + * - 'i' [46] for 'raw' IPv4/IPv6 addresses + * IPv6 omits the colons (01020304...0f) + * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) + * - 'I6c' for IPv6 addresses printed as specified by + * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a * pointer to the real address. 
@@ -726,20 +827,24 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, return symbol_string(buf, end, ptr, spec, *fmt); case 'R': return resource_string(buf, end, ptr, spec); - case 'm': - spec.flags |= SPECIAL; - /* Fallthrough */ - case 'M': - return mac_address_string(buf, end, ptr, spec); - case 'i': - spec.flags |= SPECIAL; - /* Fallthrough */ - case 'I': - if (fmt[1] == '6') - return ip6_addr_string(buf, end, ptr, spec); - if (fmt[1] == '4') - return ip4_addr_string(buf, end, ptr, spec); - spec.flags &= ~SPECIAL; + case 'M': /* Colon separated: 00:01:02:03:04:05 */ + case 'm': /* Contiguous: 000102030405 */ + return mac_address_string(buf, end, ptr, spec, fmt); + case 'I': /* Formatted IP supported + * 4: 1.2.3.4 + * 6: 0001:0203:...:0708 + * 6c: 1::708 or 1::1.2.3.4 + */ + case 'i': /* Contiguous: + * 4: 001.002.003.004 + * 6: 000102...0f + */ + switch (fmt[1]) { + case '6': + return ip6_addr_string(buf, end, ptr, spec, fmt); + case '4': + return ip4_addr_string(buf, end, ptr, spec, fmt); + } break; } spec.flags |= SMALL; -- cgit v1.2.3 From e0e817392b9acf2c98d3be80c233dddb1b52003d Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 2 Sep 2009 09:13:40 +0100 Subject: CRED: Add some configurable debugging [try #6] Add a config option (CONFIG_DEBUG_CREDENTIALS) to turn on some debug checking for credential management. The additional code keeps track of the number of pointers from task_structs to any given cred struct, and checks to see that this number never exceeds the usage count of the cred struct (which includes all references, not just those from task_structs). Furthermore, if SELinux is enabled, the code also checks that the security pointer in the cred struct is never seen to be invalid. This attempts to catch the bug whereby inode_has_perm() faults in an nfsd kernel thread on seeing cred->security be a NULL pointer (it appears that the credential struct has been previously released): http://www.kerneloops.org/oops.php?number=252883 Signed-off-by: David Howells Signed-off-by: James Morris --- lib/Kconfig.debug | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 12327b2bb78..fbb87cf138c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -653,6 +653,21 @@ config DEBUG_NOTIFIERS This is a relatively cheap check but if you care about maximum performance, say N. +config DEBUG_CREDENTIALS + bool "Debug credential management" + depends on DEBUG_KERNEL + help + Enable this to turn on some debug checking for credential + management. The additional code keeps track of the number of + pointers from task_structs to any given cred struct, and checks to + see that this number never exceeds the usage count of the cred + struct. + + Furthermore, if SELinux is enabled, this also checks that the + security pointer in the cred struct is never seen to be invalid. + + If unsure, say N. + # # Select this config option from the architecture Kconfig, if it # it is preferred to always offer frame pointers as a config -- cgit v1.2.3 From 2f82af08fcc7dc01a7e98a49a5995a77e32a2925 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 14 Sep 2009 03:25:28 -0400 Subject: Nicolas Pitre has a new email address Due to problems at cam.org, my nico@cam.org email address is no longer valid. FRom now on, nico@fluxnic.net should be used instead. 
Signed-off-by: Nicolas Pitre Signed-off-by: Linus Torvalds --- lib/inflate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/inflate.c b/lib/inflate.c index 1a8e8a97812..d10255973a9 100644 --- a/lib/inflate.c +++ b/lib/inflate.c @@ -7,7 +7,7 @@ * Adapted for booting Linux by Hannu Savolainen 1993 * based on gzip-1.0.3 * - * Nicolas Pitre , 1999/04/14 : + * Nicolas Pitre , 1999/04/14 : * Little mods for all variable to reside either into rodata or bss segments * by marking constant variables with 'const' and initializing all the others * at run-time only. This allows for the kernel uncompressor to run -- cgit v1.2.3 From 91adcd2c4b104a8ce2973e6e84b01fd48735ffc6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 16 Sep 2009 20:03:06 -0400 Subject: vsprintf: add %ps that is the same as %pS but is like %pf On PowerPC64 function pointers do not point directly at the functions, but instead point to pointers to the functions. The output of %pF expects to point to a pointer to the function, whereas %pS will show the function itself. mcount returns the direct pointer to the function and not the pointer to the pointer. Thus %pS must be used to show this. The function tracer requires printing of the functions without offsets and uses the %pf instead. %pF produces run_local_timers+0x4/0x1f %pf produces just run_local_timers For PowerPC64, we need to use the direct pointer, and we only have %pS which will produce .run_local_timers+0x4/0x1f This patch creates a %ps that matches the %pf as %pS matches %pF. Cc: Linus Torvalds Cc: Zhao Lei Acked-by: Benjamin Herrenschmidt Signed-off-by: Steven Rostedt --- lib/vsprintf.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index cb8a112030b..c8f3ed62cee 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -581,7 +581,7 @@ static char *symbol_string(char *buf, char *end, void *ptr, unsigned long value = (unsigned long) ptr; #ifdef CONFIG_KALLSYMS char sym[KSYM_SYMBOL_LEN]; - if (ext != 'f') + if (ext != 'f' && ext != 's') sprint_symbol(sym, value); else kallsyms_lookup(value, NULL, NULL, NULL, sym); @@ -822,6 +822,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, case 'F': case 'f': ptr = dereference_function_descriptor(ptr); + case 's': /* Fallthrough */ case 'S': return symbol_string(buf, end, ptr, spec, *fmt); @@ -1063,7 +1064,8 @@ qualifier: * @args: Arguments for the format string * * This function follows C99 vsnprintf, but has some extensions: - * %pS output the name of a text symbol + * %pS output the name of a text symbol with offset + * %ps output the name of a text symbol without offset * %pF output the name of a function pointer with its offset * %pf output the name of a function pointer without its offset * %pR output the address range in a struct resource -- cgit v1.2.3 From 0efb4d20723d58edbad29d1ff98a86b631adb5e6 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 17 Sep 2009 09:27:29 -0400 Subject: vsnprintf: remove duplicate comment of vsnprintf Remove the duplicate comment of bstr_printf that is the same as the vsnprintf. Add the 's' option to the comment for the pointer function. This is more of an internal function so the little duplication of the comment here is OK. 
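As a usage illustration of the symbol conversions touched by the previous patch (a hypothetical debugging helper, not taken from the patch itself):

/* Hypothetical example: print the same address with each symbol conversion. */
static void show_symbol_formats(void *addr)
{
	pr_debug("%pF\n", addr);	/* dereference function descriptor, with offset */
	pr_debug("%pf\n", addr);	/* dereference function descriptor, no offset */
	pr_debug("%pS\n", addr);	/* direct text address, with offset */
	pr_debug("%ps\n", addr);	/* direct text address, no offset (added above) */
}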
Reported-by: Zhaolei Cc: Linus Torvalds Signed-off-by: Steven Rostedt --- lib/vsprintf.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index c8f3ed62cee..d320c1816a7 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -794,7 +794,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, * * - 'F' For symbolic function descriptor pointers with offset * - 'f' For simple symbolic function names without offset - * - 'S' For symbolic direct pointers + * - 'S' For symbolic direct pointers with offset + * - 's' For symbolic direct pointers without offset * - 'R' For a struct resource pointer, it prints the range of * addresses (not the name nor the flags) * - 'M' For a 6-byte MAC address, it prints the address in the @@ -1069,6 +1070,7 @@ qualifier: * %pF output the name of a function pointer with its offset * %pf output the name of a function pointer without its offset * %pR output the address range in a struct resource + * %n is ignored * * The return value is the number of characters which would * be generated for the given input, excluding the trailing @@ -1524,11 +1526,7 @@ EXPORT_SYMBOL_GPL(vbin_printf); * a binary buffer that generated by vbin_printf. * * The format follows C99 vsnprintf, but has some extensions: - * %pS output the name of a text symbol - * %pF output the name of a function pointer with its offset - * %pf output the name of a function pointer without its offset - * %pR output the address range in a struct resource - * %n is ignored + * see vsnprintf comment for details. * * The return value is the number of characters which would * be generated for the given input, excluding the trailing -- cgit v1.2.3
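Finally, a hypothetical call site exercising the address conversions described in the vsprintf patches above (the function name and arguments are assumptions for this sketch, not part of any patch):

/* Assumes <linux/kernel.h> and <linux/in6.h>; all names here are made up. */
static void show_addr_formats(const u8 *mac, const struct in6_addr *ip6)
{
	pr_info("mac (colons):      %pM\n", mac);	/* 00:01:02:03:04:05 */
	pr_info("mac (contiguous):  %pm\n", mac);	/* 000102030405 */
	pr_info("ipv6 (full):       %pI6\n", ip6);	/* 0001:0203:...:0708 */
	pr_info("ipv6 (compressed): %pI6c\n", ip6);	/* 1::708 or 1::1.2.3.4 */
}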