From 9a926d86b29a25456f1dd8c9df938e151b1c3978 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Mon, 27 Jul 2009 18:10:28 -0700
Subject: sparc64: Sign extend length arg to truncate syscalls when compat.

The first thing sys_truncate() and sys_ftruncate() do is sign extend
the unsigned length arg to a signed type.

Thanks to Benjamin Herrenschmidt for the tip.

Signed-off-by: David S. Miller
---
 arch/sparc/kernel/sys32.S      | 2 ++
 arch/sparc/kernel/systbls_64.S | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)

(limited to 'arch/sparc/kernel')

diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index f061c4dda9e..577f8fab8b6 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -138,6 +138,8 @@ SIGN2(sys32_splice, sys_splice, %o0, %o1)
 SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
 SIGN2(sys32_tee, sys_tee, %o0, %o1)
 SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0)
+SIGN1(sys32_truncate, sys_truncate, %o1)
+SIGN1(sys32_ftruncate, sys_ftruncate, %o1)
 
 	.globl		sys32_mmap2
 sys32_mmap2:
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 6b3ee88e253..2ee7250ba7a 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -43,8 +43,8 @@ sys_call_table32:
 /*110*/	.word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
 	.word sys32_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
 /*120*/	.word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
-	.word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
-/*130*/	.word sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
+	.word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys32_truncate
+/*130*/	.word sys32_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
 	.word sys_nis_syscall, sys32_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
 /*140*/	.word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
 	.word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write
--
cgit v1.2.3
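The effect of the new SIGN1 stubs is easiest to see with a concrete
value. Below is a minimal user-space sketch in plain C (an
illustration, not kernel code): a 32-bit task that passes a length
with bit 31 set should trip the 64-bit handler's negative-length
check, but without sign extension the value arrives zero-extended and
masquerades as a huge, apparently valid length.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* What a 32-bit task might (incorrectly) pass as a length. */
	uint32_t user_len = (uint32_t)-1;

	/* Without the SIGN1 stub: the upper 32 bits of the 64-bit
	 * register are zero, so the value looks like a huge but
	 * positive length. */
	int64_t zero_extended = (uint64_t)user_len;

	/* With the SIGN1 stub: bit 31 is propagated upward, so the
	 * 64-bit handler sees -1 and its negative-length check fires. */
	int64_t sign_extended = (int32_t)user_len;

	printf("zero-extended: %lld\n", (long long)zero_extended);
	printf("sign-extended: %lld\n", (long long)sign_extended);
	return 0;
}

Run on a 64-bit host, the first line prints 4294967295 and the second
prints -1, which is what lets the handler reject the request instead
of attempting a 4GB truncate.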
From 74d46d6b2d23d44d72c37df4c6a5d2e782f7b088 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 21 Jul 2009 17:11:50 +0900
Subject: percpu, sparc64: fix sparse possible cpu map handling

percpu code has been assuming num_possible_cpus() == nr_cpu_ids, which
is incorrect if cpu_possible_map contains holes.  This causes percpu
code to access beyond the allocated memory and vmalloc areas.  On a
sparc64 machine with cpus 0 and 2 (u60), this triggers the following
warning or fails boot.

 WARNING: at /devel/tj/os/work/mm/vmalloc.c:106 vmap_page_range_noflush+0x1f0/0x240()
 Modules linked in:
 Call Trace:
  [00000000004b17d0] vmap_page_range_noflush+0x1f0/0x240
  [00000000004b1840] map_vm_area+0x20/0x60
  [00000000004b1950] __vmalloc_area_node+0xd0/0x160
  [0000000000593434] deflate_init+0x14/0xe0
  [0000000000583b94] __crypto_alloc_tfm+0xd4/0x1e0
  [00000000005844f0] crypto_alloc_base+0x50/0xa0
  [000000000058b898] alg_test_comp+0x18/0x80
  [000000000058dad4] alg_test+0x54/0x180
  [000000000058af00] cryptomgr_test+0x40/0x60
  [0000000000473098] kthread+0x58/0x80
  [000000000042b590] kernel_thread+0x30/0x60
  [0000000000472fd0] kthreadd+0xf0/0x160
 ---[ end trace 429b268a213317ba ]---

This patch fixes generic percpu functions and sparc64
setup_per_cpu_areas() so that they handle sparse cpu_possible_map
properly.

Please note that on x86, cpu_possible_map doesn't contain holes, so
num_possible_cpus() == nr_cpu_ids there and this patch causes no
behavior difference.

Signed-off-by: Tejun Heo
Acked-by: David S. Miller
Cc: Ingo Molnar
---
 arch/sparc/kernel/smp_64.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/sparc/kernel')

diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index fa44eaf8d89..3691907a43b 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1499,7 +1499,7 @@ void __init setup_per_cpu_areas(void)
 
 	dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
 
-	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+	ptrs_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpur_ptrs[0]));
 	pcpur_ptrs = alloc_bootmem(ptrs_size);
 
 	for_each_possible_cpu(cpu) {
@@ -1514,7 +1514,7 @@ void __init setup_per_cpu_areas(void)
 
 	/* allocate address and map */
 	vm.flags = VM_ALLOC;
-	vm.size = num_possible_cpus() * PCPU_CHUNK_SIZE;
+	vm.size = nr_cpu_ids * PCPU_CHUNK_SIZE;
 	vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
 
 	for_each_possible_cpu(cpu) {
--
cgit v1.2.3
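The distinction the patch relies on can be reproduced in a few lines
of plain C. This is a standalone sketch of the failure mode, not
kernel code; it models the u60 case from the changelog, where the
possible cpus are 0 and 2.

#include <stdio.h>

/* Possible cpus with a hole: {0, 2}. */
static const int possible_cpus[] = { 0, 2 };
#define NUM_POSSIBLE_CPUS (sizeof(possible_cpus) / sizeof(possible_cpus[0]))
#define NR_CPU_IDS 3 /* highest possible cpu id + 1 */

int main(void)
{
	/* Correct sizing: one slot per cpu id, holes included. */
	const void *ptrs[NR_CPU_IDS] = { 0 };

	for (unsigned int i = 0; i < NUM_POSSIBLE_CPUS; i++) {
		int cpu = possible_cpus[i];

		/* Had ptrs[] been sized NUM_POSSIBLE_CPUS (2), this
		 * store would run off the end when cpu == 2 -- the
		 * same out-of-bounds access the patch fixes. */
		ptrs[cpu] = &possible_cpus[i];
	}

	printf("num_possible_cpus()=%zu nr_cpu_ids=%d\n",
	       NUM_POSSIBLE_CPUS, NR_CPU_IDS);
	return 0;
}

num_possible_cpus() counts set bits, so it stays 2 no matter how high
the largest cpu id is; any array indexed by raw cpu id has to be sized
by nr_cpu_ids instead, which is exactly the substitution the two hunks
above make.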
From e2c6cbd9ace61039d3de39e717195e38f1492aee Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Tue, 18 Aug 2009 20:16:55 -0700
Subject: sparc: sys32.S incorrect compat-layer splice() system call

I think arch/sparc/kernel/sys32.S has an incorrect splice definition:

  SIGN2(sys32_splice, sys_splice, %o0, %o1)

The splice() prototype looks like:

  long splice(int fd_in, loff_t *off_in, int fd_out,
              loff_t *off_out, size_t len, unsigned int flags);

So I think we should have:

  SIGN2(sys32_splice, sys_splice, %o0, %o2)

Signed-off-by: Mathieu Desnoyers
Signed-off-by: David S. Miller
---
 arch/sparc/kernel/sys32.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/sparc/kernel')

diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index 577f8fab8b6..aed94869ad6 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -134,7 +134,7 @@ SIGN1(sys32_getpeername, sys_getpeername, %o0)
 SIGN1(sys32_getsockname, sys_getsockname, %o0)
 SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1)
 SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2)
-SIGN2(sys32_splice, sys_splice, %o0, %o1)
+SIGN2(sys32_splice, sys_splice, %o0, %o2)
 SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
 SIGN2(sys32_tee, sys_tee, %o0, %o1)
 SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0)
--
cgit v1.2.3

From a9919646d12a13bea7eb74b996686f900dffb120 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 18 Aug 2009 23:44:08 -0700
Subject: sparc32: Kill trap table freeing code.

Normally, srmmu uses different trap table register values to allow
determination of the cpu we're on.  All of the trap tables have
identical content; they just sit at different offsets from the first
trap table, and the offset shifted down and masked out determines the
cpu we are on.

The code tries to free them up when they aren't actually used (we
don't have all 4 cpus, we're on sun4d, etc.), but that causes
problems.

For one thing, it triggers false positives in the DMA debugging code.
And fixing that up while preserving this relative offset scheme isn't
trivial.

So just kill the freeing code; it costs us at most 3 pages, big
deal...

Signed-off-by: David S. Miller
---
 arch/sparc/kernel/sun4d_smp.c | 22 ----------------------
 arch/sparc/kernel/sun4m_smp.c | 26 --------------------------
 2 files changed, 48 deletions(-)

(limited to 'arch/sparc/kernel')

diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 54fb02468f0..68791cad7b7 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -162,9 +162,6 @@ extern void cpu_panic(void);
  */
 extern struct linux_prom_registers smp_penguin_ctable;
 
-extern unsigned long trapbase_cpu1[];
-extern unsigned long trapbase_cpu2[];
-extern unsigned long trapbase_cpu3[];
 
 void __init smp4d_boot_cpus(void)
 {
@@ -235,25 +232,6 @@ void __init smp4d_smp_done(void)
 	*prev = first;
 	local_flush_cache_all();
 
-	/* Free unneeded trap tables */
-	ClearPageReserved(virt_to_page(trapbase_cpu1));
-	init_page_count(virt_to_page(trapbase_cpu1));
-	free_page((unsigned long)trapbase_cpu1);
-	totalram_pages++;
-	num_physpages++;
-
-	ClearPageReserved(virt_to_page(trapbase_cpu2));
-	init_page_count(virt_to_page(trapbase_cpu2));
-	free_page((unsigned long)trapbase_cpu2);
-	totalram_pages++;
-	num_physpages++;
-
-	ClearPageReserved(virt_to_page(trapbase_cpu3));
-	init_page_count(virt_to_page(trapbase_cpu3));
-	free_page((unsigned long)trapbase_cpu3);
-	totalram_pages++;
-	num_physpages++;
-
 	/* Ok, they are spinning and ready to go. */
 	smp_processors_ready = 1;
 	sun4d_distribute_irqs();
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 960b113d000..762d6eedd94 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -121,9 +121,6 @@ void __cpuinit smp4m_callin(void)
  */
 extern struct linux_prom_registers smp_penguin_ctable;
 
-extern unsigned long trapbase_cpu1[];
-extern unsigned long trapbase_cpu2[];
-extern unsigned long trapbase_cpu3[];
 
 void __init smp4m_boot_cpus(void)
 {
@@ -193,29 +190,6 @@ void __init smp4m_smp_done(void)
 	*prev = first;
 	local_flush_cache_all();
 
-	/* Free unneeded trap tables */
-	if (!cpu_isset(1, cpu_present_map)) {
-		ClearPageReserved(virt_to_page(trapbase_cpu1));
-		init_page_count(virt_to_page(trapbase_cpu1));
-		free_page((unsigned long)trapbase_cpu1);
-		totalram_pages++;
-		num_physpages++;
-	}
-	if (!cpu_isset(2, cpu_present_map)) {
-		ClearPageReserved(virt_to_page(trapbase_cpu2));
-		init_page_count(virt_to_page(trapbase_cpu2));
-		free_page((unsigned long)trapbase_cpu2);
-		totalram_pages++;
-		num_physpages++;
-	}
-	if (!cpu_isset(3, cpu_present_map)) {
-		ClearPageReserved(virt_to_page(trapbase_cpu3));
-		init_page_count(virt_to_page(trapbase_cpu3));
-		free_page((unsigned long)trapbase_cpu3);
-		totalram_pages++;
-		num_physpages++;
-	}
-
 	/* Ok, they are spinning and ready to go. */
 }
--
cgit v1.2.3
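The relative-offset scheme this changelog describes can be pictured
with a small hypothetical sketch. The spacing, base address, and mask
below are invented purely for illustration (the real constants live in
the sparc32 trap table layout); the point is only that a cpu derives
its id from which trap table copy its trap base register points at, so
freeing "unused" tables disturbs a layout the id computation depends
on.

#include <stdio.h>

#define TRAP_TABLE_SHIFT 12 /* assumed: tables one 4K page apart */

/* Recover a cpu id from its trap base register, given the address of
 * cpu 0's table: subtract, shift down, mask to the 4 possible cpus. */
static int cpu_from_tbr(unsigned long tbr, unsigned long tbr_cpu0)
{
	return (int)(((tbr - tbr_cpu0) >> TRAP_TABLE_SHIFT) & 3);
}

int main(void)
{
	unsigned long cpu0_table = 0xf0004000UL; /* made-up address */

	for (int cpu = 0; cpu < 4; cpu++) {
		unsigned long tbr = cpu0_table +
			((unsigned long)cpu << TRAP_TABLE_SHIFT);
		printf("tbr %#lx -> cpu %d\n",
		       tbr, cpu_from_tbr(tbr, cpu0_table));
	}
	return 0;
}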
From d8ed1d43e17898761c7221014a15a4c7501d2ff3 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 25 Aug 2009 16:47:46 -0700
Subject: sparc64: Validate linear D-TLB misses.

When page alloc debugging is not enabled, we essentially accept any
virtual address for linear kernel TLB misses.  But with kgdb, kernel
address probing, and other facilities we can try to access arbitrary
crap.

So, make sure the address we miss on will translate to physical memory
that actually exists.

In order to make this work we have to embed the valid address bitmap
into the kernel image.  And in order to make that less expensive we
make an adjustment, in that the max physical memory address is
decreased to "1 << 41", even on the chips that support a 42-bit
physical address space.  We can do this because bit 41 indicates
"I/O space" and thus covers non-memory ranges.

The result of this is that:

1) kpte_linear_bitmap shrinks from 2K to 1K in size

2) we need 64K more for the valid address bitmap

We can't let the valid address bitmap be dynamically allocated once we
start using it to validate TLB misses, otherwise we have crazy issues
to deal with wrt. recursive TLB misses and such.  If we're in a TLB
miss it could be the deepest trap level that's legal inside of the
cpu.  So if we TLB miss referencing the bitmap, the cpu will be out of
trap levels and enter RED state.

To guard against out-of-range accesses to the bitmap, we have to check
to make sure no bits in the physical address above bit 40 are set.  We
could export and use last_valid_pfn for this check, but that's just an
unnecessary extra memory reference.

On the plus side of all this, since we load all of these translations
into the special 4MB mapping TSB, and we check the TSB first for TLB
misses, there should be absolutely no real cost for these new checks
in the TLB miss path.

Reported-by: heyongli@gmail.com
Signed-off-by: David S. Miller
---
 arch/sparc/kernel/ktlb.S | 42 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 38 insertions(+), 4 deletions(-)

(limited to 'arch/sparc/kernel')

diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index cef8defcd7a..3ea6e8cde8c 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -151,12 +151,46 @@ kvmap_dtlb_4v:
 	 * Must preserve %g1 and %g6 (TAG).
 	 */
 kvmap_dtlb_tsb4m_miss:
-	sethi		%hi(kpte_linear_bitmap), %g2
-	or		%g2, %lo(kpte_linear_bitmap), %g2
+	/* Clear the PAGE_OFFSET top virtual bits, shift
+	 * down to get PFN, and make sure PFN is in range.
+	 */
+	sllx		%g4, 21, %g5
 
-	/* Clear the PAGE_OFFSET top virtual bits, then shift
-	 * down to get a 256MB physical address index.
+	/* Check to see if we know about valid memory at the 4MB
+	 * chunk this physical address will reside within.
 	 */
+	srlx		%g5, 21 + 41, %g2
+	brnz,pn		%g2, kvmap_dtlb_longpath
+	 nop
+
+	/* This unconditional branch and delay-slot nop gets patched
+	 * by the sethi sequence once the bitmap is properly setup.
+	 */
+	.globl		valid_addr_bitmap_insn
+valid_addr_bitmap_insn:
+	ba,pt		%xcc, 2f
+	 nop
+	.subsection	2
+	.globl		valid_addr_bitmap_patch
+valid_addr_bitmap_patch:
+	sethi		%hi(sparc64_valid_addr_bitmap), %g7
+	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
+	.previous
+
+	srlx		%g5, 21 + 22, %g2
+	srlx		%g2, 6, %g5
+	and		%g2, 63, %g2
+	sllx		%g5, 3, %g5
+	ldx		[%g7 + %g5], %g5
+	mov		1, %g7
+	sllx		%g7, %g2, %g7
+	andcc		%g5, %g7, %g0
+	be,pn		%xcc, kvmap_dtlb_longpath
+
+2:	 sethi		%hi(kpte_linear_bitmap), %g2
+	or		%g2, %lo(kpte_linear_bitmap), %g2
+
+	/* Get the 256MB physical address index. */
 	sllx		%g4, 21, %g5
 	mov		1, %g7
 	srlx		%g5, 21 + 28, %g5
--
cgit v1.2.3
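What the new assembly computes is easier to follow in C. The sketch
below is a rendering of the same three steps, not the kernel source,
and the PAGE_OFFSET constant in the demo main() is an assumption about
contemporary sparc64: recover the physical address by stripping the
top 21 virtual bits, reject anything with a bit set above bit 40, then
test one bit per 4MB chunk in the embedded bitmap.

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the srlx/brnz/ldx sequence: vaddr is the faulting linear
 * kernel address; PAGE_OFFSET occupies the top 21 virtual bits, so
 * shifting left then right by 21 recovers the physical address. */
static bool linear_paddr_is_valid(unsigned long vaddr,
				  const unsigned long *bitmap)
{
	unsigned long paddr = (vaddr << 21) >> 21;

	/* Anything with a bit set above bit 40 is beyond the 1 << 41
	 * maximum and must take the long path. */
	if (paddr >> 41)
		return false;

	/* One bit per 4MB chunk: word index, then bit within word. */
	unsigned long chunk = paddr >> 22;
	return (bitmap[chunk >> 6] >> (chunk & 63)) & 1;
}

int main(void)
{
	/* Toy bitmap marking only the first 4MB chunk as valid. */
	unsigned long bitmap[1] = { 0x1 };
	unsigned long page_offset = 0xFFFFF80000000000UL; /* assumed */

	printf("offset 0x0:      %d\n",
	       linear_paddr_is_valid(page_offset + 0x0, bitmap));
	printf("offset 0x400000: %d\n",
	       linear_paddr_is_valid(page_offset + 0x400000, bitmap));
	return 0;
}

The guard against bits above bit 40 is what keeps the bitmap index in
range, mirroring the srlx/brnz pair at the top of the patched code, so
the bitmap lookup itself can never fault.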