From 2651fa2bcb1a7b06793441d50dcbf98efda592c6 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Tue, 7 Jul 2009 16:37:08 +0200
Subject: [S390] perf_counter build fix

Add PERF_COUNTER_INDEX_OFFSET define to fix this build bug:

kernel/perf_counter.c: In function 'perf_counter_index':
kernel/perf_counter.c:1889: error: 'PERF_COUNTER_INDEX_OFFSET' undeclared

Same fix as for FRV since s390 doesn't support hw counters.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/perf_counter.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/s390/include')

diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h
index a7205a3828c..7015188c2cc 100644
--- a/arch/s390/include/asm/perf_counter.h
+++ b/arch/s390/include/asm/perf_counter.h
@@ -6,3 +6,5 @@
 static inline void set_perf_counter_pending(void) {}
 static inline void clear_perf_counter_pending(void) {}
+
+#define PERF_COUNTER_INDEX_OFFSET 0
-- cgit v1.2.3

From 25ca1251dc55673da1f3c6ddc1bf93fbb7fc83fc Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Tue, 7 Jul 2009 16:37:12 +0200
Subject: [S390] add generic atomic64 support for 31 bit

Performance counters need 64 bit atomic operations. To keep the patch small
we use the simple generic atomic64_t implementation. The native
implementation follows with the next kernel.

Fixes this build bug:

In file included from kernel/sched.c:42:
include/linux/perf_counter.h:427: error: expected specifier-qualifier-list before 'atomic64_t'

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/atomic.h | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'arch/s390/include')

diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fca9dffcc66..c7d0abfb0f0 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -268,7 +268,12 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #undef __CSG_LOOP
-#endif
+
+#else /* __s390x__ */
+
+#include <asm-generic/atomic64.h>
+
+#endif /* __s390x__ */
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
-- cgit v1.2.3
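For context on the perf_counter build fix above: the generic code reports a counter's hardware index to user space through an arch-adjustable base, which is all the define has to satisfy. Below is a rough sketch of that computation, not the verbatim kernel/perf_counter.c code, so treat the field and constant names as illustrative:

#include <linux/perf_counter.h>

/*
 * Rough sketch of how the core uses PERF_COUNTER_INDEX_OFFSET: the index
 * handed to user space (via the mmap control page) is the hardware counter
 * slot plus one, rebased by the arch-specific offset; 0 means "no usable
 * index".  Architectures without hardware counters, like s390 here and FRV
 * before it, define the offset as 0 just so this expression compiles.
 */
static int perf_counter_index(struct perf_counter *counter)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return 0;

	return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
}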
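The generic atomic64_t implementation pulled in by the 31-bit patch needs no 64-bit atomic instructions: lib/atomic64.c simply protects the counter with a spinlock, hashed by the variable's address to spread contention. A single-lock sketch of the idea (illustrative, not the actual library code):

#include <linux/spinlock.h>

/*
 * Simplified sketch of the generic atomic64 scheme used on 31-bit s390:
 * a plain 64-bit value guarded by a spinlock.  The real lib/atomic64.c
 * picks one of several hashed locks based on the atomic64_t address;
 * a single lock is enough to show the principle.
 */
typedef struct {
	long long counter;
} atomic64_t;

static DEFINE_SPINLOCK(atomic64_lock);

static inline long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	long long val;

	spin_lock_irqsave(&atomic64_lock, flags);
	val = v->counter + a;
	v->counter = val;
	spin_unlock_irqrestore(&atomic64_lock, flags);

	return val;
}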
From c99e6efe1ba04561e7d93a81f0be07e37427e835 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 10 Jul 2009 14:57:56 +0200
Subject: sched: INIT_PREEMPT_COUNT

Pull the initial preempt_count value into a single definition site.

Maintainers for: alpha, ia64 and m68k, please have a look, your arch code is funny.

The header magic is a bit odd, but similar to the KERNEL_DS one, CPP waits
with expanding these macros until the INIT_THREAD_INFO macro itself is
expanded, which is in arch/*/kernel/init_task.c where we've already included
sched.h so we're good.

Cc: tony.luck@intel.com
Cc: rth@twiddle.net
Cc: geert@linux-m68k.org
Signed-off-by: Peter Zijlstra
Acked-by: Matt Mackall
Signed-off-by: Linus Torvalds
---
 arch/s390/include/asm/thread_info.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/s390/include')

diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 925bcc64903..ba1cab9fc1f 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -61,7 +61,7 @@ struct thread_info {
 	.exec_domain	= &default_exec_domain,	\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.preempt_count	= 1,			\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
 	.restart_block	= {			\
 		.fn = do_no_restart_syscall,	\
 	},					\
-- cgit v1.2.3
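For reference, the single definition site the s390 initializer now points at lives in <linux/sched.h>; in this kernel generation it amounts to roughly the following (a sketch of the corresponding core change, not part of the s390 diff):

/*
 * Roughly the <linux/sched.h> side of the change: every thread_info starts
 * with preemption disabled until the scheduler is up (the count is reset by
 * sched_init()/init_idle()), and PREEMPT_ACTIVE is folded in so that
 * cond_resched() refuses to schedule before the scheduler is active.
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)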
From 9e1b32caa525cb236e80e9c671e179bcecccc657 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 22 Jul 2009 15:44:28 +1000
Subject: mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

Upcoming patches to support the new 64-bit "BookE" powerpc architecture will
need to have the virtual address corresponding to PTE page when freeing it,
due to the way the HW table walker works.

Basically, the TLB can be loaded with "large" pages that cover the whole
virtual space (well, sort-of, half of it actually) represented by a PTE page,
and which contain an "indirect" bit indicating that this TLB entry RPN points
to an array of PTEs from which the TLB can then create direct entries. Thus,
in order to invalidate those when PTE pages are deleted, we need the virtual
address to pass to tlbilx or tlbivax instructions.

The old trick of sticking it somewhere in the PTE page struct page sucks too
much, the address is almost readily available in all call sites and almost
everybody implements these as macros, so we may as well add the argument
everywhere. I added it to the pmd and pud variants for consistency.

Signed-off-by: Benjamin Herrenschmidt
Acked-by: David Howells [MN10300 & FRV]
Acked-by: Nick Piggin
Acked-by: Martin Schwidefsky [s390]
Signed-off-by: Linus Torvalds
---
 arch/s390/include/asm/tlb.h | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'arch/s390/include')

diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 3d8a96d39d9..81150b05368 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
  */
-static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+				unsigned long address)
 {
 	if (!tlb->fullmm) {
 		tlb->array[tlb->nr_ptes++] = pte;
@@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
  * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
  * to avoid the double free of the pmd in this case.
  */
-static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				unsigned long address)
 {
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
@@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
  * as the pgd. pud_free_tlb checks the asce_limit against 4TB
  * to avoid the double free of the pud in this case.
  */
-static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				unsigned long address)
 {
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
-- cgit v1.2.3
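On the generic side the virtual address is already at hand in every caller, which is why threading it through was cheap. A slightly simplified sketch of the mm/memory.c call site after this change:

#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * Simplified sketch of the generic caller (mm/memory.c) after this change:
 * the address covered by the PTE page is simply forwarded to pte_free_tlb(),
 * where a BookE-style implementation can use it to shoot down the "indirect"
 * TLB entry that maps the PTE page, and s390 can keep ignoring it.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	tlb->mm->nr_ptes--;
}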