From f6430a938dc6d77e33722aaf6a58382b3423935d Mon Sep 17 00:00:00 2001
From: Linus Walleij
Date: Wed, 24 Jun 2009 23:38:56 +0100
Subject: [ARM] 5565/2: Use PAGE_SIZE and RO_DATA() in link script

Update the link script for ARM to use PAGE_SIZE instead of a
hard-coded 4096. The old RODATA macro is also deprecated in favour of
the RO_DATA(PAGE_SIZE) macro. As a consequence, PAGE_SIZE was changed
from (1UL << PAGE_SHIFT) to (_AC(1,UL) << PAGE_SHIFT), because the
linker does not understand the "UL" suffix on numeric constants.

Signed-off-by: Linus Walleij
Signed-off-by: Russell King
---
 arch/arm/include/asm/page.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index be962c1349c..9c746af1bf6 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -12,7 +12,7 @@

 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT	12
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))

 #ifndef __ASSEMBLY__
--
cgit v1.2.3
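For readers unfamiliar with _AC(): it expands to a bare constant in
assembler and linker-script contexts and pastes the suffix back on in C,
which is what makes the definition above usable from vmlinux.lds. A
minimal sketch of the helper, from memory (check include/linux/const.h
in the tree for the exact form):

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)	X		/* assembler/linker: plain constant */
    #else
    #define __AC(X,Y)	(X##Y)		/* C: paste the type suffix back on */
    #define _AC(X,Y)	__AC(X,Y)
    #endif

So _AC(1,UL) reads as 1 in the link script and as (1UL) in C code.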
From fb93a1c75eb646fde35985e9af23da936775ae52 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 5 Jul 2009 11:30:15 +0100
Subject: [ARM] pgtable: swp pte layout documentation, definitions, and check

Document the layout of our swp PTE entries, adding definitions for the
bit masks/shifts/sizes, and implement MAX_SWAPFILES_CHECK() such that
we fail to build if we are unable to properly encode the swp type
field.

Signed-off-by: Russell King
---
 arch/arm/include/asm/pgtable.h | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 1cd2d6416bd..c1d97938f3e 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -384,16 +384,36 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)

 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

-/* Encode and decode a swap entry.
+/*
+ * Encode and decode a swap entry.  Swap entries are stored in the Linux
+ * page tables as follows:
+ *
+ *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *   <--------------- offset --------------------> <--- type --> 0 0
  *
- * We support up to 32GB of swap on 4k machines
+ * This gives us up to 127 swap files and 32GB per swap file.  Note that
+ * the offset field is always non-zero.
  */
-#define __swp_type(x)		(((x).val >> 2) & 0x7f)
-#define __swp_offset(x)		((x).val >> 9)
-#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
+#define __SWP_TYPE_SHIFT	2
+#define __SWP_TYPE_BITS		7
+#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

+/*
+ * It is an error for the kernel to have more swap files than we can
+ * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
+ * is increased beyond what we presently support.
+ */
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 /* FIXME: this is not correct */
 #define kern_addr_valid(addr)	(1)
--
cgit v1.2.3

From 65b1bfc13e8f50034187e339aa12b81cd6785bd5 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 5 Jul 2009 11:52:21 +0100
Subject: [ARM] pgtable: file pte layout documentation

Document the layout of our file PTE entries.

Signed-off-by: Russell King
---
 arch/arm/include/asm/pgtable.h | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index c1d97938f3e..c433c6c7311 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -285,15 +285,6 @@ extern struct page *empty_zero_page;
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
 #define pte_special(pte)	(0)

-/*
- * The following only works if pte_present() is not true.
- */
-#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
-#define pte_to_pgoff(x)		(pte_val(x) >> 2)
-#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)
-
-#define PTE_FILE_MAX_BITS	30
-
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

@@ -414,6 +405,20 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  */
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

+/*
+ * Encode and decode a file entry.  File entries are stored in the Linux
+ * page tables as follows:
+ *
+ *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *   <------------------------ offset -------------------------> 1 0
+ */
+#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
+#define pte_to_pgoff(x)		(pte_val(x) >> 2)
+#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)
+
+#define PTE_FILE_MAX_BITS	30
+
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 /* FIXME: this is not correct */
 #define kern_addr_valid(addr)	(1)
--
cgit v1.2.3
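To make the swp encoding from the first of the two patches above
concrete, here is a minimal, self-contained sketch. It is ordinary
userspace C, not kernel code: the swp_entry_t wrapper and the macros are
copied from the hunk purely for demonstration.

    #include <assert.h>
    #include <stdio.h>

    typedef struct { unsigned long val; } swp_entry_t;

    #define __SWP_TYPE_SHIFT	2
    #define __SWP_TYPE_BITS	7
    #define __SWP_TYPE_MASK	((1 << __SWP_TYPE_BITS) - 1)
    #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

    #define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
    #define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
    #define __swp_entry(type, offset) \
    	((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | \
    			 ((offset) << __SWP_OFFSET_SHIFT) })

    int main(void)
    {
    	swp_entry_t e = __swp_entry(3, 12345);	/* type 3, page offset 12345 */

    	assert(__swp_type(e) == 3);		/* both fields round-trip */
    	assert(__swp_offset(e) == 12345);
    	assert((e.val & 3) == 0);		/* low bits 0: neither present nor file */
    	printf("entry=%#lx\n", e.val);
    	return 0;
    }

With 4K pages, the 23-bit offset field covers 2^23 pages, i.e. 32GB,
which is where the "32GB per swap file" figure in the comment comes
from; the low two bits stay zero so a swap PTE can never be mistaken for
a present or file PTE.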
From c99e6efe1ba04561e7d93a81f0be07e37427e835 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 10 Jul 2009 14:57:56 +0200
Subject: sched: INIT_PREEMPT_COUNT

Pull the initial preempt_count value into a single definition site.

Maintainers of alpha, ia64 and m68k, please have a look; your arch code
is funny.

The header magic is a bit odd, but similar to the KERNEL_DS one: CPP
waits to expand these macros until the INIT_THREAD_INFO macro itself is
expanded, which happens in arch/*/kernel/init_task.c, where sched.h has
already been included, so we're good.

Cc: tony.luck@intel.com
Cc: rth@twiddle.net
Cc: geert@linux-m68k.org
Signed-off-by: Peter Zijlstra
Acked-by: Matt Mackall
Signed-off-by: Linus Torvalds
---
 arch/arm/include/asm/thread_info.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 4f8848260ee..73394e50cbc 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -73,7 +73,7 @@ struct thread_info {
 	.task		= &tsk,						\
 	.exec_domain	= &default_exec_domain,			\
 	.flags		= 0,						\
-	.preempt_count	= 1,						\
+	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
 	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
 			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
--
cgit v1.2.3
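The ARM hunk above only consumes the macro; the single definition site
this commit introduces lives in include/linux/sched.h. Sketched from
memory (the exact comment wording may differ in the tree), it looks
roughly like:

    /*
     * Disable preemption until the scheduler is running.
     * Reset by start_kernel()->sched_init()->init_idle().
     *
     * PREEMPT_ACTIVE is included so that cond_resched() cannot fire
     * before the scheduler is up.
     */
    #define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

Starting every thread_info with a non-zero count keeps preemption off
until init_idle() explicitly resets it.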
From feecaf73bb437cf72a44bd71598c6532d357f78e Mon Sep 17 00:00:00 2001
From: Jaswinder Singh Rajput
Date: Wed, 8 Jul 2009 20:01:39 +0530
Subject: ARM: includecheck fix: atomic.h

Fix the following 'make includecheck' warning:

  arch/arm/include/asm/atomic.h: asm/system.h is included more than once.

Signed-off-by: Jaswinder Singh Rajput
Signed-off-by: Russell King
---
 arch/arm/include/asm/atomic.h | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9e07fe50702..9ed2377fe8e 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -159,8 +159,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)

 #else /* ARM_ARCH_6 */

-#include <asm/system.h>
-
 #ifdef CONFIG_SMP
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
--
cgit v1.2.3

From 9e1b32caa525cb236e80e9c671e179bcecccc657 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 22 Jul 2009 15:44:28 +1000
Subject: mm: Pass virtual address to [__]p{te,ud,md}_free_tlb()

Upcoming patches to support the new 64-bit "BookE" powerpc architecture
will need to have the virtual address corresponding to the PTE page
when freeing it, due to the way the HW table walker works.

Basically, the TLB can be loaded with "large" pages that cover the whole
virtual space (well, sort-of, half of it actually) represented by a PTE
page, and which contain an "indirect" bit indicating that this TLB entry
RPN points to an array of PTEs from which the TLB can then create direct
entries. Thus, in order to invalidate those when PTE pages are deleted,
we need the virtual address to pass to tlbilx or tlbivax instructions.

The old trick of sticking it somewhere in the PTE page struct page sucks
too much; the address is almost readily available in all call sites and
almost everybody implements these as macros, so we may as well add the
argument everywhere. I added it to the pmd and pud variants for
consistency.

Signed-off-by: Benjamin Herrenschmidt
Acked-by: David Howells [MN10300 & FRV]
Acked-by: Nick Piggin
Acked-by: Martin Schwidefsky [s390]
Signed-off-by: Linus Torvalds
---
 arch/arm/include/asm/tlb.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 321c83e43a1..f41a6f57cd1 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -102,8 +102,8 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 }

 #define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep)		pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp)		pmd_free((tlb)->mm, pmdp)
+#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)

 #define tlb_migrate_finish(mm)		do { } while (0)
--
cgit v1.2.3
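For illustration only: an architecture with the kind of indirect TLB
entries described above could use the new argument roughly as below.
This is a hypothetical sketch; tlb_flush_pte_page() is an invented
helper, not a real kernel API, standing in for whatever instruction
sequence (e.g. tlbilx/tlbivax on BookE) drops the indirect entry. ARM,
as the hunk above shows, simply ignores addr.

    /* Hypothetical arch header -- NOT real kernel code.
     * Invalidate the indirect TLB entry covering the PTE page at
     * 'addr' before handing the page back to the allocator.
     */
    #define pte_free_tlb(tlb, ptep, addr)			\
    	do {						\
    		tlb_flush_pte_page((tlb)->mm, addr);	\
    		pte_free((tlb)->mm, ptep);		\
    	} while (0)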