From 2f47f44790a9c8fc43e515df3c6be19a35ee5de5 Mon Sep 17 00:00:00 2001
From: Yoshihiro Shimoda
Date: Tue, 10 Mar 2009 15:49:54 +0900
Subject: sh: Support fixed 32-bit PMB mappings from bootloader.

This provides a method for supporting fixed PMB mappings inherited from
the bootloader, as an alternative to the dynamic PMB mapping currently
used by the kernel. In the future these methods will be combined.

The P1/P2 area is handled like a regular 29-bit physical address, and
local bus devices are assigned P3 area addresses.

Signed-off-by: Yoshihiro Shimoda
Signed-off-by: Paul Mundt
---
 arch/sh/boot/Makefile           | 20 ++++++++++--------
 arch/sh/include/asm/addrspace.h |  4 ++--
 arch/sh/include/asm/io.h        |  4 ++--
 arch/sh/include/asm/page.h      |  7 ++++++-
 arch/sh/kernel/vmlinux_32.lds.S |  5 ++++-
 arch/sh/mm/Kconfig              | 29 +++++++++++++++++++++++++-
 arch/sh/mm/Makefile_32          |  1 +
 arch/sh/mm/ioremap_32.c         |  6 ++++--
 arch/sh/mm/pmb-fixed.c          | 45 +++++++++++++++++++++++++++++++++++++++++
 9 files changed, 104 insertions(+), 17 deletions(-)
 create mode 100644 arch/sh/mm/pmb-fixed.c

diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index c16ccd4bfa1..95483d16125 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -33,20 +33,24 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
 $(obj)/compressed/vmlinux: FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
-ifeq ($(CONFIG_32BIT),y)
-KERNEL_LOAD	:= $(shell /bin/bash -c 'printf "0x%08x" \
-		     $$[$(CONFIG_PAGE_OFFSET) + \
-			$(CONFIG_ZERO_PAGE_OFFSET)]')
-else
+KERNEL_MEMORY := 0x00000000
+ifeq ($(CONFIG_PMB_FIXED),y)
+KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
+		     $$[$(CONFIG_MEMORY_START) & 0x1fffffff]')
+endif
+ifeq ($(CONFIG_29BIT),y)
+KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
+		     $$[$(CONFIG_MEMORY_START)]')
+endif
+
 KERNEL_LOAD	:= $(shell /bin/bash -c 'printf "0x%08x" \
 		     $$[$(CONFIG_PAGE_OFFSET) + \
-			$(CONFIG_MEMORY_START) + \
+			$(KERNEL_MEMORY) + \
 			$(CONFIG_ZERO_PAGE_OFFSET)]')
-endif
 
 KERNEL_ENTRY	:= $(shell /bin/bash -c 'printf "0x%08x" \
 		     $$[$(CONFIG_PAGE_OFFSET) + \
-			$(CONFIG_MEMORY_START) + \
+			$(KERNEL_MEMORY) + \
 			$(CONFIG_ZERO_PAGE_OFFSET) + $(CONFIG_ENTRY_OFFSET)]')
 
 quiet_cmd_uimage = UIMAGE  $@
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 36736c7e93d..80d40813e05 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -31,7 +31,7 @@
 /* Returns the physical address of a PnSEG (n=1,2) address */
 #define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)
 
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
 /*
  * Map an address to a certain privileged segment
  */
@@ -43,7 +43,7 @@
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
 #define P4SEGADDR(a)	\
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
-#endif /* 29BIT */
+#endif /* 29BIT || PMB_FIXED */
 #endif /* P1SEG */
 
 /* Check if an address can be reached in 29 bits */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 61f6dae4053..0454f8d6805 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -238,7 +238,7 @@ extern void onchip_unmap(unsigned long vaddr);
 static inline void __iomem *
 __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 {
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
 	unsigned long last_addr = offset + size - 1;
 #endif
 	void __iomem *ret;
 
@@ -247,7 +247,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 	if (ret)
 		return ret;
 
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
 	/*
 	 * For P1 and P2 space this is trivial, as everything is already
 	 * mapped. Uncached access for P1 addresses are done through P2.
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 5871d78e47e..9c6d21ec024 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -129,7 +129,12 @@ typedef struct page *pgtable_t;
  * is not visible (it is part of the PMB mapping) and so needs to be
  * added or subtracted as required.
  */
-#ifdef CONFIG_32BIT
+#if defined(CONFIG_PMB_FIXED)
+/* phys = virt - PAGE_OFFSET + (__MEMORY_START & 0xe0000000) */
+#define PMB_OFFSET	(PAGE_OFFSET - PXSEG(__MEMORY_START))
+#define __pa(x)	((unsigned long)(x) - PMB_OFFSET)
+#define __va(x)	((void *)((unsigned long)(x) + PMB_OFFSET))
+#elif defined(CONFIG_32BIT)
 #define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
 #define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
 #else
diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
index 7b4b82bd115..d0b2a715cd1 100644
--- a/arch/sh/kernel/vmlinux_32.lds.S
+++ b/arch/sh/kernel/vmlinux_32.lds.S
@@ -15,7 +15,10 @@ OUTPUT_ARCH(sh)
 ENTRY(_start)
 SECTIONS
 {
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB_FIXED
+	. = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) +
+	    CONFIG_ZERO_PAGE_OFFSET;
+#elif defined(CONFIG_32BIT)
 	. = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
 #else
 	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 555ec9714b9..10c24356d2d 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -57,7 +57,7 @@ config 32BIT
 	bool
 	default y if CPU_SH5
 
-config PMB
+config PMB_ENABLE
 	bool "Support 32-bit physical addressing through PMB"
 	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
 	select 32BIT
@@ -67,6 +67,33 @@
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+choice
+	prompt "PMB handling type"
+	depends on PMB_ENABLE
+	default PMB_FIXED
+
+config PMB
+	bool "PMB"
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
+	select 32BIT
+	help
+	  If you say Y here, physical addressing will be extended to
+	  32-bits through the SH-4A PMB. If this is not set, legacy
+	  29-bit physical addressing will be used.
+
+config PMB_FIXED
+	bool "fixed PMB"
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || \
+					   CPU_SUBTYPE_SH7785)
+	select 32BIT
+	help
+	  If this option is enabled, fixed PMB mappings are inherited
+	  from the boot loader, and the kernel does not attempt dynamic
+	  management. This is the closest to legacy 29-bit physical mode,
+	  and allows systems to support up to 512MiB of system memory.
+
+endchoice
+
 config X2TLB
 	bool "Enable extended TLB mode"
 	depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index cb2f3f29959..469ff167245 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -35,6 +35,7 @@ endif
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PMB)		+= pmb.o
+obj-$(CONFIG_PMB_FIXED)		+= pmb-fixed.o
 obj-$(CONFIG_NUMA)		+= numa.o
 
 EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index 8dc77026a0b..60cc486d2c2 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -59,11 +59,13 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
 		return (void __iomem *)phys_addr;
 
+#if !defined(CONFIG_PMB_FIXED)
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
 	if (phys_addr < virt_to_phys(high_memory))
 		return NULL;
+#endif
 
 	/*
 	 * Mappings have to be page-aligned
@@ -81,7 +83,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	area->phys_addr = phys_addr;
 	orig_addr = addr = (unsigned long)area->addr;
 
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB
 	/*
 	 * First try to remap through the PMB once a valid VMA has been
 	 * established. Smaller allocations (or the rest of the size
@@ -122,7 +124,7 @@ void __iounmap(void __iomem *addr)
 	if (seg < P3SEG || vaddr >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
 		return;
 
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB
 	/*
 	 * Purge any PMB entries that may have been established for this
 	 * mapping, then proceed with conventional VMA teardown.
diff --git a/arch/sh/mm/pmb-fixed.c b/arch/sh/mm/pmb-fixed.c
new file mode 100644
index 00000000000..43c8eac4d8a
--- /dev/null
+++ b/arch/sh/mm/pmb-fixed.c
@@ -0,0 +1,45 @@
+/*
+ * arch/sh/mm/pmb-fixed.c
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/mmu.h>
+#include <asm/system.h>
+
+static int __uses_jump_to_uncached fixed_pmb_init(void)
+{
+	int i;
+	unsigned long addr, data;
+
+	jump_to_uncached();
+
+	for (i = 0; i < PMB_ENTRY_MAX; i++) {
+		addr = PMB_DATA + (i << PMB_E_SHIFT);
+		data = ctrl_inl(addr);
+		if (!(data & PMB_V))
+			continue;
+
+		if (data & PMB_C) {
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+			data |= PMB_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+			data &= ~PMB_WT;
+#else
+			data &= ~(PMB_C | PMB_WT);
+#endif
+		}
+		ctrl_outl(data, addr);
+	}
+
+	back_to_cached();
+
+	return 0;
+}
+arch_initcall(fixed_pmb_init);
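
A worked example of the boot/Makefile arithmetic above: with CONFIG_PMB_FIXED=y,
KERNEL_MEMORY is the low 29 bits of CONFIG_MEMORY_START, and the load and entry
addresses are offsets from CONFIG_PAGE_OFFSET. The minimal userspace sketch
below mirrors the shell arithmetic; the CONFIG_* values are illustrative
assumptions only, not taken from any particular board.

#include <stdio.h>

/* Illustrative Kconfig values; assumptions for this example only. */
#define CONFIG_PAGE_OFFSET	0x80000000UL
#define CONFIG_MEMORY_START	0x48000000UL	/* assumed 32-bit physical base */
#define CONFIG_ZERO_PAGE_OFFSET	0x00001000UL
#define CONFIG_ENTRY_OFFSET	0x00000000UL

int main(void)
{
	/* CONFIG_PMB_FIXED=y: keep only the low 29 bits of MEMORY_START */
	unsigned long kernel_memory = CONFIG_MEMORY_START & 0x1fffffffUL;
	unsigned long kernel_load   = CONFIG_PAGE_OFFSET + kernel_memory +
				      CONFIG_ZERO_PAGE_OFFSET;
	unsigned long kernel_entry  = kernel_load + CONFIG_ENTRY_OFFSET;

	printf("KERNEL_MEMORY = 0x%08lx\n", kernel_memory);	/* 0x08000000 */
	printf("KERNEL_LOAD   = 0x%08lx\n", kernel_load);	/* 0x88001000 */
	printf("KERNEL_ENTRY  = 0x%08lx\n", kernel_entry);	/* 0x88001000 */
	return 0;
}

Under these assumed values the result matches the link address that the
vmlinux_32.lds.S hunk sets for CONFIG_PMB_FIXED (PAGE_OFFSET plus the low
29 bits of MEMORY_START plus ZERO_PAGE_OFFSET), so the image is loaded at
the address it was linked for.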
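The asm/page.h hunk is the runtime half of the same scheme: because the fixed
PMB mapping hides only the segment (top three) bits of __MEMORY_START,
__pa()/__va() reduce to subtracting or adding a constant. Another small
sketch under the same assumed addresses, with PXSEG() reproduced as the
segment-bit mask from asm/addrspace.h:

#include <stdio.h>

#define PAGE_OFFSET	0x80000000UL
#define __MEMORY_START	0x48000000UL	/* same assumed physical base as above */

/* PXSEG() keeps the top three (segment) bits of an address */
#define PXSEG(a)	((unsigned long)(a) & 0xe0000000UL)

/* phys = virt - PAGE_OFFSET + (__MEMORY_START & 0xe0000000) */
#define PMB_OFFSET	(PAGE_OFFSET - PXSEG(__MEMORY_START))
#define __pa(x)		((unsigned long)(x) - PMB_OFFSET)
#define __va(x)		((void *)((unsigned long)(x) + PMB_OFFSET))

int main(void)
{
	/* The kernel runs at PAGE_OFFSET + (__MEMORY_START & 0x1fffffff). */
	unsigned long virt = PAGE_OFFSET + (__MEMORY_START & 0x1fffffffUL);

	printf("virt 0x%08lx -> phys 0x%08lx\n", virt, __pa(virt));
	printf("phys 0x%08lx -> virt 0x%08lx\n", __MEMORY_START,
	       (unsigned long)__va(__MEMORY_START));
	return 0;
}

Round-tripping __MEMORY_START through __va() and __pa() returns the original
address (0x48000000 <-> 0x88000000 here), which is the property the fixed
mapping relies on.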