author    Matt Fleming <matt@console-pimps.org>    2009-10-06 21:22:30 +0000
committer Paul Mundt <lethal@linux-sh.org>    2009-10-10 21:52:03 +0900
commit    3105121949b609964f370d42d1b90fe7fc01d6b1 (patch)
tree      20a7a6afa6d8023d20dcc7509a253268e0afdebc /arch/sh/mm
parent    edd7de803c79c7df117bf3f0e22ffdba1b1ef256 (diff)
sh: Remap physical memory into P1 and P2 in pmb_init()
Eventually we'll have complete control over what physical memory gets mapped where and we can probably do other interesting things. For now though, when the MMU is in 32-bit mode, we map physical memory into the P1 and P2 virtual address ranges with the same semantics as they have in 29-bit mode.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
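As a rough sketch of what the commit message describes: after this patch, pmb_init() establishes the P1 and P2 windows with two pmb_remap() calls instead of the static pmb_init_map[] table. The snippet below only mirrors those two calls from the hunk further down; the wrapper function and its name are illustrative, not part of the kernel.

/*
 * Illustrative only: the P1/P2 setup this patch adds to pmb_init().
 * P1SEG/P2SEG, __MEMORY_START/__MEMORY_SIZE and the PMB_* flags are the
 * kernel's own symbols; remap_p1_p2() is a hypothetical wrapper.
 */
static void remap_p1_p2(void)
{
	long size;

	/* P2: uncached, unbuffered, write-through window onto RAM */
	size = pmb_remap(P2SEG, __MEMORY_START, __MEMORY_SIZE,
			 PMB_WT | PMB_UB);
	BUG_ON(size != __MEMORY_SIZE);

	/* P1: cached window onto the same physical memory */
	size = pmb_remap(P1SEG, __MEMORY_START, __MEMORY_SIZE, PMB_C);
	BUG_ON(size != __MEMORY_SIZE);
}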
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--    arch/sh/mm/consistent.c    2
-rw-r--r--    arch/sh/mm/pmb.c    54
2 files changed, 16 insertions(+), 40 deletions(-)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index e098ec158dd..9a8403d9344 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(dma_free_coherent);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
-#ifdef CONFIG_CPU_SH5
+#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
void *p1addr = vaddr;
#else
void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
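For context on the consistent.c hunk above: in 29-bit mode P1SEGADDR() rebases a kernel virtual address into the cached P1 segment before cache maintenance is performed on it, whereas with CONFIG_PMB the P1/P2 windows created in pmb_init() make the address usable as-is. A simplified illustration of the 29-bit conversion, assuming the conventional SH segment base (not the kernel's actual macro):

#define P1SEG_BASE	0x80000000UL	/* assumed cached P1 segment base */

/* Simplified stand-in for P1SEGADDR(): mask the address down to the
 * 29-bit physical range and re-base it into the cached P1 window. */
static inline void *to_p1seg(void *vaddr)
{
	return (void *)(((unsigned long)vaddr & 0x1fffffff) | P1SEG_BASE);
}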
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index baf365fcdb4..2d009bdcf90 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -38,26 +38,6 @@ static void __pmb_unmap(struct pmb_entry *);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;
-static struct pmb_entry pmb_init_map[] = {
- /* vpn ppn flags (ub/sz/c/wt) */
-
- /* P1 Section Mappings */
- { 0x80000000, 0x00000000, PMB_SZ_64M | PMB_C, },
- { 0x84000000, 0x04000000, PMB_SZ_64M | PMB_C, },
- { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
- { 0x90000000, 0x10000000, PMB_SZ_64M | PMB_C, },
- { 0x94000000, 0x14000000, PMB_SZ_64M | PMB_C, },
- { 0x98000000, 0x18000000, PMB_SZ_64M | PMB_C, },
-
- /* P2 Section Mappings */
- { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
- { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
- { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
- { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
- { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
- { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M | PMB_WT, },
-};
-
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
return (entry & PMB_E_MASK) << PMB_E_SHIFT;
@@ -156,13 +136,7 @@ static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
unsigned int entry = pmbe->entry;
unsigned long addr;
- /*
- * Don't allow clearing of wired init entries, P1 or P2 access
- * without a corresponding mapping in the PMB will lead to reset
- * by the TLB.
- */
- if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
- entry >= NR_PMB_ENTRIES))
+ if (unlikely(entry >= NR_PMB_ENTRIES))
return;
jump_to_uncached();
@@ -300,28 +274,30 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
int __uses_jump_to_uncached pmb_init(void)
{
- unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
- unsigned int entry, i;
-
- BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
+ unsigned int i;
+ long size;
jump_to_uncached();
/*
- * Ordering is important, P2 must be mapped in the PMB before we
- * can set PMB.SE, and P1 must be mapped before we jump back to
- * P1 space.
+ * Insert PMB entries for the P1 and P2 areas so that, after
+ * we've switched the MMU to 32-bit mode, the semantics of P1
+ * and P2 are the same as in 29-bit mode, e.g.
+ *
+ * P1 - provides a cached window onto physical memory
+ * P2 - provides an uncached window onto physical memory
*/
- for (entry = 0; entry < nr_entries; entry++) {
- struct pmb_entry *pmbe = pmb_init_map + entry;
+ size = pmb_remap(P2SEG, __MEMORY_START, __MEMORY_SIZE,
+ PMB_WT | PMB_UB);
+ BUG_ON(size != __MEMORY_SIZE);
- __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, entry);
- }
+ size = pmb_remap(P1SEG, __MEMORY_START, __MEMORY_SIZE, PMB_C);
+ BUG_ON(size != __MEMORY_SIZE);
ctrl_outl(0, PMB_IRMCR);
/* PMB.SE and UB[7] */
- ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);
+ ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
/* Flush out the TLB */
i = ctrl_inl(MMUCR);