Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig              |  21
-rw-r--r--  arch/arm/mm/Makefile             |   2
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c  | 318
-rw-r--r--  arch/arm/mm/proc-feroceon.S      | 242
4 files changed, 553 insertions, 30 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 33ed048502a..a2c8b006d71 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -180,8 +180,8 @@ config CPU_ARM925T
# ARM926T
config CPU_ARM926T
bool "Support ARM926T processor"
- depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 || ARCH_NS9XXX || ARCH_DAVINCI
- default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 || ARCH_NS9XXX || ARCH_DAVINCI
+ depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || ARCH_NS9XXX || ARCH_DAVINCI
+ default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || ARCH_NS9XXX || ARCH_DAVINCI
select CPU_32v5
select CPU_ABRT_EV5TJ
select CPU_PABRT_NOIFAR
@@ -365,7 +365,7 @@ config CPU_XSC3
# Feroceon
config CPU_FEROCEON
bool
- depends on ARCH_ORION5X
+ depends on ARCH_ORION5X || ARCH_LOKI || ARCH_KIRKWOOD || ARCH_MV78XX0
default y
select CPU_32v5
select CPU_ABRT_EV5T
@@ -373,7 +373,7 @@ config CPU_FEROCEON
select CPU_CACHE_VIVT
select CPU_CP15_MMU
select CPU_COPY_FEROCEON if MMU
- select CPU_TLB_V4WBI if MMU
+ select CPU_TLB_FEROCEON if MMU
config CPU_FEROCEON_OLD_ID
bool "Accept early Feroceon cores with an ARM926 ID"
@@ -551,6 +551,11 @@ config CPU_TLB_V4WBI
ARM Architecture Version 4 TLB with writeback cache and invalidate
instruction cache entry.
+config CPU_TLB_FEROCEON
+ bool
+ help
+ Feroceon TLB (v4wbi with non-outer-cachable page table walks).
+
config CPU_TLB_V6
bool
@@ -709,6 +714,14 @@ config OUTER_CACHE
bool
default n
+config CACHE_FEROCEON_L2
+ bool "Enable the Feroceon L2 cache controller"
+ depends on ARCH_KIRKWOOD || ARCH_MV78XX0
+ default y
+ select OUTER_CACHE
+ help
+ This option enables the Feroceon L2 cache controller.
+
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 32b2d2d213a..f64b92557b1 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o
obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o
obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o
obj-$(CONFIG_CPU_TLB_V4WBI) += tlb-v4wbi.o
+obj-$(CONFIG_CPU_TLB_FEROCEON) += tlb-v4wbi.o # reuse v4wbi TLB functions
obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o
@@ -73,4 +74,5 @@ obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
obj-$(CONFIG_CPU_V6) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o
+obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
new file mode 100644
index 00000000000..20eec4ba173
--- /dev/null
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -0,0 +1,318 @@
+/*
+ * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * References:
+ * - Unified Layer 2 Cache for Feroceon CPU Cores,
+ * Document ID MV-S104858-00, Rev. A, October 23 2007.
+ */
+
+#include <linux/init.h>
+#include <asm/cacheflush.h>
+#include <asm/plat-orion/cache-feroceon-l2.h>
+
+
+/*
+ * Low-level cache maintenance operations.
+ *
+ * As well as the regular 'clean/invalidate/flush L2 cache line by
+ * MVA' instructions, the Feroceon L2 cache controller also features
+ * 'clean/invalidate L2 range by MVA' operations.
+ *
+ * Cache range operations are initiated by writing the start and
+ * end addresses to successive cp15 registers, and process every
+ * cache line whose first byte address lies in the inclusive range
+ * [start:end].
+ *
+ * The cache range operations stall the CPU pipeline until completion.
+ *
+ * The range operations require two successive cp15 writes, in
+ * between which we don't want to be preempted.
+ */
+static inline void l2_clean_pa(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
+}
+
+static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ /*
+ * Make sure 'start' and 'end' reference the same page, as
+ * L2 is PIPT and range operations only do a TLB lookup on
+ * the start address.
+ */
+ BUG_ON((start ^ end) & ~(PAGE_SIZE - 1));
+
+ raw_local_irq_save(flags);
+ __asm__("mcr p15, 1, %0, c15, c9, 4" : : "r" (start));
+ __asm__("mcr p15, 1, %0, c15, c9, 5" : : "r" (end));
+ raw_local_irq_restore(flags);
+}
+
+static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
+{
+ l2_clean_mva_range(__phys_to_virt(start), __phys_to_virt(end));
+}
+
+static inline void l2_clean_inv_pa(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
+}
+
+static inline void l2_inv_pa(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
+}
+
+static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ /*
+ * Make sure 'start' and 'end' reference the same page, as
+ * L2 is PIPT and range operations only do a TLB lookup on
+ * the start address.
+ */
+ BUG_ON((start ^ end) & ~(PAGE_SIZE - 1));
+
+ raw_local_irq_save(flags);
+ __asm__("mcr p15, 1, %0, c15, c11, 4" : : "r" (start));
+ __asm__("mcr p15, 1, %0, c15, c11, 5" : : "r" (end));
+ raw_local_irq_restore(flags);
+}
+
+static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
+{
+ l2_inv_mva_range(__phys_to_virt(start), __phys_to_virt(end));
+}
+
+
+/*
+ * Linux primitives.
+ *
+ * Note that the end addresses passed to Linux primitives are
+ * noninclusive, while the hardware cache range operations use
+ * inclusive start and end addresses.
+ */
+#define CACHE_LINE_SIZE 32
+#define MAX_RANGE_SIZE 1024
+
+static int l2_wt_override;
+
+static unsigned long calc_range_end(unsigned long start, unsigned long end)
+{
+ unsigned long range_end;
+
+ BUG_ON(start & (CACHE_LINE_SIZE - 1));
+ BUG_ON(end & (CACHE_LINE_SIZE - 1));
+
+ /*
+ * Try to process all cache lines between 'start' and 'end'.
+ */
+ range_end = end;
+
+ /*
+ * Limit the number of cache lines processed at once,
+ * since cache range operations stall the CPU pipeline
+ * until completion.
+ */
+ if (range_end > start + MAX_RANGE_SIZE)
+ range_end = start + MAX_RANGE_SIZE;
+
+ /*
+ * Cache range operations can't straddle a page boundary.
+ */
+ if (range_end > (start | (PAGE_SIZE - 1)) + 1)
+ range_end = (start | (PAGE_SIZE - 1)) + 1;
+
+ return range_end;
+}
+
+static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
+{
+ /*
+ * Clean and invalidate partial first cache line.
+ */
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
+ start = (start | (CACHE_LINE_SIZE - 1)) + 1;
+ }
+
+ /*
+ * Clean and invalidate partial last cache line.
+ */
+ if (end & (CACHE_LINE_SIZE - 1)) {
+ l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
+ end &= ~(CACHE_LINE_SIZE - 1);
+ }
+
+ /*
+ * Invalidate all full cache lines between 'start' and 'end'.
+ */
+ while (start != end) {
+ unsigned long range_end = calc_range_end(start, end);
+ l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
+ start = range_end;
+ }
+
+ dsb();
+}
+
+static void feroceon_l2_clean_range(unsigned long start, unsigned long end)
+{
+ /*
+ * If L2 is forced to WT, the L2 will always be clean and we
+ * don't need to do anything here.
+ */
+ if (!l2_wt_override) {
+ start &= ~(CACHE_LINE_SIZE - 1);
+ end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
+ while (start != end) {
+ unsigned long range_end = calc_range_end(start, end);
+ l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
+ start = range_end;
+ }
+ }
+
+ dsb();
+}
+
+static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
+{
+ start &= ~(CACHE_LINE_SIZE - 1);
+ end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
+ while (start != end) {
+ unsigned long range_end = calc_range_end(start, end);
+ if (!l2_wt_override)
+ l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
+ l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
+ start = range_end;
+ }
+
+ dsb();
+}
+
+
+/*
+ * Routines to disable and re-enable the D-cache and I-cache at run
+ * time. These are necessary because the L2 cache can only be enabled
+ * or disabled while the L1 Dcache and Icache are both disabled.
+ */
+static void __init invalidate_and_disable_dcache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (cr & CR_C) {
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ flush_cache_all();
+ set_cr(cr & ~CR_C);
+ raw_local_irq_restore(flags);
+ }
+}
+
+static void __init enable_dcache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (!(cr & CR_C))
+ set_cr(cr | CR_C);
+}
+
+static void __init __invalidate_icache(void)
+{
+ int dummy;
+
+ __asm__ __volatile__("mcr p15, 0, %0, c7, c5, 0\n" : "=r" (dummy));
+}
+
+static void __init invalidate_and_disable_icache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (cr & CR_I) {
+ set_cr(cr & ~CR_I);
+ __invalidate_icache();
+ }
+}
+
+static void __init enable_icache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (!(cr & CR_I))
+ set_cr(cr | CR_I);
+}
+
+static inline u32 read_extra_features(void)
+{
+ u32 u;
+
+ __asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));
+
+ return u;
+}
+
+static inline void write_extra_features(u32 u)
+{
+ __asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
+}
+
+static void __init disable_l2_prefetch(void)
+{
+ u32 u;
+
+ /*
+ * Read the CPU Extra Features register and verify that the
+ * Disable L2 Prefetch bit is set.
+ */
+ u = read_extra_features();
+ if (!(u & 0x01000000)) {
+ printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n");
+ write_extra_features(u | 0x01000000);
+ }
+}
+
+static void __init enable_l2(void)
+{
+ u32 u;
+
+ u = read_extra_features();
+ if (!(u & 0x00400000)) {
+ printk(KERN_INFO "Feroceon L2: Enabling L2\n");
+
+ invalidate_and_disable_dcache();
+ invalidate_and_disable_icache();
+ write_extra_features(u | 0x00400000);
+ enable_icache();
+ enable_dcache();
+ }
+}
+
+void __init feroceon_l2_init(int __l2_wt_override)
+{
+ l2_wt_override = __l2_wt_override;
+
+ disable_l2_prefetch();
+
+ outer_cache.inv_range = feroceon_l2_inv_range;
+ outer_cache.clean_range = feroceon_l2_clean_range;
+ outer_cache.flush_range = feroceon_l2_flush_range;
+
+ enable_l2();
+
+ printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
+ l2_wt_override ? ", in WT override mode" : "");
+}
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index a02c1712b52..f2e5884c513 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -44,11 +44,31 @@
*/
#define CACHE_DLINESIZE 32
+ .bss
+ .align 3
+__cache_params_loc:
+ .space 8
+
.text
+__cache_params:
+ .word __cache_params_loc
+
/*
* cpu_feroceon_proc_init()
*/
ENTRY(cpu_feroceon_proc_init)
+ mrc p15, 0, r0, c0, c0, 1 @ read cache type register
+ ldr r1, __cache_params
+ mov r2, #(16 << 5)
+ tst r0, #(1 << 16) @ get way
+ mov r0, r0, lsr #18 @ get cache size order
+ movne r3, #((4 - 1) << 30) @ 4-way
+ and r0, r0, #0xf
+ moveq r3, #0 @ 1-way
+ mov r2, r2, lsl r0 @ actual cache size
+ movne r2, r2, lsr #2 @ turned into # of sets
+ sub r2, r2, #(1 << 5)
+ stmia r1, {r2, r3}
mov pc, lr
/*
@@ -59,6 +79,13 @@ ENTRY(cpu_feroceon_proc_fin)
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
bl feroceon_flush_kern_cache_all
+
+#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH)
+ mov r0, #0
+ mcr p15, 1, r0, c15, c9, 0 @ clean L2
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif
+
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
@@ -117,11 +144,19 @@ ENTRY(feroceon_flush_user_cache_all)
*/
ENTRY(feroceon_flush_kern_cache_all)
mov r2, #VM_EXEC
- mov ip, #0
+
__flush_whole_cache:
-1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate
- bne 1b
+ ldr r1, __cache_params
+ ldmia r1, {r1, r3}
+1: orr ip, r1, r3
+2: mcr p15, 0, ip, c7, c14, 2 @ clean + invalidate D set/way
+ subs ip, ip, #(1 << 30) @ next way
+ bcs 2b
+ subs r1, r1, #(1 << 5) @ next set
+ bcs 1b
+
tst r2, #VM_EXEC
+ mov ip, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
@@ -138,7 +173,6 @@ __flush_whole_cache:
*/
.align 5
ENTRY(feroceon_flush_user_cache_range)
- mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bgt __flush_whole_cache
@@ -152,6 +186,7 @@ ENTRY(feroceon_flush_user_cache_range)
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
+ mov ip, #0
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
@@ -209,6 +244,20 @@ ENTRY(feroceon_flush_kern_dcache_page)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+ .align 5
+ENTRY(feroceon_range_flush_kern_dcache_page)
+ mrs r2, cpsr
+ add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive
+ orr r3, r2, #PSR_I_BIT
+ msr cpsr_c, r3 @ disable interrupts
+ mcr p15, 5, r0, c15, c15, 0 @ D clean/inv range start
+ mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
+ msr cpsr_c, r2 @ restore interrupts
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov pc, lr
+
/*
* dma_inv_range(start, end)
*
@@ -225,10 +274,10 @@ ENTRY(feroceon_flush_kern_dcache_page)
.align 5
ENTRY(feroceon_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
+ bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
- bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -236,6 +285,22 @@ ENTRY(feroceon_dma_inv_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+ .align 5
+ENTRY(feroceon_range_dma_inv_range)
+ mrs r2, cpsr
+ tst r0, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
+ tst r1, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
+ cmp r1, r0
+ subne r1, r1, #1 @ top address is inclusive
+ orr r3, r2, #PSR_I_BIT
+ msr cpsr_c, r3 @ disable interrupts
+ mcr p15, 5, r0, c15, c14, 0 @ D inv range start
+ mcr p15, 5, r1, c15, c14, 1 @ D inv range top
+ msr cpsr_c, r2 @ restore interrupts
+ mov pc, lr
+
/*
* dma_clean_range(start, end)
*
@@ -256,6 +321,19 @@ ENTRY(feroceon_dma_clean_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+ .align 5
+ENTRY(feroceon_range_dma_clean_range)
+ mrs r2, cpsr
+ cmp r1, r0
+ subne r1, r1, #1 @ top address is inclusive
+ orr r3, r2, #PSR_I_BIT
+ msr cpsr_c, r3 @ disable interrupts
+ mcr p15, 5, r0, c15, c13, 0 @ D clean range start
+ mcr p15, 5, r1, c15, c13, 1 @ D clean range top
+ msr cpsr_c, r2 @ restore interrupts
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov pc, lr
+
/*
* dma_flush_range(start, end)
*
@@ -274,6 +352,19 @@ ENTRY(feroceon_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+ .align 5
+ENTRY(feroceon_range_dma_flush_range)
+ mrs r2, cpsr
+ cmp r1, r0
+ subne r1, r1, #1 @ top address is inclusive
+ orr r3, r2, #PSR_I_BIT
+ msr cpsr_c, r3 @ disable interrupts
+ mcr p15, 5, r0, c15, c15, 0 @ D clean/inv range start
+ mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
+ msr cpsr_c, r2 @ restore interrupts
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov pc, lr
+
ENTRY(feroceon_cache_fns)
.long feroceon_flush_kern_cache_all
.long feroceon_flush_user_cache_all
@@ -285,12 +376,33 @@ ENTRY(feroceon_cache_fns)
.long feroceon_dma_clean_range
.long feroceon_dma_flush_range
+ENTRY(feroceon_range_cache_fns)
+ .long feroceon_flush_kern_cache_all
+ .long feroceon_flush_user_cache_all
+ .long feroceon_flush_user_cache_range
+ .long feroceon_coherent_kern_range
+ .long feroceon_coherent_user_range
+ .long feroceon_range_flush_kern_dcache_page
+ .long feroceon_range_dma_inv_range
+ .long feroceon_range_dma_clean_range
+ .long feroceon_range_dma_flush_range
+
.align 5
ENTRY(cpu_feroceon_dcache_clean_area)
+#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH)
+ mov r2, r0
+ mov r3, r1
+#endif
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
+#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH)
+1: mcr p15, 1, r2, c15, c9, 1 @ clean L2 entry
+ add r2, r2, #CACHE_DLINESIZE
+ subs r3, r3, #CACHE_DLINESIZE
+ bhi 1b
+#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
@@ -306,16 +418,25 @@ ENTRY(cpu_feroceon_dcache_clean_area)
.align 5
ENTRY(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
- mov ip, #0
-@ && 'Clean & Invalidate whole DCache'
-1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate
- bne 1b
- mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ /*
+ * Note: we wish to call __flush_whole_cache but we need to preserve
+ * lr to do so. The only way without touching main memory is to
+ * use r2 which is normally used to test the VM_EXEC flag, and
+ * compensate locally for the skipped ops if it is not set.
+ */
+ mov r2, lr @ abuse r2 to preserve lr
+ bl __flush_whole_cache
+ @ if r2 contains the VM_EXEC bit then the next 2 ops are done already
+ tst r2, #VM_EXEC
+ mcreq p15, 0, ip, c7, c5, 0 @ invalidate I cache
+ mcreq p15, 0, ip, c7, c10, 4 @ drain WB
+
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
-#endif
+ mov pc, r2
+#else
mov pc, lr
+#endif
/*
* cpu_feroceon_set_pte_ext(ptep, pte, ext)
@@ -345,6 +466,9 @@ ENTRY(cpu_feroceon_set_pte_ext)
str r2, [r0] @ hardware version
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH)
+ mcr p15, 1, r0, c15, c9, 1 @ clean L2 entry
+#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
mov pc, lr
@@ -369,14 +493,15 @@ __feroceon_setup:
.size __feroceon_setup, . - __feroceon_setup
/*
- * R
- * .RVI ZFRS BLDP WCAM
- * .011 0001 ..11 0101
+ * B
+ * R P
+ * .RVI UFRS BLDP WCAM
+ * .011 .001 ..11 0101
*
*/
.type feroceon_crval, #object
feroceon_crval:
- crval clear=0x00007f3f, mmuset=0x00003135, ucset=0x00001134
+ crval clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134
__INITDATA
@@ -414,6 +539,21 @@ cpu_feroceon_name:
.asciz "Feroceon"
.size cpu_feroceon_name, . - cpu_feroceon_name
+ .type cpu_88fr531_name, #object
+cpu_88fr531_name:
+ .asciz "Feroceon 88FR531-vd"
+ .size cpu_88fr531_name, . - cpu_88fr531_name
+
+ .type cpu_88fr571_name, #object
+cpu_88fr571_name:
+ .asciz "Feroceon 88FR571-vd"
+ .size cpu_88fr571_name, . - cpu_88fr571_name
+
+ .type cpu_88fr131_name, #object
+cpu_88fr131_name:
+ .asciz "Feroceon 88FR131"
+ .size cpu_88fr131_name, . - cpu_88fr131_name
+
.align
.section ".proc.info.init", #alloc, #execinstr
@@ -421,15 +561,15 @@ cpu_feroceon_name:
#ifdef CONFIG_CPU_FEROCEON_OLD_ID
.type __feroceon_old_id_proc_info,#object
__feroceon_old_id_proc_info:
- .long 0x41069260
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
+ .long 0x41009260
+ .long 0xff00fff0
+ .long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
+ .long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
@@ -445,17 +585,17 @@ __feroceon_old_id_proc_info:
.size __feroceon_old_id_proc_info, . - __feroceon_old_id_proc_info
#endif
- .type __feroceon_proc_info,#object
-__feroceon_proc_info:
+ .type __88fr531_proc_info,#object
+__88fr531_proc_info:
.long 0x56055310
.long 0xfffffff0
- .long PMD_TYPE_SECT | \
+ .long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
+ .long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
@@ -463,9 +603,59 @@ __feroceon_proc_info:
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_feroceon_name
+ .long cpu_88fr531_name
.long feroceon_processor_functions
.long v4wbi_tlb_fns
.long feroceon_user_fns
.long feroceon_cache_fns
- .size __feroceon_proc_info, . - __feroceon_proc_info
+ .size __88fr531_proc_info, . - __88fr531_proc_info
+
+ .type __88fr571_proc_info,#object
+__88fr571_proc_info:
+ .long 0x56155710
+ .long 0xfffffff0
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_BIT4 | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_BIT4 | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __feroceon_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_88fr571_name
+ .long feroceon_processor_functions
+ .long v4wbi_tlb_fns
+ .long feroceon_user_fns
+ .long feroceon_range_cache_fns
+ .size __88fr571_proc_info, . - __88fr571_proc_info
+
+ .type __88fr131_proc_info,#object
+__88fr131_proc_info:
+ .long 0x56251310
+ .long 0xfffffff0
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_BIT4 | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_BIT4 | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __feroceon_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_88fr131_name
+ .long feroceon_processor_functions
+ .long v4wbi_tlb_fns
+ .long feroceon_user_fns
+ .long feroceon_range_cache_fns
+ .size __88fr131_proc_info, . - __88fr131_proc_info
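
On the assembly side, the new __flush_whole_cache loop replaces the ARM926-style "test, clean, invalidate" instruction with an explicit clean+invalidate-by-set/way walk over the geometry that cpu_feroceon_proc_init stores in __cache_params_loc. The sketch below only illustrates that index arithmetic in user-space C: clean_inv_dcache_line() is a hypothetical stand-in for "mcr p15, 0, <set|way>, c7, c14, 2" and merely counts operations, and the 16 KB, 4-way geometry in main() is an arbitrary example, not something this patch assumes.

/*
 * User-space illustration of the set/way walk performed by
 * __flush_whole_cache.  clean_inv_dcache_line() is a hypothetical
 * stand-in for the clean+invalidate-by-set/way MCR; here it only
 * counts how many lines would be processed.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long nr_ops;

static void clean_inv_dcache_line(uint32_t setway)
{
	(void)setway;			/* would be an MCR in the kernel */
	nr_ops++;
}

static void flush_whole_dcache(uint32_t max_set, uint32_t max_way)
{
	uint32_t set = max_set;

	/* Highest set/way index down to zero, like the subs/bcs loops. */
	for (;;) {
		uint32_t way = max_way;

		for (;;) {
			clean_inv_dcache_line(set | way);
			if (way < (1u << 30))
				break;
			way -= 1u << 30;	/* next way */
		}
		if (set < (1u << 5))
			break;
		set -= 1u << 5;			/* next set */
	}
}

int main(void)
{
	/*
	 * Hypothetical 16 KB, 4-way, 32-byte-line D-cache: 128 sets,
	 * 4 ways.  __cache_params_loc would then hold (128 - 1) << 5
	 * and (4 - 1) << 30.
	 */
	flush_whole_dcache(127u << 5, 3u << 30);
	printf("%lu clean+invalidate operations\n", nr_ops);	/* 512 */
	return 0;
}

The D-cache geometry varies between Feroceon cores (1-way versus 4-way, different sizes), which is why cpu_feroceon_proc_init derives the set and way maxima from the cache type register at boot instead of hard-coding them.
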