author    Tony Luck <tony.luck@intel.com>	2005-10-31 10:51:57 -0800
committer Tony Luck <tony.luck@intel.com>	2005-10-31 10:51:57 -0800
commit    c7fb577e2a6cb04732541f2dc402bd46747f7558 (patch)
tree      df3b1a1922ed13bfbcc45d08650c38beeb1a7bd1 /arch/mips/mm
parent    9cec58dc138d6fcad9f447a19c8ff69f6540e667 (diff)
parent    581c1b14394aee60aff46ea67d05483261ed6527 (diff)
manual update from upstream:

Applied Al's change 06a544971fad0992fe8b92c5647538d573089dd4 to the new
location of swiotlb.c.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile           |    2
-rw-r--r--  arch/mips/mm/c-r3k.c            |    6
-rw-r--r--  arch/mips/mm/c-r4k.c            |  145
-rw-r--r--  arch/mips/mm/c-sb1.c            |   10
-rw-r--r--  arch/mips/mm/c-tx39.c           |   16
-rw-r--r--  arch/mips/mm/cache.c            |  106
-rw-r--r--  arch/mips/mm/cerr-sb1.c         |   54
-rw-r--r--  arch/mips/mm/cex-sb1.S          |    5
-rw-r--r--  arch/mips/mm/dma-coherent.c     |    6
-rw-r--r--  arch/mips/mm/dma-ip27.c         |    4
-rw-r--r--  arch/mips/mm/dma-ip32.c         |    4
-rw-r--r--  arch/mips/mm/dma-noncoherent.c  |   50
-rw-r--r--  arch/mips/mm/fault.c            |   17
-rw-r--r--  arch/mips/mm/highmem.c          |   19
-rw-r--r--  arch/mips/mm/init.c             |   34
-rw-r--r--  arch/mips/mm/ioremap.c          |   32
-rw-r--r--  arch/mips/mm/pg-r4k.c           |   21
-rw-r--r--  arch/mips/mm/pg-sb1.c           |   65
-rw-r--r--  arch/mips/mm/pgtable-32.c       |   36
-rw-r--r--  arch/mips/mm/sc-rm7k.c          |   39
-rw-r--r--  arch/mips/mm/tlb-andes.c        |    4
-rw-r--r--  arch/mips/mm/tlb-r4k.c          |   70
-rw-r--r--  arch/mips/mm/tlb-sb1.c          |  376
-rw-r--r--  arch/mips/mm/tlbex.c            |  245
24 files changed, 513 insertions(+), 853 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index b56a0abdc3d..b0178da019f 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o pg-r4k.o tlb-r8k.o
obj-$(CONFIG_CPU_RM7000) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
obj-$(CONFIG_CPU_RM9000) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
obj-$(CONFIG_CPU_SB1) += c-sb1.o cerr-sb1.o cex-sb1.o pg-sb1.o \
- tlb-sb1.o
+ tlb-r4k.o
obj-$(CONFIG_CPU_TX39XX) += c-tx39.o pg-r4k.o tlb-r3k.o
obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index c659f99eb39..27f4fa25e8c 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -221,12 +221,14 @@ static inline unsigned long get_phys_page (unsigned long addr,
struct mm_struct *mm)
{
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long physpage;
pgd = pgd_offset(mm, addr);
- pmd = pmd_offset(pgd, addr);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
pte = pte_offset(pmd, addr);
if ((physpage = pte_val(*pte)) & _PAGE_VALID)
@@ -317,7 +319,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
r3k_flush_dcache_range(start, start + size);
}
-void __init ld_mmu_r23000(void)
+void __init r3k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
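The hunk above is the first of many in this patch that insert a pud_offset()
step between the pgd and pmd lookups, following the kernel-wide move to
four-level page tables. A minimal standalone sketch of the walk, assuming a
folded pud level as on 32-bit MIPS (types and helpers below are illustrative
stand-ins for what <asm/pgtable.h> provides):

/* Hypothetical, self-contained model of the pgd -> pud -> pmd -> pte walk
 * this patch introduces; real kernels get the types and helpers from
 * <asm/pgtable.h>. */
#include <stdio.h>

typedef struct { unsigned long v; } pgd_t, pud_t, pmd_t, pte_t;

/* With a folded pud level (as on 32-bit MIPS), each "offset" step is
 * effectively an identity cast, which is what makes these hunks mechanical. */
static pud_t *pud_offset(pgd_t *pgd, unsigned long addr) { return (pud_t *)pgd; }
static pmd_t *pmd_offset(pud_t *pud, unsigned long addr) { return (pmd_t *)pud; }
static pte_t *pte_offset(pmd_t *pmd, unsigned long addr) { return (pte_t *)pmd; }

int main(void)
{
	pgd_t pgd = { 0x1234 };
	unsigned long addr = 0xdeadb000;

	pud_t *pud = pud_offset(&pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	pte_t *pte = pte_offset(pmd, addr);

	printf("pte value: %#lx\n", pte->v); /* same cell: the level is folded */
	return 0;
}

With the level folded, the extra step costs nothing at runtime, which is why
these conversions are behavior-preserving.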
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 5ea84bc98c6..38223b44d96 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -16,6 +16,7 @@
#include <asm/bcache.h>
#include <asm/bootinfo.h>
+#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
@@ -26,8 +27,14 @@
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
-static unsigned long icache_size, dcache_size, scache_size;
+/*
+ * Must die.
+ */
+static unsigned long icache_size __read_mostly;
+static unsigned long dcache_size __read_mostly;
+static unsigned long scache_size __read_mostly;
/*
* Dummy cache handling routines for machines without boardcaches
@@ -43,8 +50,8 @@ static struct bcache_ops no_sc_ops = {
struct bcache_ops *bcops = &no_sc_ops;
-#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x2010)
-#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x2020)
+#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
#define R4600_HIT_CACHEOP_WAR_IMPL \
do { \
@@ -190,12 +197,12 @@ static inline void r4k_blast_icache_page_indexed_setup(void)
if (ic_lsize == 16)
r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
else if (ic_lsize == 32) {
- if (TX49XX_ICACHE_INDEX_INV_WAR)
- r4k_blast_icache_page_indexed =
- tx49_blast_icache32_page_indexed;
- else if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
+ if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
r4k_blast_icache_page_indexed =
blast_icache32_r4600_v1_page_indexed;
+ else if (TX49XX_ICACHE_INDEX_INV_WAR)
+ r4k_blast_icache_page_indexed =
+ tx49_blast_icache32_page_indexed;
else
r4k_blast_icache_page_indexed =
blast_icache32_page_indexed;
@@ -361,24 +368,33 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
struct flush_cache_page_args {
struct vm_area_struct *vma;
- unsigned long page;
+ unsigned long addr;
};
static inline void local_r4k_flush_cache_page(void *args)
{
struct flush_cache_page_args *fcp_args = args;
struct vm_area_struct *vma = fcp_args->vma;
- unsigned long page = fcp_args->page;
+ unsigned long addr = fcp_args->addr;
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
- page &= PAGE_MASK;
- pgdp = pgd_offset(mm, page);
- pmdp = pmd_offset(pgdp, page);
- ptep = pte_offset(pmdp, page);
+ /*
+	 * If it owns no valid ASID yet, it cannot possibly have gotten
+ * this page into the cache.
+ */
+ if (cpu_context(smp_processor_id(), mm) == 0)
+ return;
+
+ addr &= PAGE_MASK;
+ pgdp = pgd_offset(mm, addr);
+ pudp = pud_offset(pgdp, addr);
+ pmdp = pmd_offset(pudp, addr);
+ ptep = pte_offset(pmdp, addr);
/*
* If the page isn't marked valid, the page cannot possibly be
@@ -395,12 +411,12 @@ static inline void local_r4k_flush_cache_page(void *args)
*/
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
- r4k_blast_dcache_page(page);
+ r4k_blast_dcache_page(addr);
if (exec && !cpu_icache_snoops_remote_store)
- r4k_blast_scache_page(page);
+ r4k_blast_scache_page(addr);
}
if (exec)
- r4k_blast_icache_page(page);
+ r4k_blast_icache_page(addr);
return;
}
@@ -409,36 +425,30 @@ static inline void local_r4k_flush_cache_page(void *args)
* Do indexed flush, too much work to get the (possible) TLB refills
* to work correctly.
*/
- page = INDEX_BASE + (page & (dcache_size - 1));
+ addr = INDEX_BASE + (addr & (dcache_size - 1));
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
- r4k_blast_dcache_page_indexed(page);
+ r4k_blast_dcache_page_indexed(addr);
if (exec && !cpu_icache_snoops_remote_store)
- r4k_blast_scache_page_indexed(page);
+ r4k_blast_scache_page_indexed(addr);
}
if (exec) {
if (cpu_has_vtag_icache) {
int cpu = smp_processor_id();
- if (cpu_context(cpu, vma->vm_mm) != 0)
- drop_mmu_context(vma->vm_mm, cpu);
+ if (cpu_context(cpu, mm) != 0)
+ drop_mmu_context(mm, cpu);
} else
- r4k_blast_icache_page_indexed(page);
+ r4k_blast_icache_page_indexed(addr);
}
}
-static void r4k_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
+static void r4k_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
{
struct flush_cache_page_args args;
- /*
- * If ownes no valid ASID yet, cannot possibly have gotten
- * this page into the cache.
- */
- if (cpu_context(smp_processor_id(), vma->vm_mm) == 0)
- return;
-
args.vma = vma;
- args.page = page;
+ args.addr = addr;
on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}
@@ -454,16 +464,16 @@ static void r4k_flush_data_cache_page(unsigned long addr)
}
struct flush_icache_range_args {
- unsigned long start;
- unsigned long end;
+ unsigned long __user start;
+ unsigned long __user end;
};
static inline void local_r4k_flush_icache_range(void *args)
{
struct flush_icache_range_args *fir_args = args;
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
- unsigned long ic_lsize = current_cpu_data.icache.linesz;
- unsigned long sc_lsize = current_cpu_data.scache.linesz;
+ unsigned long dc_lsize = cpu_dcache_line_size();
+ unsigned long ic_lsize = cpu_icache_line_size();
+ unsigned long sc_lsize = cpu_scache_line_size();
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
unsigned long addr, aend;
@@ -472,6 +482,7 @@ static inline void local_r4k_flush_icache_range(void *args)
if (end - start > dcache_size) {
r4k_blast_dcache();
} else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
addr = start & ~(dc_lsize - 1);
aend = (end - 1) & ~(dc_lsize - 1);
@@ -492,7 +503,7 @@ static inline void local_r4k_flush_icache_range(void *args)
aend = (end - 1) & ~(sc_lsize - 1);
while (1) {
- /* Hit_Writeback_Inv_D */
+ /* Hit_Writeback_Inv_SD */
protected_writeback_scache_line(addr);
if (addr == aend)
break;
@@ -517,7 +528,8 @@ static inline void local_r4k_flush_icache_range(void *args)
}
}
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+static void r4k_flush_icache_range(unsigned long __user start,
+ unsigned long __user end)
{
struct flush_icache_range_args args;
@@ -525,6 +537,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.end = end;
on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+ instruction_hazard();
}
/*
@@ -613,7 +626,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
BUG_ON(size == 0);
if (cpu_has_subset_pcaches) {
- unsigned long sc_lsize = current_cpu_data.scache.linesz;
+ unsigned long sc_lsize = cpu_scache_line_size();
if (size >= scache_size) {
r4k_blast_scache();
@@ -639,7 +652,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
if (size >= dcache_size) {
r4k_blast_dcache();
} else {
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+ unsigned long dc_lsize = cpu_dcache_line_size();
R4600_HIT_CACHEOP_WAR_IMPL;
a = addr & ~(dc_lsize - 1);
@@ -663,7 +676,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
BUG_ON(size == 0);
if (cpu_has_subset_pcaches) {
- unsigned long sc_lsize = current_cpu_data.scache.linesz;
+ unsigned long sc_lsize = cpu_scache_line_size();
if (size >= scache_size) {
r4k_blast_scache();
@@ -684,7 +697,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
if (size >= dcache_size) {
r4k_blast_dcache();
} else {
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
+ unsigned long dc_lsize = cpu_dcache_line_size();
R4600_HIT_CACHEOP_WAR_IMPL;
a = addr & ~(dc_lsize - 1);
@@ -708,9 +721,9 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
*/
static void local_r4k_flush_cache_sigtramp(void * arg)
{
- unsigned long ic_lsize = current_cpu_data.icache.linesz;
- unsigned long dc_lsize = current_cpu_data.dcache.linesz;
- unsigned long sc_lsize = current_cpu_data.scache.linesz;
+ unsigned long ic_lsize = cpu_icache_line_size();
+ unsigned long dc_lsize = cpu_dcache_line_size();
+ unsigned long sc_lsize = cpu_scache_line_size();
unsigned long addr = (unsigned long) arg;
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -762,6 +775,7 @@ static inline void rm7k_erratum31(void)
for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
__asm__ __volatile__ (
+ ".set push\n\t"
".set noreorder\n\t"
".set mips3\n\t"
"cache\t%1, 0(%0)\n\t"
@@ -776,8 +790,7 @@ static inline void rm7k_erratum31(void)
"cache\t%1, 0x1000(%0)\n\t"
"cache\t%1, 0x2000(%0)\n\t"
"cache\t%1, 0x3000(%0)\n\t"
- ".set\tmips0\n\t"
- ".set\treorder\n\t"
+ ".set pop\n"
:
: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
}
@@ -1011,9 +1024,19 @@ static void __init probe_pcache(void)
* normally they'd suffer from aliases but magic in the hardware deals
* with that for us so we don't need to take care ourselves.
*/
- if (c->cputype != CPU_R10000 && c->cputype != CPU_R12000)
- if (c->dcache.waysize > PAGE_SIZE)
- c->dcache.flags |= MIPS_CACHE_ALIASES;
+ switch (c->cputype) {
+ case CPU_20KC:
+ case CPU_25KF:
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_SB1:
+ break;
+ case CPU_24K:
+ if (!(read_c0_config7() & (1 << 16)))
+ default:
+ if (c->dcache.waysize > PAGE_SIZE)
+ c->dcache.flags |= MIPS_CACHE_ALIASES;
+ }
switch (c->cputype) {
case CPU_20KC:
@@ -1024,7 +1047,11 @@ static void __init probe_pcache(void)
c->icache.flags |= MIPS_CACHE_VTAG;
break;
+ case CPU_AU1000:
case CPU_AU1500:
+ case CPU_AU1100:
+ case CPU_AU1550:
+ case CPU_AU1200:
c->icache.flags |= MIPS_CACHE_IC_F_DC;
break;
}
@@ -1102,7 +1129,6 @@ static int __init probe_scache(void)
return 1;
}
-typedef int (*probe_func_t)(unsigned long);
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
@@ -1110,7 +1136,6 @@ static void __init setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
- probe_func_t probe_scache_kseg1;
int sc_present = 0;
/*
@@ -1123,8 +1148,7 @@ static void __init setup_scache(void)
case CPU_R4000MC:
case CPU_R4400SC:
case CPU_R4400MC:
- probe_scache_kseg1 = (probe_func_t) (CKSEG1ADDR(&probe_scache));
- sc_present = probe_scache_kseg1(config);
+ sc_present = run_uncached(probe_scache);
if (sc_present)
c->options |= MIPS_CPU_CACHE_CDEX_S;
break;
@@ -1198,7 +1222,7 @@ static inline void coherency_setup(void)
}
}
-void __init ld_mmu_r4xx0(void)
+void __init r4k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
@@ -1206,15 +1230,11 @@ void __init ld_mmu_r4xx0(void)
struct cpuinfo_mips *c = &current_cpu_data;
/* Default cache error handler for R4000 and R5000 family */
- memcpy((void *)(CAC_BASE + 0x100), &except_vec2_generic, 0x80);
- memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80);
+ set_uncached_handler (0x100, &except_vec2_generic, 0x80);
probe_pcache();
setup_scache();
- if (c->dcache.sets * c->dcache.ways > PAGE_SIZE)
- c->dcache.flags |= MIPS_CACHE_ALIASES;
-
r4k_blast_dcache_page_setup();
r4k_blast_dcache_page_indexed_setup();
r4k_blast_dcache_setup();
@@ -1252,9 +1272,8 @@ void __init ld_mmu_r4xx0(void)
_dma_cache_inv = r4k_dma_cache_inv;
#endif
- __flush_cache_all();
- coherency_setup();
-
build_clear_page();
build_copy_page();
+ local_r4k___flush_cache_all(NULL);
+ coherency_setup();
}
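One small fix above widens the cpu_is_r4600_v?_x() constants to full 32-bit
values (0x2010 -> 0x00002010). A standalone sketch of the revision test,
assuming the standard PRId layout (implementation in bits 15..8, major
revision in bits 7..4, minor revision in bits 3..0; sample values are
illustrative):

/* Mask off the minor revision, then compare implementation plus major
 * revision against the R4600 v1.x signature. */
#include <stdio.h>

static int is_r4600_v1_x(unsigned int prid)
{
	return (prid & 0xfffffff0) == 0x00002010;
}

int main(void)
{
	unsigned int samples[] = { 0x00002010, 0x00002013, 0x00002020 };

	for (int i = 0; i < 3; i++)
		printf("prid %#010x -> R4600 v1.x? %s\n",
		       samples[i], is_r4600_v1_x(samples[i]) ? "yes" : "no");
	return 0;
}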
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 502f68c664b..2f08b535f20 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -235,7 +235,7 @@ static inline void __sb1_flush_icache_range(unsigned long start,
/*
* Invalidate all caches on this CPU
*/
-static void local_sb1___flush_cache_all(void)
+static void __attribute_used__ local_sb1___flush_cache_all(void)
{
__sb1_writeback_inv_dcache_all();
__sb1_flush_icache_all();
@@ -492,19 +492,17 @@ static __init void probe_cache_sizes(void)
}
/*
- * This is called from loadmmu.c. We have to set up all the
+ * This is called from cache.c. We have to set up all the
* memory management function pointers, as well as initialize
* the caches and tlbs
*/
-void ld_mmu_sb1(void)
+void sb1_cache_init(void)
{
extern char except_vec2_sb1;
extern char handle_vec2_sb1;
/* Special cache error handler for SB1 */
- memcpy((void *)(CAC_BASE + 0x100), &except_vec2_sb1, 0x80);
- memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_sb1, 0x80);
- memcpy((void *)CKSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80);
+ set_uncached_handler (0x100, &except_vec2_sb1, 0x80);
probe_cache_sizes();
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index ff5afab64b2..0a97a9434eb 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -167,15 +167,16 @@ static void tx39_flush_cache_mm(struct mm_struct *mm)
static void tx39_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- struct mm_struct *mm = vma->vm_mm;
+ int exec;
- if (!cpu_has_dc_aliases)
+ if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
return;
- if (cpu_context(smp_processor_id(), mm) != 0) {
+ exec = vma->vm_flags & VM_EXEC;
+ if (cpu_has_dc_aliases || exec)
tx39_blast_dcache();
+ if (exec)
tx39_blast_icache();
- }
}
static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
@@ -183,6 +184,7 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgdp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -195,7 +197,8 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
page &= PAGE_MASK;
pgdp = pgd_offset(mm, page);
- pmdp = pmd_offset(pgdp, page);
+ pudp = pud_offset(pgdp, page);
+ pmdp = pmd_offset(pudp, page);
ptep = pte_offset(pmdp, page);
/*
@@ -407,7 +410,7 @@ static __init void tx39_probe_cache(void)
}
}
-void __init ld_mmu_tx39(void)
+void __init tx39_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
@@ -490,4 +493,5 @@ void __init ld_mmu_tx39(void)
build_clear_page();
build_copy_page();
+ tx39h_flush_icache_all();
}
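The rewritten tx39_flush_cache_range() above changes the flush policy: return
early when the mm holds no live ASID on this CPU, blast the D-cache when
aliases or an executable mapping require it, and the I-cache only for
executable mappings. A standalone model of that decision (flag values are
illustrative stand-ins):

#include <stdio.h>

#define VM_EXEC 0x4

static void flush_range(unsigned long asid, unsigned long vm_flags,
			int has_dc_aliases)
{
	int exec = vm_flags & VM_EXEC;

	if (!asid)			/* nothing cached under this mm */
		return;
	if (has_dc_aliases || exec)
		printf("blast dcache\n");
	if (exec)
		printf("blast icache\n");
}

int main(void)
{
	flush_range(0, VM_EXEC, 0);	/* no ASID: no flush at all */
	flush_range(5, VM_EXEC, 0);	/* executable: D-cache + I-cache */
	flush_range(5, 0, 1);		/* aliases only: D-cache */
	return 0;
}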
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 1d95cdb77be..314701a66b1 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -23,8 +23,10 @@ void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
+ unsigned long pfn);
+void (*flush_icache_range)(unsigned long __user start,
+ unsigned long __user end);
void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);
/* MIPS specific cache operations */
@@ -32,6 +34,8 @@ void (*flush_cache_sigtramp)(unsigned long addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);
+EXPORT_SYMBOL(flush_data_cache_page);
+
#ifdef CONFIG_DMA_NONCOHERENT
/* DMA cache operations. */
@@ -49,10 +53,12 @@ EXPORT_SYMBOL(_dma_cache_inv);
* We could optimize the case where the cache argument is not BCACHE but
* that seems very atypical use ...
*/
-asmlinkage int sys_cacheflush(unsigned long addr, unsigned long int bytes,
- unsigned int cache)
+asmlinkage int sys_cacheflush(unsigned long __user addr,
+ unsigned long bytes, unsigned int cache)
{
- if (!access_ok(VERIFY_WRITE, (void *) addr, bytes))
+ if (bytes == 0)
+ return 0;
+ if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
return -EFAULT;
flush_icache_range(addr, addr + bytes);
@@ -100,58 +106,48 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
}
}
-extern void ld_mmu_r23000(void);
-extern void ld_mmu_r4xx0(void);
-extern void ld_mmu_tx39(void);
-extern void ld_mmu_r6000(void);
-extern void ld_mmu_tfp(void);
-extern void ld_mmu_andes(void);
-extern void ld_mmu_sb1(void);
+#define __weak __attribute__((weak))
+
+static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";
void __init cpu_cache_init(void)
{
- if (cpu_has_4ktlb) {
-#if defined(CONFIG_CPU_R4X00) || defined(CONFIG_CPU_VR41XX) || \
- defined(CONFIG_CPU_R4300) || defined(CONFIG_CPU_R5000) || \
- defined(CONFIG_CPU_NEVADA) || defined(CONFIG_CPU_R5432) || \
- defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_MIPS32) || \
- defined(CONFIG_CPU_MIPS64) || defined(CONFIG_CPU_TX49XX) || \
- defined(CONFIG_CPU_RM7000) || defined(CONFIG_CPU_RM9000)
- ld_mmu_r4xx0();
-#endif
- } else switch (current_cpu_data.cputype) {
-#ifdef CONFIG_CPU_R3000
- case CPU_R2000:
- case CPU_R3000:
- case CPU_R3000A:
- case CPU_R3081E:
- ld_mmu_r23000();
- break;
-#endif
-#ifdef CONFIG_CPU_TX39XX
- case CPU_TX3912:
- case CPU_TX3922:
- case CPU_TX3927:
- ld_mmu_tx39();
- break;
-#endif
-#ifdef CONFIG_CPU_R10000
- case CPU_R10000:
- case CPU_R12000:
- ld_mmu_r4xx0();
- break;
-#endif
-#ifdef CONFIG_CPU_SB1
- case CPU_SB1:
- ld_mmu_sb1();
- break;
-#endif
-
- case CPU_R8000:
- panic("R8000 is unsupported");
- break;
-
- default:
- panic("Yeee, unsupported cache architecture.");
+ if (cpu_has_3k_cache) {
+ extern void __weak r3k_cache_init(void);
+
+ r3k_cache_init();
+ return;
+ }
+ if (cpu_has_6k_cache) {
+ extern void __weak r6k_cache_init(void);
+
+ r6k_cache_init();
+ return;
+ }
+ if (cpu_has_4k_cache) {
+ extern void __weak r4k_cache_init(void);
+
+ r4k_cache_init();
+ return;
}
+ if (cpu_has_8k_cache) {
+ extern void __weak r8k_cache_init(void);
+
+ r8k_cache_init();
+ return;
+ }
+ if (cpu_has_tx39_cache) {
+ extern void __weak tx39_cache_init(void);
+
+ tx39_cache_init();
+ return;
+ }
+ if (cpu_has_sb1_cache) {
+ extern void __weak sb1_cache_init(void);
+
+ sb1_cache_init();
+ return;
+ }
+
+ panic(cache_panic);
}
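The new cpu_cache_init() above trades the CONFIG_* ifdef ladder for runtime
cpu_has_* tests plus weak externs, so a kernel that doesn't link a given
*_cache_init() still builds. A toy demonstration of the
__attribute__((weak)) linkage trick (GCC/Clang; optional_init is a
hypothetical name — in cache.c the cpu_has_* guard guarantees the strong
definition is present before the call is made):

#include <stdio.h>

/* With no strong definition linked in, a weak reference resolves to NULL
 * and can be tested before the call. */
extern void optional_init(void) __attribute__((weak));

int main(void)
{
	if (optional_init)
		optional_init();
	else
		printf("optional_init() not linked in, skipping\n");
	return 0;
}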
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 7166ffe6350..1cf3c6006cc 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -19,13 +19,19 @@
#include <linux/sched.h>
#include <asm/mipsregs.h>
#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
-#ifndef CONFIG_SIBYTE_BUS_WATCHER
+#if !defined(CONFIG_SIBYTE_BUS_WATCHER) || defined(CONFIG_SIBYTE_BW_TRACE)
#include <asm/io.h>
-#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_scd.h>
#endif
+/*
+ * We'd like to dump the L2_ECC_TAG register on errors, but errata make
+ * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
+ */
+#undef DUMP_L2_ECC_TAG_ON_ERROR
+
/* SB1 definitions */
/* XXX should come from config1 XXX */
@@ -139,12 +145,18 @@ static inline void breakout_cerrd(unsigned int val)
static void check_bus_watcher(void)
{
uint32_t status, l2_err, memio_err;
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+ uint64_t l2_tag;
+#endif
/* Destructive read, clears register and interrupt */
status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
/* Bit 31 is always on, but there's no #define for that */
if (status & ~(1UL << 31)) {
l2_err = csr_in32(IOADDR(A_BUS_L2_ERRORS));
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+ l2_tag = in64(IO_SPACE_BASE | A_L2_ECC_TAG);
+#endif
memio_err = csr_in32(IOADDR(A_BUS_MEM_IO_ERRORS));
prom_printf("Bus watcher error counters: %08x %08x\n", l2_err, memio_err);
prom_printf("\nLast recorded signature:\n");
@@ -153,6 +165,9 @@ static void check_bus_watcher(void)
(int)(G_SCD_BERR_TID(status) >> 6),
(int)G_SCD_BERR_RID(status),
(int)G_SCD_BERR_DCODE(status));
+#ifdef DUMP_L2_ECC_TAG_ON_ERROR
+ prom_printf("Last L2 tag w/ bad ECC: %016llx\n", l2_tag);
+#endif
} else {
prom_printf("Bus watcher indicates no error\n");
}
@@ -166,6 +181,16 @@ asmlinkage void sb1_cache_error(void)
uint64_t cerr_dpa;
uint32_t errctl, cerr_i, cerr_d, dpalo, dpahi, eepc, res;
+#ifdef CONFIG_SIBYTE_BW_TRACE
+ /* Freeze the trace buffer now */
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+ csr_out32(M_BCM1480_SCD_TRACE_CFG_FREEZE, IO_SPACE_BASE | A_SCD_TRACE_CFG);
+#else
+ csr_out32(M_SCD_TRACE_CFG_FREEZE, IO_SPACE_BASE | A_SCD_TRACE_CFG);
+#endif
+ prom_printf("Trace buffer frozen\n");
+#endif
+
prom_printf("Cache error exception on CPU %x:\n",
(read_c0_prid() >> 25) & 0x7);
@@ -229,11 +254,19 @@ asmlinkage void sb1_cache_error(void)
check_bus_watcher();
- while (1);
/*
- * This tends to make things get really ugly; let's just stall instead.
- * panic("Can't handle the cache error!");
+ * Calling panic() when a fatal cache error occurs scrambles the
+ * state of the system (and the cache), making it difficult to
+ * investigate after the fact. However, if you just stall the CPU,
+ * the other CPU may keep on running, which is typically very
+ * undesirable.
*/
+#ifdef CONFIG_SB1_CERR_STALL
+ while (1)
+ ;
+#else
+ panic("unhandled cache error");
+#endif
}
@@ -434,7 +467,8 @@ static struct dc_state dc_states[] = {
};
#define DC_TAG_VALID(state) \
- (((state) == 0xf) || ((state) == 0x13) || ((state) == 0x19) || ((state == 0x16)) || ((state) == 0x1c))
+ (((state) == 0x0) || ((state) == 0xf) || ((state) == 0x13) || \
+ ((state) == 0x19) || ((state) == 0x16) || ((state) == 0x1c))
static char *dc_state_str(unsigned char state)
{
@@ -505,6 +539,7 @@ static uint32_t extract_dc(unsigned short addr, int data)
uint64_t datalo;
uint32_t datalohi, datalolo, datahi;
int offset;
+ char bad_ecc = 0;
for (offset = 0; offset < 4; offset++) {
/* Index-load-data-D */
@@ -525,8 +560,7 @@ static uint32_t extract_dc(unsigned short addr, int data)
ecc = dc_ecc(datalo);
if (ecc != datahi) {
int bits = 0;
- prom_printf(" ** bad ECC (%02x %02x) ->",
- datahi, ecc);
+ bad_ecc |= 1 << (3-offset);
ecc ^= datahi;
while (ecc) {
if (ecc & 1) bits++;
@@ -537,6 +571,10 @@ static uint32_t extract_dc(unsigned short addr, int data)
prom_printf(" %02X-%016llX", datahi, datalo);
}
prom_printf("\n");
+ if (bad_ecc)
+ prom_printf(" dwords w/ bad ECC: %d %d %d %d\n",
+ !!(bad_ecc & 8), !!(bad_ecc & 4),
+ !!(bad_ecc & 2), !!(bad_ecc & 1));
}
}
return res;
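extract_dc() above still computes the ECC syndrome per doubleword but now
batches the report instead of printing inline. A standalone sketch of the
syndrome popcount it performs (values illustrative):

/* XOR the stored check bits with the recomputed ECC, then count the
 * differing bits to judge how badly the cache line is damaged. */
#include <stdio.h>

static int ecc_error_bits(unsigned char stored, unsigned char computed)
{
	unsigned char syndrome = stored ^ computed;
	int bits = 0;

	while (syndrome) {
		bits += syndrome & 1;
		syndrome >>= 1;
	}
	return bits;
}

int main(void)
{
	printf("%d differing ECC bits\n", ecc_error_bits(0x5a, 0x58));
	return 0;
}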
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 2c3a23aa88c..0e71580774f 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -64,6 +64,10 @@ LEAF(except_vec2_sb1)
sd k0,0x170($0)
sd k1,0x178($0)
+#if CONFIG_SB1_CEX_ALWAYS_FATAL
+ j handle_vec2_sb1
+ nop
+#else
/*
* M_ERRCTL_RECOVERABLE is bit 31, which makes it easy to tell
* if we can fast-path out of here for a h/w-recovered error.
@@ -134,6 +138,7 @@ unrecoverable:
/* Unrecoverable Icache or Dcache error; log it and/or fail */
j handle_vec2_sb1
nop
+#endif
END(except_vec2_sb1)
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
index 97a50d38c98..f6b3c722230 100644
--- a/arch/mips/mm/dma-coherent.c
+++ b/arch/mips/mm/dma-coherent.c
@@ -9,16 +9,16 @@
*/
#include <linux/config.h>
#include <linux/types.h>
+#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <linux/pci.h>
#include <asm/cache.h>
#include <asm/io.h>
void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
/* ignore region specifiers */
@@ -39,7 +39,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
__attribute__((alias("dma_alloc_noncoherent")));
EXPORT_SYMBOL(dma_alloc_coherent);
diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c
index aa7c94b5d78..8da19fd22ac 100644
--- a/arch/mips/mm/dma-ip27.c
+++ b/arch/mips/mm/dma-ip27.c
@@ -22,7 +22,7 @@
pdev_to_baddr(to_pci_dev(dev), (addr))
void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
@@ -44,7 +44,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
__attribute__((alias("dma_alloc_noncoherent")));
EXPORT_SYMBOL(dma_alloc_coherent);
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
index 2cbe196c35f..a7e3072ff78 100644
--- a/arch/mips/mm/dma-ip32.c
+++ b/arch/mips/mm/dma-ip32.c
@@ -37,7 +37,7 @@
#define RAM_OFFSET_MASK 0x3fffffff
void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
/* ignore region specifiers */
@@ -61,7 +61,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 59e54f12212..cd4ea8474f8 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -24,7 +24,7 @@
*/
void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
/* ignore region specifiers */
@@ -45,7 +45,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, int gfp)
+ dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
@@ -105,22 +105,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
{
unsigned long addr = (unsigned long) ptr;
- switch (direction) {
- case DMA_TO_DEVICE:
- dma_cache_wback(addr, size);
- break;
-
- case DMA_FROM_DEVICE:
- dma_cache_inv(addr, size);
- break;
-
- case DMA_BIDIRECTIONAL:
- dma_cache_wback_inv(addr, size);
- break;
-
- default:
- BUG();
- }
+ __dma_sync(addr, size, direction);
return virt_to_phys(ptr);
}
@@ -133,22 +118,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
unsigned long addr;
addr = dma_addr + PAGE_OFFSET;
- switch (direction) {
- case DMA_TO_DEVICE:
- //dma_cache_wback(addr, size);
- break;
-
- case DMA_FROM_DEVICE:
- //dma_cache_inv(addr, size);
- break;
-
- case DMA_BIDIRECTIONAL:
- //dma_cache_wback_inv(addr, size);
- break;
-
- default:
- BUG();
- }
+ //__dma_sync(addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);
@@ -164,10 +134,11 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned long addr;
addr = (unsigned long) page_address(sg->page);
- if (addr)
+ if (addr) {
__dma_sync(addr + sg->offset, sg->length, direction);
- sg->dma_address = (dma_addr_t)
- (page_to_phys(sg->page) + sg->offset);
+ sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
+ + sg->offset;
+ }
}
return nents;
@@ -218,9 +189,8 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
for (i = 0; i < nhwentries; i++, sg++) {
addr = (unsigned long) page_address(sg->page);
- if (!addr)
- continue;
- dma_cache_wback_inv(addr + sg->offset, sg->length);
+ if (addr)
+ __dma_sync(addr + sg->offset, sg->length, direction);
}
}
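The map/unmap paths above collapse their per-direction switch statements into
a single __dma_sync() helper whose body isn't part of this hunk. A plausible
shape, reconstructed from the switches being deleted — treat it as a sketch,
not the file's actual helper (cache ops are stubbed so it compiles
standalone):

#include <stdio.h>
#include <stdlib.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

static void dma_cache_wback(unsigned long a, unsigned long s)     { printf("wback %#lx+%lu\n", a, s); }
static void dma_cache_inv(unsigned long a, unsigned long s)       { printf("inv   %#lx+%lu\n", a, s); }
static void dma_cache_wback_inv(unsigned long a, unsigned long s) { printf("wbinv %#lx+%lu\n", a, s); }

static void __dma_sync(unsigned long addr, unsigned long size,
		       enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:	/* CPU wrote the buffer: push it to memory */
		dma_cache_wback(addr, size);
		break;
	case DMA_FROM_DEVICE:	/* device will write: discard stale lines */
		dma_cache_inv(addr, size);
		break;
	case DMA_BIDIRECTIONAL:	/* both: write back, then invalidate */
		dma_cache_wback_inv(addr, size);
		break;
	default:
		abort();	/* stands in for BUG() */
	}
}

int main(void)
{
	__dma_sync(0x80001000, 4096, DMA_TO_DEVICE);
	__dma_sync(0x80002000, 4096, DMA_FROM_DEVICE);
	return 0;
}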
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index ec8077c74e9..2d9624fd10e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -25,6 +25,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
+#include <asm/highmem.h> /* For VMALLOC_END */
/*
* This routine handles page faults. It determines the address,
@@ -57,7 +58,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
* only copy the information from the master page table,
* nothing more.
*/
- if (unlikely(address >= VMALLOC_START))
+ if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto vmalloc_fault;
/*
@@ -140,7 +141,7 @@ bad_area_nosemaphore:
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
- info.si_addr = (void *) address;
+ info.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
@@ -196,7 +197,7 @@ do_sigbus:
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
- info.si_addr = (void *) address;
+ info.si_addr = (void __user *) address;
force_sig_info(SIGBUS, &info, tsk);
return;
@@ -212,6 +213,7 @@ vmalloc_fault:
*/
int offset = __pgd_offset(address);
pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -222,8 +224,13 @@ vmalloc_fault:
goto no_context;
set_pgd(pgd, *pgd_k);
- pmd = pmd_offset(pgd, address);
- pmd_k = pmd_offset(pgd_k, address);
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
set_pmd(pmd, *pmd_k);
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index dd5e2e31885..1f7b37b38f5 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -83,6 +83,25 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
preempt_check_resched();
}
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ inc_preempt_count();
+
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+ flush_tlb_one(vaddr);
+
+ return (void*) vaddr;
+}
+
struct page *__kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long)ptr;
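The new kmap_atomic_pfn() above reuses the per-CPU fixmap slot arithmetic of
kmap_atomic(): each CPU owns KM_TYPE_NR consecutive slots, and a slot index
maps to a virtual address counted down from a fixed ceiling. A standalone
model of that arithmetic (all constants are illustrative, not the real MIPS
values):

#include <stdio.h>

#define KM_TYPE_NR     6		/* slots per CPU (illustrative) */
#define FIXADDR_TOP    0xfffe0000UL	/* illustrative */
#define PAGE_SHIFT     12
#define FIX_KMAP_BEGIN 8		/* illustrative */

static unsigned long fix_to_virt(unsigned long idx)
{
	/* Slot idx lives (idx + 1) pages below the fixmap ceiling. */
	return FIXADDR_TOP - ((idx + 1) << PAGE_SHIFT);
}

int main(void)
{
	int cpu = 1, type = 2;		/* a KM_USER0-style slot */
	unsigned long idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * cpu;

	printf("cpu %d, type %d -> vaddr %#lx\n", cpu, type, fix_to_virt(idx));
	return 0;
}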
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index dc6830b10fa..f75ab748e8c 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -83,7 +83,7 @@ pte_t *kmap_pte;
pgprot_t kmap_prot;
#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
static void __init kmap_init(void)
{
@@ -96,36 +96,42 @@ static void __init kmap_init(void)
kmap_prot = PAGE_KERNEL;
}
-#ifdef CONFIG_64BIT
-static void __init fixrange_init(unsigned long start, unsigned long end,
+#ifdef CONFIG_32BIT
+void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- int i, j;
+ int i, j, k;
unsigned long vaddr;
vaddr = start;
i = __pgd_offset(vaddr);
- j = __pmd_offset(vaddr);
+ j = __pud_offset(vaddr);
+ k = __pmd_offset(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
- pmd = (pmd_t *)pgd;
- for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
- if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- set_pmd(pmd, __pmd(pte));
- if (pte != pte_offset_kernel(pmd, 0))
- BUG();
+ pud = (pud_t *)pgd;
+ for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+ pmd = (pmd_t *)pud;
+ for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ set_pmd(pmd, __pmd(pte));
+ if (pte != pte_offset_kernel(pmd, 0))
+ BUG();
+ }
+ vaddr += PMD_SIZE;
}
- vaddr += PMD_SIZE;
+ k = 0;
}
j = 0;
}
}
-#endif /* CONFIG_64BIT */
+#endif /* CONFIG_32BIT */
#endif /* CONFIG_HIGHMEM */
#ifndef CONFIG_NEED_MULTIPLE_NODES
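fixrange_init() above grows a middle loop for the new pud level, so it now
walks pgd, pud, and pmd slots in turn and allocates a pte page for each empty
pmd it crosses. A compact userspace model of the three-level walk (table
sizes and the allocator are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define PTRS_PER_PGD 4
#define PTRS_PER_PUD 1		/* folded, as on 32-bit MIPS */
#define PTRS_PER_PMD 4
#define PMD_SIZE     0x1000UL

int main(void)
{
	void *pmd_table[PTRS_PER_PGD][PTRS_PER_PUD][PTRS_PER_PMD] = { 0 };
	unsigned long start = 0, end = 6 * PMD_SIZE, vaddr = start;

	for (int i = 0; i < PTRS_PER_PGD && vaddr != end; i++)
		for (int j = 0; j < PTRS_PER_PUD && vaddr != end; j++)
			for (int k = 0; k < PTRS_PER_PMD && vaddr != end; k++) {
				if (!pmd_table[i][j][k]) {
					/* stands in for alloc_bootmem_low_pages() */
					pmd_table[i][j][k] = calloc(1, PMD_SIZE);
					printf("alloc pte page for %#lx\n", vaddr);
				}
				vaddr += PMD_SIZE;
			}
	return 0;
}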
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index adf352273f6..3101d1db559 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -55,7 +55,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
if (address >= end)
BUG();
do {
- pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+ pte_t * pte = pte_alloc_kernel(pmd, address);
if (!pte)
return -ENOMEM;
remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -77,11 +77,15 @@ static int remap_area_pages(unsigned long address, phys_t phys_addr,
flush_cache_all();
if (address >= end)
BUG();
- spin_lock(&init_mm.page_table_lock);
do {
+ pud_t *pud;
pmd_t *pmd;
- pmd = pmd_alloc(&init_mm, dir, address);
+
error = -ENOMEM;
+ pud = pud_alloc(&init_mm, dir, address);
+ if (!pud)
+ break;
+ pmd = pmd_alloc(&init_mm, pud, address);
if (!pmd)
break;
if (remap_area_pmd(pmd, address, end - address,
@@ -91,21 +95,11 @@ static int remap_area_pages(unsigned long address, phys_t phys_addr,
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
- spin_unlock(&init_mm.page_table_lock);
flush_tlb_all();
return error;
}
/*
- * Allow physical addresses to be fixed up to help 36 bit peripherals.
- */
-phys_t __attribute__ ((weak))
-fixup_bigphys_addr(phys_t phys_addr, phys_t size)
-{
- return phys_addr;
-}
-
-/*
* Generic mapping function (not visible outside):
*/
@@ -121,7 +115,7 @@ fixup_bigphys_addr(phys_t phys_addr, phys_t size)
#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
-void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
+void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
struct vm_struct * area;
unsigned long offset;
@@ -141,7 +135,7 @@ void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
*/
if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
flags == _CACHE_UNCACHED)
- return (void *) KSEG1ADDR(phys_addr);
+ return (void __iomem *) CKSEG1ADDR(phys_addr);
/*
* Don't allow anybody to remap normal RAM that we're using..
@@ -177,10 +171,10 @@ void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
return NULL;
}
- return (void *) (offset + (char *)addr);
+ return (void __iomem *) (offset + (char *)addr);
}
-#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)
+#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
void __iounmap(volatile void __iomem *addr)
{
@@ -190,10 +184,8 @@ void __iounmap(volatile void __iomem *addr)
return;
p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
- if (!p) {
+ if (!p)
printk(KERN_ERR "iounmap: bad address %p\n", addr);
- return;
- }
kfree(p);
}
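__ioremap() above keeps its fast path: uncached mappings of physical
addresses in the low 512MB can be served straight from CKSEG1 with no page
tables at all, which the patch now spells with the 64-bit-safe CKSEG1ADDR.
A standalone sketch of that shortcut (standard MIPS32 segment constants):

#include <stdio.h>

#define CKSEG1          0xa0000000UL
#define IS_LOW512(addr) (!((unsigned long long)(addr) & ~0x1fffffffULL))
#define CKSEG1ADDR(pa)  ((void *)(CKSEG1 | (unsigned long)(pa)))

int main(void)
{
	unsigned long pa = 0x1fc00000;	/* e.g. the boot PROM */

	if (IS_LOW512(pa))
		printf("uncached mapping: %p\n", CKSEG1ADDR(pa));
	else
		printf("needs a real page-table mapping\n");
	return 0;
}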
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
index 9f8b1654157..f51e180072e 100644
--- a/arch/mips/mm/pg-r4k.c
+++ b/arch/mips/mm/pg-r4k.c
@@ -25,7 +25,10 @@
#include <asm/cpu.h>
#include <asm/war.h>
-#define half_scache_line_size() (cpu_scache_line_size() >> 1)
+#define half_scache_line_size() (cpu_scache_line_size() >> 1)
+#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
+
/*
* Maximum sizes:
@@ -198,15 +201,15 @@ static inline void build_cdex_p(void)
if (store_offset & (cpu_dcache_line_size() - 1))
return;
- if (R4600_V1_HIT_CACHEOP_WAR && ((read_c0_prid() & 0xfff0) == 0x2010)) {
+ if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
build_nop();
build_nop();
build_nop();
build_nop();
}
- if (R4600_V2_HIT_CACHEOP_WAR && ((read_c0_prid() & 0xfff0) == 0x2020))
- build_insn_word(0x8c200000); /* lw $zero, ($at) */
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+ build_insn_word(0x3c01a000); /* lui $at, 0xa000 */
mi.c_format.opcode = cache_op;
mi.c_format.rs = 4; /* $a0 */
@@ -361,7 +364,7 @@ void __init build_clear_page(void)
build_addiu_a2_a0(PAGE_SIZE - (cpu_has_prefetch ? pref_offset_clear : 0));
- if (R4600_V2_HIT_CACHEOP_WAR && ((read_c0_prid() & 0xfff0) == 0x2020))
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
build_insn_word(0x3c01a000); /* lui $at, 0xa000 */
dest = label();
@@ -404,9 +407,6 @@ dest = label();
build_jr_ra();
- flush_icache_range((unsigned long)&clear_page_array,
- (unsigned long) epc);
-
BUG_ON(epc > clear_page_array + ARRAY_SIZE(clear_page_array));
}
@@ -420,7 +420,7 @@ void __init build_copy_page(void)
build_addiu_a2_a0(PAGE_SIZE - (cpu_has_prefetch ? pref_offset_copy : 0));
- if (R4600_V2_HIT_CACHEOP_WAR && ((read_c0_prid() & 0xfff0) == 0x2020))
+ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
build_insn_word(0x3c01a000); /* lui $at, 0xa000 */
dest = label();
@@ -482,8 +482,5 @@ dest = label();
build_jr_ra();
- flush_icache_range((unsigned long)&copy_page_array,
- (unsigned long) epc);
-
BUG_ON(epc > copy_page_array + ARRAY_SIZE(copy_page_array));
}
diff --git a/arch/mips/mm/pg-sb1.c b/arch/mips/mm/pg-sb1.c
index 1b6df7133c1..148c65b9cd8 100644
--- a/arch/mips/mm/pg-sb1.c
+++ b/arch/mips/mm/pg-sb1.c
@@ -60,7 +60,8 @@ static inline void clear_page_cpu(void *page)
" .set noreorder \n"
#ifdef CONFIG_CPU_HAS_PREFETCH
" daddiu %0, %0, 128 \n"
- " pref " SB1_PREF_STORE_STREAMED_HINT ", -128(%0) \n" /* Prefetch the first 4 lines */
+ " pref " SB1_PREF_STORE_STREAMED_HINT ", -128(%0) \n"
+ /* Prefetch the first 4 lines */
" pref " SB1_PREF_STORE_STREAMED_HINT ", -96(%0) \n"
" pref " SB1_PREF_STORE_STREAMED_HINT ", -64(%0) \n"
" pref " SB1_PREF_STORE_STREAMED_HINT ", -32(%0) \n"
@@ -106,7 +107,8 @@ static inline void copy_page_cpu(void *to, void *from)
#ifdef CONFIG_CPU_HAS_PREFETCH
" daddiu %0, %0, 128 \n"
" daddiu %1, %1, 128 \n"
- " pref " SB1_PREF_LOAD_STREAMED_HINT ", -128(%0)\n" /* Prefetch the first 4 lines */
+ " pref " SB1_PREF_LOAD_STREAMED_HINT ", -128(%0)\n"
+ /* Prefetch the first 4 lines */
" pref " SB1_PREF_STORE_STREAMED_HINT ", -128(%1)\n"
" pref " SB1_PREF_LOAD_STREAMED_HINT ", -96(%0)\n"
" pref " SB1_PREF_STORE_STREAMED_HINT ", -96(%1)\n"
@@ -207,66 +209,73 @@ typedef struct dmadscr_s {
u64 pad_b;
} dmadscr_t;
-static dmadscr_t page_descr[NR_CPUS] __attribute__((aligned(SMP_CACHE_BYTES)));
+static dmadscr_t page_descr[DM_NUM_CHANNELS]
+ __attribute__((aligned(SMP_CACHE_BYTES)));
void sb1_dma_init(void)
{
- int cpu = smp_processor_id();
- u64 base_val = CPHYSADDR(&page_descr[cpu]) | V_DM_DSCR_BASE_RINGSZ(1);
+ int i;
- bus_writeq(base_val,
- (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
- bus_writeq(base_val | M_DM_DSCR_BASE_RESET,
- (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
- bus_writeq(base_val | M_DM_DSCR_BASE_ENABL,
- (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
+ for (i = 0; i < DM_NUM_CHANNELS; i++) {
+ const u64 base_val = CPHYSADDR(&page_descr[i]) |
+ V_DM_DSCR_BASE_RINGSZ(1);
+ volatile void *base_reg =
+ IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
+
+ __raw_writeq(base_val, base_reg);
+ __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
+ __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
+ }
}
void clear_page(void *page)
{
- int cpu = smp_processor_id();
+ u64 to_phys = CPHYSADDR(page);
+ unsigned int cpu = smp_processor_id();
- /* if the page is above Kseg0, use old way */
+ /* if the page is not in KSEG0, use old way */
if ((long)KSEGX(page) != (long)CKSEG0)
return clear_page_cpu(page);
- page_descr[cpu].dscr_a = CPHYSADDR(page) | M_DM_DSCRA_ZERO_MEM | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
+ page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
+ M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
- bus_writeq(1, (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
+ __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
/*
* Don't really want to do it this way, but there's no
* reliable way to delay completion detection.
*/
- while (!(bus_readq((void *)(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)) &
- M_DM_DSCR_BASE_INTERRUPT))))
+ while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
+ & M_DM_DSCR_BASE_INTERRUPT))
;
- bus_readq((void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
+ __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
void copy_page(void *to, void *from)
{
- unsigned long from_phys = CPHYSADDR(from);
- unsigned long to_phys = CPHYSADDR(to);
- int cpu = smp_processor_id();
+ u64 from_phys = CPHYSADDR(from);
+ u64 to_phys = CPHYSADDR(to);
+ unsigned int cpu = smp_processor_id();
- /* if either page is above Kseg0, use old way */
+ /* if any page is not in KSEG0, use old way */
if ((long)KSEGX(to) != (long)CKSEG0
|| (long)KSEGX(from) != (long)CKSEG0)
return copy_page_cpu(to, from);
- page_descr[cpu].dscr_a = CPHYSADDR(to_phys) | M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
- page_descr[cpu].dscr_b = CPHYSADDR(from_phys) | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
- bus_writeq(1, (void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
+ page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
+ M_DM_DSCRA_INTERRUPT;
+ page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
+ __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
/*
* Don't really want to do it this way, but there's no
* reliable way to delay completion detection.
*/
- while (!(bus_readq((void *)(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)) &
- M_DM_DSCR_BASE_INTERRUPT))))
+ while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
+ & M_DM_DSCR_BASE_INTERRUPT))
;
- bus_readq((void *)IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
+ __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
#else /* !CONFIG_SIBYTE_DMA_PAGEOPS */
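The clear_page()/copy_page() DMA paths above keep the busy-wait their comment
apologizes for: kick one descriptor, then spin on the debug register until
the interrupt-status bit appears. (Note the old code's parenthesization ANDed
the mask into the register address rather than the value read; the patch
fixes that.) A standalone model of the polling loop, with the register faked
so the sketch terminates:

#include <stdio.h>

#define M_DM_DSCR_BASE_INTERRUPT (1ULL << 48)	/* illustrative bit */

static unsigned long long debug_reg;
static int polls;

static unsigned long long read_debug(void)
{
	/* Model the DMA engine finishing after a few polls; the real code
	 * does __raw_readq() on the DSCR_BASE_DEBUG register. */
	if (++polls == 3)
		debug_reg |= M_DM_DSCR_BASE_INTERRUPT;
	return debug_reg;
}

int main(void)
{
	while (!(read_debug() & M_DM_DSCR_BASE_INTERRUPT))
		;			/* busy-wait, as the driver does */
	printf("DMA complete after %d polls\n", polls);
	return 0;
}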
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 4f07f81e850..4a3c4919e31 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
+#include <asm/fixmap.h>
#include <asm/pgtable.h>
void pgd_init(unsigned long page)
@@ -29,42 +30,12 @@ void pgd_init(unsigned long page)
}
}
-#ifdef CONFIG_HIGHMEM
-static void __init fixrange_init (unsigned long start, unsigned long end,
- pgd_t *pgd_base)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- int i, j;
- unsigned long vaddr;
-
- vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pmd_offset(vaddr);
- pgd = pgd_base + i;
-
- for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
- pmd = (pmd_t *)pgd;
- for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
- if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- set_pmd(pmd, __pmd((unsigned long)pte));
- if (pte != pte_offset_kernel(pmd, 0))
- BUG();
- }
- vaddr += PMD_SIZE;
- }
- j = 0;
- }
-}
-#endif
-
void __init pagetable_init(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long vaddr;
pgd_t *pgd, *pgd_base;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
#endif
@@ -90,7 +61,8 @@ void __init pagetable_init(void)
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
pgd = swapper_pg_dir + __pgd_offset(vaddr);
- pmd = pmd_offset(pgd, vaddr);
+ pud = pud_offset(pgd, vaddr);
+ pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte;
#endif
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 4e92f931aab..9e8ff8badb1 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -15,6 +15,7 @@
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
/* Primary cache parameters. */
#define sc_lsize 32
@@ -96,25 +97,13 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
}
/*
- * This function is executed in the uncached segment CKSEG1.
- * It must not touch the stack, because the stack pointer still points
- * into CKSEG0.
- *
- * Three options:
- * - Write it in assembly and guarantee that we don't use the stack.
- * - Disable caching for CKSEG0 before calling it.
- * - Pray that GCC doesn't randomly start using the stack.
- *
- * This being Linux, we obviously take the least sane of those options -
- * following DaveM's lead in c-r4k.c
- *
- * It seems we get our kicks from relying on unguaranteed behaviour in GCC
+ * This function is executed in uncached address space.
*/
static __init void __rm7k_sc_enable(void)
{
int i;
- set_c0_config(1 << 3); /* CONF_SE */
+ set_c0_config(RM7K_CONF_SE);
write_c0_taglo(0);
write_c0_taghi(0);
@@ -127,24 +116,22 @@ static __init void __rm7k_sc_enable(void)
".set mips0\n\t"
".set reorder"
:
- : "r" (KSEG0ADDR(i)), "i" (Index_Store_Tag_SD));
+ : "r" (CKSEG0ADDR(i)), "i" (Index_Store_Tag_SD));
}
}
static __init void rm7k_sc_enable(void)
{
- void (*func)(void) = (void *) KSEG1ADDR(&__rm7k_sc_enable);
-
- if (read_c0_config() & 0x08) /* CONF_SE */
+ if (read_c0_config() & RM7K_CONF_SE)
return;
- printk(KERN_INFO "Enabling secondary cache...");
- func();
+ printk(KERN_INFO "Enabling secondary cache...\n");
+ run_uncached(__rm7k_sc_enable);
}
static void rm7k_sc_disable(void)
{
- clear_c0_config(1<<3); /* CONF_SE */
+ clear_c0_config(RM7K_CONF_SE);
}
struct bcache_ops rm7k_sc_ops = {
@@ -158,19 +145,19 @@ void __init rm7k_sc_init(void)
{
unsigned int config = read_c0_config();
- if ((config >> 31) & 1) /* Bit 31 set -> no S-Cache */
+ if ((config & RM7K_CONF_SC))
return;
printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",
(scache_size >> 10), sc_lsize);
- if (!((config >> 3) & 1)) /* CONF_SE */
+ if (!(config & RM7K_CONF_SE))
rm7k_sc_enable();
/*
* While we're at it let's deal with the tertiary cache.
*/
- if (!((config >> 17) & 1)) {
+ if (!(config & RM7K_CONF_TC)) {
/*
* We can't enable the L3 cache yet. There may be board-specific
@@ -183,9 +170,9 @@ void __init rm7k_sc_init(void)
* to probe it.
*/
printk(KERN_INFO "Tertiary cache present, %s enabled\n",
- config&(1<<12) ? "already" : "not (yet)");
+ (config & RM7K_CONF_TE) ? "already" : "not (yet)");
- if ((config >> 12) & 1)
+ if ((config & RM7K_CONF_TE))
rm7k_tcache_enabled = 1;
}
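rm7k_sc_enable() above switches from hand-casting a function pointer into
KSEG1 to run_uncached(), but the underlying idea is unchanged: KSEG0 and
KSEG1 alias the same low 512MB of physical memory, cached versus uncached, so
relocating code is pure address arithmetic. A sketch of that arithmetic
(computation only — nothing here jumps through the result):

#include <stdio.h>

#define CKSEG1 0xa0000000UL

static unsigned long ckseg1addr(unsigned long kseg0_addr)
{
	/* Keep the 512MB physical offset, swap in the uncached segment. */
	return (kseg0_addr & 0x1fffffffUL) | CKSEG1;
}

int main(void)
{
	unsigned long cached_fn = 0x80123450UL;	/* illustrative KSEG0 address */

	printf("cached %#lx -> uncached alias %#lx\n",
	       cached_fn, ckseg1addr(cached_fn));
	return 0;
}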
diff --git a/arch/mips/mm/tlb-andes.c b/arch/mips/mm/tlb-andes.c
index 167e08e9661..3f422a849c4 100644
--- a/arch/mips/mm/tlb-andes.c
+++ b/arch/mips/mm/tlb-andes.c
@@ -195,6 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
unsigned long flags;
pgd_t *pgdp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
int idx, pid;
@@ -220,7 +221,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
write_c0_entryhi(address | (pid));
pgdp = pgd_offset(vma->vm_mm, address);
tlb_probe();
- pmdp = pmd_offset(pgdp, address);
+ pudp = pud_offset(pgdp, address);
+ pmdp = pmd_offset(pudp, address);
idx = read_c0_index();
ptep = pte_offset_map(pmdp, address);
write_c0_entrylo0(pte_val(*ptep++) >> 6);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 59d38bc05b6..8297970f0bb 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -21,6 +21,12 @@
extern void build_tlb_refill_handler(void);
+/*
+ * Make sure all entries differ. If they're not different
+ * MIPS32 will take revenge ...
+ */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
"nop; nop; nop; nop; nop; nop;\n\t" \
@@ -42,11 +48,8 @@ void local_flush_tlb_all(void)
/* Blast 'em all away. */
while (entry < current_cpu_data.tlbsize) {
- /*
- * Make sure all entries differ. If they're not different
- * MIPS32 will take revenge ...
- */
- write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
write_c0_index(entry);
mtc0_tlbw_hazard();
tlb_write_indexed();
@@ -57,12 +60,21 @@ void local_flush_tlb_all(void)
local_irq_restore(flags);
}
+/* All entries common to a mm share an asid. To effectively flush
+ these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
- int cpu = smp_processor_id();
+ int cpu;
+
+ preempt_disable();
- if (cpu_context(cpu, mm) != 0)
- drop_mmu_context(mm,cpu);
+ cpu = smp_processor_id();
+
+ if (cpu_context(cpu, mm) != 0) {
+ drop_mmu_context(mm, cpu);
+ }
+
+ preempt_enable();
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -75,9 +87,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long flags;
int size;
- local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
+ local_irq_save(flags);
if (size <= current_cpu_data.tlbsize/2) {
int oldpid = read_c0_entryhi();
int newpid = cpu_asid(cpu, mm);
@@ -99,8 +111,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
if (idx < 0)
continue;
/* Make sure all entries differ. */
- write_c0_entryhi(CKSEG0 +
- (idx << (PAGE_SHIFT + 1)));
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
}
@@ -118,9 +129,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
unsigned long flags;
int size;
- local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
+ local_irq_save(flags);
if (size <= current_cpu_data.tlbsize / 2) {
int pid = read_c0_entryhi();
@@ -142,7 +153,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
if (idx < 0)
continue;
/* Make sure all entries differ. */
- write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
}
@@ -176,7 +187,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
if (idx < 0)
goto finish;
/* Make sure all entries differ. */
- write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
@@ -197,8 +208,8 @@ void local_flush_tlb_one(unsigned long page)
int oldpid, idx;
local_irq_save(flags);
- page &= (PAGE_MASK << 1);
oldpid = read_c0_entryhi();
+ page &= (PAGE_MASK << 1);
write_c0_entryhi(page);
mtc0_tlbw_hazard();
tlb_probe();
@@ -208,7 +219,7 @@ void local_flush_tlb_one(unsigned long page)
write_c0_entrylo1(0);
if (idx >= 0) {
/* Make sure all entries differ. */
- write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
mtc0_tlbw_hazard();
tlb_write_indexed();
tlbw_use_hazard();
@@ -227,6 +238,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
unsigned long flags;
pgd_t *pgdp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
int idx, pid;
@@ -237,35 +249,34 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
if (current->active_mm != vma->vm_mm)
return;
- pid = read_c0_entryhi() & ASID_MASK;
-
local_irq_save(flags);
+
+ pid = read_c0_entryhi() & ASID_MASK;
address &= (PAGE_MASK << 1);
write_c0_entryhi(address | pid);
pgdp = pgd_offset(vma->vm_mm, address);
mtc0_tlbw_hazard();
tlb_probe();
BARRIER;
- pmdp = pmd_offset(pgdp, address);
+ pudp = pud_offset(pgdp, address);
+ pmdp = pmd_offset(pudp, address);
idx = read_c0_index();
ptep = pte_offset_map(pmdp, address);
- #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
- write_c0_entrylo0(ptep->pte_high);
- ptep++;
- write_c0_entrylo1(ptep->pte_high);
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+ write_c0_entrylo0(ptep->pte_high);
+ ptep++;
+ write_c0_entrylo1(ptep->pte_high);
#else
- write_c0_entrylo0(pte_val(*ptep++) >> 6);
- write_c0_entrylo1(pte_val(*ptep) >> 6);
+ write_c0_entrylo0(pte_val(*ptep++) >> 6);
+ write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
- write_c0_entryhi(address | pid);
mtc0_tlbw_hazard();
if (idx < 0)
tlb_write_random();
else
tlb_write_indexed();
tlbw_use_hazard();
- write_c0_entryhi(pid);
local_irq_restore(flags);
}
@@ -357,7 +368,8 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
old_pagemask = read_c0_pagemask();
wired = read_c0_wired();
if (--temp_tlb_entry < wired) {
- printk(KERN_WARNING "No TLB space left for add_temporary_entry\n");
+ printk(KERN_WARNING
+ "No TLB space left for add_temporary_entry\n");
ret = -ENOSPC;
goto out;
}
@@ -388,7 +400,7 @@ static void __init probe_tlb(unsigned long config)
* is not supported, we assume R4k style. Cpu probing already figured
* out the number of tlb entries.
*/
- if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
+ if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
return;
reg = read_c0_config1();
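The UNIQUE_ENTRYHI() macro introduced above centralizes the invalidation
idiom used throughout this file: each flushed TLB slot is parked on its own
distinct CKSEG0 address, because duplicate EntryHi values can trigger a
machine check on MIPS32. The shift is PAGE_SHIFT + 1 since each entry maps an
even/odd pair of pages. A standalone sketch:

#include <stdio.h>

#define CKSEG0     0x80000000UL
#define PAGE_SHIFT 12
#define UNIQUE_ENTRYHI(idx) \
	(CKSEG0 + ((unsigned long)(idx) << (PAGE_SHIFT + 1)))

int main(void)
{
	/* Every index yields a different VPN2, so no two entries collide. */
	for (int idx = 0; idx < 4; idx++)
		printf("entry %d -> EntryHi %#lx\n", idx, UNIQUE_ENTRYHI(idx));
	return 0;
}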
diff --git a/arch/mips/mm/tlb-sb1.c b/arch/mips/mm/tlb-sb1.c
deleted file mode 100644
index 6256cafcf3a..00000000000
--- a/arch/mips/mm/tlb-sb1.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
- * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <asm/mmu_context.h>
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
-
-extern void build_tlb_refill_handler(void);
-
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
-
-/* Dump the current entry* and pagemask registers */
-static inline void dump_cur_tlb_regs(void)
-{
- unsigned int entryhihi, entryhilo, entrylo0hi, entrylo0lo, entrylo1hi;
- unsigned int entrylo1lo, pagemask;
-
- __asm__ __volatile__ (
- ".set push \n"
- ".set noreorder \n"
- ".set mips64 \n"
- ".set noat \n"
- " tlbr \n"
- " dmfc0 $1, $10 \n"
- " dsrl32 %0, $1, 0 \n"
- " sll %1, $1, 0 \n"
- " dmfc0 $1, $2 \n"
- " dsrl32 %2, $1, 0 \n"
- " sll %3, $1, 0 \n"
- " dmfc0 $1, $3 \n"
- " dsrl32 %4, $1, 0 \n"
- " sll %5, $1, 0 \n"
- " mfc0 %6, $5 \n"
- ".set pop \n"
- : "=r" (entryhihi), "=r" (entryhilo),
- "=r" (entrylo0hi), "=r" (entrylo0lo),
- "=r" (entrylo1hi), "=r" (entrylo1lo),
- "=r" (pagemask));
-
- printk("%08X%08X %08X%08X %08X%08X %08X",
- entryhihi, entryhilo,
- entrylo0hi, entrylo0lo,
- entrylo1hi, entrylo1lo,
- pagemask);
-}
-
-void sb1_dump_tlb(void)
-{
- unsigned long old_ctx;
- unsigned long flags;
- int entry;
- local_irq_save(flags);
- old_ctx = read_c0_entryhi();
- printk("Current TLB registers state:\n"
- " EntryHi EntryLo0 EntryLo1 PageMask Index\n"
- "--------------------------------------------------------------------\n");
- dump_cur_tlb_regs();
- printk(" %08X\n", read_c0_index());
- printk("\n\nFull TLB Dump:\n"
- "Idx EntryHi EntryLo0 EntryLo1 PageMask\n"
- "--------------------------------------------------------------\n");
- for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
- write_c0_index(entry);
- printk("\n%02i ", entry);
- dump_cur_tlb_regs();
- }
- printk("\n");
- write_c0_entryhi(old_ctx);
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_all(void)
-{
- unsigned long flags;
- unsigned long old_ctx;
- int entry;
-
- local_irq_save(flags);
- /* Save old context and create impossible VPN2 value */
- old_ctx = read_c0_entryhi() & ASID_MASK;
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
-
- entry = read_c0_wired();
- while (entry < current_cpu_data.tlbsize) {
- write_c0_entryhi(UNIQUE_ENTRYHI(entry));
- write_c0_index(entry);
- tlb_write_indexed();
- entry++;
- }
- write_c0_entryhi(old_ctx);
- local_irq_restore(flags);
-}
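/*
 * Editor's note: with 4 KB pages (PAGE_SHIFT = 12), UNIQUE_ENTRYHI(idx)
 * above evaluates to CKSEG0 + idx * 0x2000, i.e. 0x...80000000,
 * 0x...80002000, and so on -- one distinct VPN2 per entry, all in unmapped
 * kernel space, so no two invalidated entries can match each other or a
 * real lookup.
 */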
-
-
-/*
- * Use a bogus region of memory (starting at 0) to sanitize the TLBs.
- * Use increments of the maximum page size (16MB), and check for duplicate
- * entries before doing a given write. Then, when we're safe from collisions
- * with the firmware, go back and give all the entries invalid addresses with
- * the normal flush routine. Wired entries will be killed as well!
- */
-static void __init sb1_sanitize_tlb(void)
-{
- int entry;
- long addr = 0;
-
- long inc = 1<<24; /* 16MB */
- /* Save old context and create impossible VPN2 value */
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
- do {
- addr += inc;
- write_c0_entryhi(addr);
- tlb_probe();
- } while ((int)(read_c0_index()) >= 0);
- write_c0_index(entry);
- tlb_write_indexed();
- }
- /* Now that we know we're safe from collisions, flush the TLB with
- the "normal" routine. */
- local_flush_tlb_all();
-}
-
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- unsigned long flags;
- int cpu;
-
- local_irq_save(flags);
- cpu = smp_processor_id();
- if (cpu_context(cpu, mm) != 0) {
- int size;
- size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- size = (size + 1) >> 1;
- if (size <= (current_cpu_data.tlbsize/2)) {
- int oldpid = read_c0_entryhi() & ASID_MASK;
- int newpid = cpu_asid(cpu, mm);
-
- start &= (PAGE_MASK << 1);
- end += ((PAGE_SIZE << 1) - 1);
- end &= (PAGE_MASK << 1);
- while (start < end) {
- int idx;
-
- write_c0_entryhi(start | newpid);
- start += (PAGE_SIZE << 1);
- tlb_probe();
- idx = read_c0_index();
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- if (idx < 0)
- continue;
- tlb_write_indexed();
- }
- write_c0_entryhi(oldpid);
- } else {
- drop_mmu_context(mm, cpu);
- }
- }
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- unsigned long flags;
- int size;
-
- size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- size = (size + 1) >> 1;
-
- local_irq_save(flags);
- if (size <= (current_cpu_data.tlbsize/2)) {
- int pid = read_c0_entryhi();
-
- start &= (PAGE_MASK << 1);
- end += ((PAGE_SIZE << 1) - 1);
- end &= (PAGE_MASK << 1);
-
- while (start < end) {
- int idx;
-
- write_c0_entryhi(start);
- start += (PAGE_SIZE << 1);
- tlb_probe();
- idx = read_c0_index();
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- if (idx < 0)
- continue;
- tlb_write_indexed();
- }
- write_c0_entryhi(pid);
- } else {
- local_flush_tlb_all();
- }
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- unsigned long flags;
- int cpu = smp_processor_id();
-
- local_irq_save(flags);
- if (cpu_context(cpu, vma->vm_mm) != 0) {
- int oldpid, newpid, idx;
- newpid = cpu_asid(cpu, vma->vm_mm);
- page &= (PAGE_MASK << 1);
- oldpid = read_c0_entryhi() & ASID_MASK;
- write_c0_entryhi(page | newpid);
- tlb_probe();
- idx = read_c0_index();
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- if (idx < 0)
- goto finish;
- /* Make sure all entries differ. */
- write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- tlb_write_indexed();
- finish:
- write_c0_entryhi(oldpid);
- }
- local_irq_restore(flags);
-}
-
-/*
- * Remove one kernel space TLB entry. This entry is assumed to be marked
- * global so we don't do the ASID thing.
- */
-void local_flush_tlb_one(unsigned long page)
-{
- unsigned long flags;
- int oldpid, idx;
-
- page &= (PAGE_MASK << 1);
- oldpid = read_c0_entryhi() & ASID_MASK;
-
- local_irq_save(flags);
- write_c0_entryhi(page);
- tlb_probe();
- idx = read_c0_index();
- if (idx >= 0) {
- /* Make sure all entries differ. */
- write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- tlb_write_indexed();
- }
-
- write_c0_entryhi(oldpid);
- local_irq_restore(flags);
-}
-
-/* All entries common to a mm share an asid. To effectively flush
- these entries, we just bump the asid. */
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- int cpu;
-
- preempt_disable();
-
- cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) != 0) {
- drop_mmu_context(mm, cpu);
- }
-
- preempt_enable();
-}
-
-/* Stolen from mips32 routines */
-
-void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
-{
- unsigned long flags;
- pgd_t *pgdp;
- pmd_t *pmdp;
- pte_t *ptep;
- int idx, pid;
-
- /*
- * Handle debugger faulting in for debuggee.
- */
- if (current->active_mm != vma->vm_mm)
- return;
-
- local_irq_save(flags);
-
- pid = read_c0_entryhi() & ASID_MASK;
- address &= (PAGE_MASK << 1);
- write_c0_entryhi(address | (pid));
- pgdp = pgd_offset(vma->vm_mm, address);
- tlb_probe();
- pmdp = pmd_offset(pgdp, address);
- idx = read_c0_index();
- ptep = pte_offset_map(pmdp, address);
- write_c0_entrylo0(pte_val(*ptep++) >> 6);
- write_c0_entrylo1(pte_val(*ptep) >> 6);
- if (idx < 0) {
- tlb_write_random();
- } else {
- tlb_write_indexed();
- }
- local_irq_restore(flags);
-}
-
-void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
- unsigned long entryhi, unsigned long pagemask)
-{
- unsigned long flags;
- unsigned long wired;
- unsigned long old_pagemask;
- unsigned long old_ctx;
-
- local_irq_save(flags);
- old_ctx = read_c0_entryhi() & 0xff;
- old_pagemask = read_c0_pagemask();
- wired = read_c0_wired();
- write_c0_wired(wired + 1);
- write_c0_index(wired);
-
- write_c0_pagemask(pagemask);
- write_c0_entryhi(entryhi);
- write_c0_entrylo0(entrylo0);
- write_c0_entrylo1(entrylo1);
- tlb_write_indexed();
-
- write_c0_entryhi(old_ctx);
- write_c0_pagemask(old_pagemask);
-
- local_flush_tlb_all();
- local_irq_restore(flags);
-}
-
-/*
- * This is called from loadmmu.c. We have to set up all the
- * memory management function pointers, as well as initialize
- * the caches and TLBs.
- */
-void tlb_init(void)
-{
- write_c0_pagemask(PM_DEFAULT_MASK);
- write_c0_wired(0);
-
- /*
- * We don't know what state the firmware left the TLBs in, so this is
- * the ultra-conservative way to flush the TLBs and avoid machine
- * check exceptions due to duplicate TLB entries.
- */
- sb1_sanitize_tlb();
-
- build_tlb_refill_handler();
-}
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 6569be3983c..0f9485806ba 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -6,6 +6,7 @@
* Synthesize TLB refill handlers at runtime.
*
* Copyright (C) 2004,2005 by Thiemo Seufer
+ * Copyright (C) 2005 Maciej W. Rozycki
*/
#include <stdarg.h>
@@ -91,7 +92,7 @@ enum opcode {
insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
- insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32,
+ insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0,
insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
@@ -134,7 +135,6 @@ static __initdata struct insn insn_table[] = {
{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
{ insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE },
- { insn_dsrl32, M(spec_op,0,0,0,0,dsrl32_op), RT | RD | RE },
{ insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD },
{ insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 },
{ insn_j, M(j_op,0,0,0,0,0), JIMM },
@@ -366,7 +366,6 @@ I_u2u1u3(_dsll);
I_u2u1u3(_dsll32);
I_u2u1u3(_dsra);
I_u2u1u3(_dsrl);
-I_u2u1u3(_dsrl32);
I_u3u1u2(_dsubu);
I_0(_eret);
I_u1(_j);
@@ -412,7 +411,6 @@ enum label_id {
label_nopage_tlbm,
label_smp_pgtable_change,
label_r3000_write_probe_fail,
- label_r3000_write_probe_ok
};
struct label {
@@ -445,7 +443,6 @@ L_LA(_nopage_tlbs)
L_LA(_nopage_tlbm)
L_LA(_smp_pgtable_change)
L_LA(_r3000_write_probe_fail)
-L_LA(_r3000_write_probe_ok)
/* convenience macros for instructions */
#ifdef CONFIG_64BIT
@@ -490,7 +487,7 @@ L_LA(_r3000_write_probe_ok)
static __init int __attribute__((unused)) in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
- return (((addr) & 0xffffffff00000000) == 0xffffffff00000000);
+ return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
}
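/*
 * Editor's sketch: the test simply asks whether the address sign-extends
 * from 32 bits; the added L suffixes pin the constant's type (the
 * motivation is an assumption here -- the diff itself only adds them).
 */
long addr = (long)0xffffffff80001000L;  /* hypothetical CKSEG0 address */
int compat = ((addr & 0xffffffff00000000L) == 0xffffffff00000000L);  /* 1 */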
static __init int __attribute__((unused)) rel_highest(long val)
@@ -734,7 +731,7 @@ static void __init build_r3000_tlb_refill_handler(void)
if (p > tlb_handler + 32)
panic("TLB refill handler space exceeded");
- printk("Synthesized TLB handler (%u instructions).\n",
+ printk("Synthesized TLB refill handler (%u instructions).\n",
(unsigned int)(p - tlb_handler));
#ifdef DEBUG_TLB
{
@@ -746,7 +743,6 @@ static void __init build_r3000_tlb_refill_handler(void)
#endif
memcpy((void *)CAC_BASE, tlb_handler, 0x80);
- flush_icache_range(CAC_BASE, CAC_BASE + 0x80);
}
/*
@@ -783,6 +779,8 @@ static __initdata u32 final_handler[64];
static __init void __attribute__((unused)) build_tlb_probe_entry(u32 **p)
{
switch (current_cpu_data.cputype) {
+ /* Found by experiment: R4600 v2.0 needs this, too. */
+ case CPU_R4600:
case CPU_R5000:
case CPU_R5000A:
case CPU_NEVADA:
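/*
 * Editor's note: for the cores listed above, build_tlb_probe_entry()
 * pads the CP0 hazard before the probe, emitting roughly
 *
 *	nop			# settle EntryHi -> tlbp hazard
 *	tlbp			# sets Index (P bit on miss)
 *
 * instead of a bare tlbp; the exact hazard is core-specific.
 */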
@@ -834,12 +832,20 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
case CPU_R4700:
case CPU_R5000:
case CPU_R5000A:
+ i_nop(p);
+ tlbw(p);
+ i_nop(p);
+ break;
+
+ case CPU_R4300:
case CPU_5KC:
case CPU_TX49XX:
case CPU_AU1000:
case CPU_AU1100:
case CPU_AU1500:
case CPU_AU1550:
+ case CPU_AU1200:
+ case CPU_PR4450:
i_nop(p);
tlbw(p);
break;
@@ -848,6 +854,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
case CPU_R12000:
case CPU_4KC:
case CPU_SB1:
+ case CPU_SB1A:
case CPU_4KSC:
case CPU_20KC:
case CPU_25KF:
@@ -875,6 +882,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
case CPU_4KEC:
case CPU_24K:
+ case CPU_34K:
i_ehb(p);
tlbw(p);
break;
@@ -911,6 +919,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
case CPU_VR4131:
case CPU_VR4133:
+ case CPU_R5432:
i_nop(p);
i_nop(p);
tlbw(p);
@@ -942,34 +951,29 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
/* No i_nop needed here, since the next insn doesn't touch TMP. */
#ifdef CONFIG_SMP
+# ifdef CONFIG_BUILD_ELF64
/*
- * 64 bit SMP has the lower part of &pgd_current[smp_processor_id()]
+ * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
* stored in CONTEXT.
*/
- if (in_compat_space_p(pgdc)) {
- i_dmfc0(p, ptr, C0_CONTEXT);
- i_dsra(p, ptr, ptr, 23);
- i_ld(p, ptr, 0, ptr);
- } else {
-#ifdef CONFIG_BUILD_ELF64
- i_dmfc0(p, ptr, C0_CONTEXT);
- i_dsrl(p, ptr, ptr, 23);
- i_dsll(p, ptr, ptr, 3);
- i_LA_mostly(p, tmp, pgdc);
- i_daddu(p, ptr, ptr, tmp);
- i_dmfc0(p, tmp, C0_BADVADDR);
- i_ld(p, ptr, rel_lo(pgdc), ptr);
-#else
- i_dmfc0(p, ptr, C0_CONTEXT);
- i_lui(p, tmp, rel_highest(pgdc));
- i_dsll(p, ptr, ptr, 9);
- i_daddiu(p, tmp, tmp, rel_higher(pgdc));
- i_dsrl32(p, ptr, ptr, 0);
- i_and(p, ptr, ptr, tmp);
- i_dmfc0(p, tmp, C0_BADVADDR);
- i_ld(p, ptr, 0, ptr);
-#endif
- }
+ i_dmfc0(p, ptr, C0_CONTEXT);
+ i_dsrl(p, ptr, ptr, 23);
+ i_LA_mostly(p, tmp, pgdc);
+ i_daddu(p, ptr, ptr, tmp);
+ i_dmfc0(p, tmp, C0_BADVADDR);
+ i_ld(p, ptr, rel_lo(pgdc), ptr);
+# else
+ /*
+ * 64 bit SMP running in compat space has the lower part of
+ * &pgd_current[smp_processor_id()] stored in CONTEXT.
+ */
+ if (!in_compat_space_p(pgdc))
+ panic("Invalid page directory address!");
+
+ i_dmfc0(p, ptr, C0_CONTEXT);
+ i_dsra(p, ptr, ptr, 23);
+ i_ld(p, ptr, 0, ptr);
+# endif
#else
i_LA_mostly(p, ptr, pgdc);
i_ld(p, ptr, rel_lo(pgdc), ptr);
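/*
 * Editor's sketch (values assumed, not from the patch) of the pointer
 * arithmetic the ELF64 branch synthesizes: PTEBase (CONTEXT bits 63:23)
 * is preloaded with smp_processor_id() << 3, so shifting the whole
 * register right by 23 leaves a byte offset into pgd_current[]:
 */
unsigned long context = read_c0_context();
unsigned long pgd = *(unsigned long *)((char *)pgd_current + (context >> 23));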
@@ -1026,7 +1030,6 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
i_mfc0(p, ptr, C0_CONTEXT);
i_LA_mostly(p, tmp, pgdc);
i_srl(p, ptr, ptr, 23);
- i_sll(p, ptr, ptr, 2);
i_addu(p, ptr, tmp, ptr);
#else
i_LA_mostly(p, ptr, pgdc);
@@ -1245,13 +1248,19 @@ static void __init build_r4000_tlb_refill_handler(void)
{
int i;
- for (i = 0; i < 64; i++)
- printk("%08x\n", final_handler[i]);
+ f = final_handler;
+#ifdef CONFIG_64BIT
+ if (final_len > 32)
+ final_len = 64;
+ else
+ f = final_handler + 32;
+#endif /* CONFIG_64BIT */
+ for (i = 0; i < final_len; i++)
+ printk("%08x\n", f[i]);
}
#endif
memcpy((void *)CAC_BASE, final_handler, 0x100);
- flush_icache_range(CAC_BASE, CAC_BASE + 0x100);
}
/*
@@ -1277,37 +1286,41 @@ u32 __tlb_handler_align handle_tlbs[FASTPATH_SIZE];
u32 __tlb_handler_align handle_tlbm[FASTPATH_SIZE];
static void __init
-iPTE_LW(u32 **p, struct label **l, unsigned int pte, int offset,
- unsigned int ptr)
+iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
if (cpu_has_64bits)
- i_lld(p, pte, offset, ptr);
+ i_lld(p, pte, 0, ptr);
else
# endif
- i_LL(p, pte, offset, ptr);
+ i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
if (cpu_has_64bits)
- i_ld(p, pte, offset, ptr);
+ i_ld(p, pte, 0, ptr);
else
# endif
- i_LW(p, pte, offset, ptr);
+ i_LW(p, pte, 0, ptr);
#endif
}
static void __init
-iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, int offset,
- unsigned int ptr)
+iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr,
+ unsigned int mode)
{
+#ifdef CONFIG_64BIT_PHYS_ADDR
+ unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
+#endif
+
+ i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
if (cpu_has_64bits)
- i_scd(p, pte, offset, ptr);
+ i_scd(p, pte, 0, ptr);
else
# endif
- i_SC(p, pte, offset, ptr);
+ i_SC(p, pte, 0, ptr);
if (r10000_llsc_war())
il_beqzl(p, r, pte, label_smp_pgtable_change);
@@ -1318,7 +1331,7 @@ iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, int offset,
if (!cpu_has_64bits) {
/* no i_nop needed */
i_ll(p, pte, sizeof(pte_t) / 2, ptr);
- i_ori(p, pte, pte, _PAGE_VALID);
+ i_ori(p, pte, pte, hwmode);
i_sc(p, pte, sizeof(pte_t) / 2, ptr);
il_beqz(p, r, pte, label_smp_pgtable_change);
/* no i_nop needed */
@@ -1331,15 +1344,15 @@ iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, int offset,
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
if (cpu_has_64bits)
- i_sd(p, pte, offset, ptr);
+ i_sd(p, pte, 0, ptr);
else
# endif
- i_SW(p, pte, offset, ptr);
+ i_SW(p, pte, 0, ptr);
# ifdef CONFIG_64BIT_PHYS_ADDR
if (!cpu_has_64bits) {
i_lw(p, pte, sizeof(pte_t) / 2, ptr);
- i_ori(p, pte, pte, _PAGE_VALID);
+ i_ori(p, pte, pte, hwmode);
i_sw(p, pte, sizeof(pte_t) / 2, ptr);
i_lw(p, pte, 0, ptr);
}
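/*
 * Editor's sketch: on SMP the iPTE_SW sequence above amounts to an atomic
 * read-modify-write of the pte -- load-linked, OR in the mode bits,
 * store-conditional, and retry via label_smp_pgtable_change on failure.
 * ll()/sc() below are hypothetical stand-ins for the emitted instructions:
 */
do {
	pte = ll(ptep);         /* iPTE_LW: load-linked */
	pte |= mode;            /* i_ori: set the mode bits */
} while (!sc(ptep, pte));       /* iPTE_SW: store-conditional */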
@@ -1359,7 +1372,7 @@ build_pte_present(u32 **p, struct label **l, struct reloc **r,
i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
il_bnez(p, r, pte, lid);
- iPTE_LW(p, l, pte, 0, ptr);
+ iPTE_LW(p, l, pte, ptr);
}
/* Make PTE valid, store result in PTR. */
@@ -1367,8 +1380,9 @@ static void __init
build_make_valid(u32 **p, struct reloc **r, unsigned int pte,
unsigned int ptr)
{
- i_ori(p, pte, pte, _PAGE_VALID | _PAGE_ACCESSED);
- iPTE_SW(p, r, pte, 0, ptr);
+ unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
+
+ iPTE_SW(p, r, pte, ptr, mode);
}
/*
@@ -1382,7 +1396,7 @@ build_pte_writable(u32 **p, struct label **l, struct reloc **r,
i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
il_bnez(p, r, pte, lid);
- iPTE_LW(p, l, pte, 0, ptr);
+ iPTE_LW(p, l, pte, ptr);
}
/* Make PTE writable, update software status bits as well, then store
@@ -1392,9 +1406,10 @@ static void __init
build_make_write(u32 **p, struct reloc **r, unsigned int pte,
unsigned int ptr)
{
- i_ori(p, pte, pte,
- _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- iPTE_SW(p, r, pte, 0, ptr);
+ unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
+ | _PAGE_DIRTY);
+
+ iPTE_SW(p, r, pte, ptr, mode);
}
/*
@@ -1407,41 +1422,48 @@ build_pte_modifiable(u32 **p, struct label **l, struct reloc **r,
{
i_andi(p, pte, pte, _PAGE_WRITE);
il_beqz(p, r, pte, lid);
- iPTE_LW(p, l, pte, 0, ptr);
+ iPTE_LW(p, l, pte, ptr);
}
/*
* R3000 style TLB load/store/modify handlers.
*/
-/* This places the pte in the page table at PTR into ENTRYLO0. */
+/*
+ * This places the pte into ENTRYLO0 and writes it with tlbwi.
+ * Then it returns.
+ */
static void __init
-build_r3000_pte_reload(u32 **p, unsigned int ptr)
+build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
- i_lw(p, ptr, 0, ptr);
- i_nop(p); /* load delay */
- i_mtc0(p, ptr, C0_ENTRYLO0);
- i_nop(p); /* cp0 delay */
+ i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
+ i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
+ i_tlbwi(p);
+ i_jr(p, tmp);
+ i_rfe(p); /* branch delay */
}
/*
- * The index register may have the probe fail bit set,
- * because we would trap on access kseg2, i.e. without refill.
+ * This places the pte into ENTRYLO0 and writes it with tlbwi
+ * or tlbwr as appropriate. This is because the index register
+ * may have the probe fail bit set as a result of a trap on a
+ * kseg2 access, i.e. without refill. Then it returns.
*/
static void __init
-build_r3000_tlb_write(u32 **p, struct label **l, struct reloc **r,
- unsigned int tmp)
+build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r,
+ unsigned int pte, unsigned int tmp)
{
i_mfc0(p, tmp, C0_INDEX);
- i_nop(p); /* cp0 delay */
- il_bltz(p, r, tmp, label_r3000_write_probe_fail);
- i_nop(p); /* branch delay */
- i_tlbwi(p);
- il_b(p, r, label_r3000_write_probe_ok);
- i_nop(p); /* branch delay */
+ i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
+ il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
+ i_mfc0(p, tmp, C0_EPC); /* branch delay */
+ i_tlbwi(p); /* cp0 delay */
+ i_jr(p, tmp);
+ i_rfe(p); /* branch delay */
l_r3000_write_probe_fail(l, *p);
- i_tlbwr(p);
- l_r3000_write_probe_ok(l, *p);
+ i_tlbwr(p); /* cp0 delay */
+ i_jr(p, tmp);
+ i_rfe(p); /* branch delay */
}
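/*
 * Editor's note: read back from the i_* calls above, the rewritten
 * reload/write path schedules useful work into every R3000 delay slot:
 *
 *	mfc0	k1, c0_index
 *	mtc0	k0, c0_entrylo0		# fills the cp0 read delay
 *	bltz	k1, probe_fail		# probe failed? (P bit set)
 *	 mfc0	k1, c0_epc		# branch delay slot
 *	tlbwi
 *	jr	k1
 *	 rfe				# branch delay slot
 * probe_fail:
 *	tlbwr				# write a random slot instead
 *	jr	k1
 *	 rfe
 */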
static void __init
@@ -1461,17 +1483,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
i_andi(p, pte, pte, 0xffc); /* load delay */
i_addu(p, ptr, ptr, pte);
i_lw(p, pte, 0, ptr);
- i_nop(p); /* load delay */
- i_tlbp(p);
-}
-
-static void __init
-build_r3000_tlbchange_handler_tail(u32 **p, unsigned int tmp)
-{
- i_mfc0(p, tmp, C0_EPC);
- i_nop(p); /* cp0 delay */
- i_jr(p, tmp);
- i_rfe(p); /* branch delay */
+ i_tlbp(p); /* load delay */
}
static void __init build_r3000_tlb_load_handler(void)
@@ -1486,10 +1498,9 @@ static void __init build_r3000_tlb_load_handler(void)
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+ i_nop(&p); /* load delay */
build_make_valid(&p, &r, K0, K1);
- build_r3000_pte_reload(&p, K1);
- build_r3000_tlb_write(&p, &l, &r, K0);
- build_r3000_tlbchange_handler_tail(&p, K0);
+ build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
l_nopage_tlbl(&l, p);
i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
@@ -1506,13 +1517,10 @@ static void __init build_r3000_tlb_load_handler(void)
{
int i;
- for (i = 0; i < FASTPATH_SIZE; i++)
+ for (i = 0; i < (p - handle_tlbl); i++)
printk("%08x\n", handle_tlbl[i]);
}
#endif
-
- flush_icache_range((unsigned long)handle_tlbl,
- (unsigned long)handle_tlbl + FASTPATH_SIZE * sizeof(u32));
}
static void __init build_r3000_tlb_store_handler(void)
@@ -1527,10 +1535,9 @@ static void __init build_r3000_tlb_store_handler(void)
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+ i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1);
- build_r3000_pte_reload(&p, K1);
- build_r3000_tlb_write(&p, &l, &r, K0);
- build_r3000_tlbchange_handler_tail(&p, K0);
+ build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
l_nopage_tlbs(&l, p);
i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -1547,13 +1554,10 @@ static void __init build_r3000_tlb_store_handler(void)
{
int i;
- for (i = 0; i < FASTPATH_SIZE; i++)
+ for (i = 0; i < (p - handle_tlbs); i++)
printk("%08x\n", handle_tlbs[i]);
}
#endif
-
- flush_icache_range((unsigned long)handle_tlbs,
- (unsigned long)handle_tlbs + FASTPATH_SIZE * sizeof(u32));
}
static void __init build_r3000_tlb_modify_handler(void)
@@ -1568,10 +1572,9 @@ static void __init build_r3000_tlb_modify_handler(void)
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+ i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1);
- build_r3000_pte_reload(&p, K1);
- i_tlbwi(&p);
- build_r3000_tlbchange_handler_tail(&p, K0);
+ build_r3000_pte_reload_tlbwi(&p, K0, K1);
l_nopage_tlbm(&l, p);
i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
@@ -1588,13 +1591,10 @@ static void __init build_r3000_tlb_modify_handler(void)
{
int i;
- for (i = 0; i < FASTPATH_SIZE; i++)
+ for (i = 0; i < (p - handle_tlbm); i++)
printk("%08x\n", handle_tlbm[i]);
}
#endif
-
- flush_icache_range((unsigned long)handle_tlbm,
- (unsigned long)handle_tlbm + FASTPATH_SIZE * sizeof(u32));
}
/*
@@ -1620,7 +1620,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct label **l,
#ifdef CONFIG_SMP
l_smp_pgtable_change(l, *p);
# endif
- iPTE_LW(p, l, pte, 0, ptr); /* get even pte */
+ iPTE_LW(p, l, pte, ptr); /* get even pte */
build_tlb_probe_entry(p);
}
@@ -1680,13 +1680,10 @@ static void __init build_r4000_tlb_load_handler(void)
{
int i;
- for (i = 0; i < FASTPATH_SIZE; i++)
+ for (i = 0; i < (p - handle_tlbl); i++)
printk("%08x\n", handle_tlbl[i]);
}
#endif
-
- flush_icache_range((unsigned long)handle_tlbl,
- (unsigned long)handle_tlbl + FASTPATH_SIZE * sizeof(u32));
}
static void __init build_r4000_tlb_store_handler(void)
@@ -1719,13 +1716,10 @@ static void __init build_r4000_tlb_store_handler(void)
{
int i;
- for (i = 0; i < FASTPATH_SIZE; i++)
+ for (i = 0; i < (p - handle_tlbs); i++)
printk("%08x\n", handle_tlbs[i]);
}
#endif
-
- flush_icache_range((unsigned long)handle_tlbs,
- (unsigned long)handle_tlbs + FASTPATH_SIZE * sizeof(u32));
}
static void __init build_r4000_tlb_modify_handler(void)
@@ -1759,13 +1753,10 @@ static void __init build_r4000_tlb_modify_handler(void)
{
int i;
- for (i = 0; i < FASTPATH_SIZE; i++)
+ for (i = 0; i < (p - handle_tlbm); i++)
printk("%08x\n", handle_tlbm[i]);
}
#endif
-
- flush_icache_range((unsigned long)handle_tlbm,
- (unsigned long)handle_tlbm + FASTPATH_SIZE * sizeof(u32));
}
void __init build_tlb_refill_handler(void)
@@ -1813,3 +1804,13 @@ void __init build_tlb_refill_handler(void)
}
}
}
+
+void __init flush_tlb_handlers(void)
+{
+ flush_icache_range((unsigned long)handle_tlbl,
+ (unsigned long)handle_tlbl + sizeof(handle_tlbl));
+ flush_icache_range((unsigned long)handle_tlbs,
+ (unsigned long)handle_tlbs + sizeof(handle_tlbs));
+ flush_icache_range((unsigned long)handle_tlbm,
+ (unsigned long)handle_tlbm + sizeof(handle_tlbm));
+}
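/*
 * Editor's sketch of the intended ordering (the call site lies outside
 * this diff and is assumed): the per-handler icache flushes removed above
 * are replaced by one pass over all three fastpath buffers once every
 * handler has been synthesized:
 */
build_tlb_refill_handler();     /* synthesizes handle_tlbl/tlbs/tlbm */
flush_tlb_handlers();           /* then make them visible to ifetch */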