From 2277ab4a1df50e05bc732fe9488d4e902bb8399a Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 22 Jul 2009 19:20:49 +0900
Subject: sh: Migrate from PG_mapped to PG_dcache_dirty.

This inverts the delayed dcache flush a bit to be more in line with
other platforms. At the same time this also gives us the ability to do
some more optimizations and cleanup. Now that the update_mmu_cache()
callsite only tests for the bit, the implementation can gradually be
split out and made generic, rather than relying on special
implementations for each of the peculiar CPU types.

SH7705 in 32kB mode and SH-4 still need slightly different handling,
but this is something that can remain isolated in the varying page
copy/clear routines. On top of that, SH-X3 is dcache coherent, so there
is no need to bother with any of these tests in the PTEAEX version of
update_mmu_cache(), so we kill that off too.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlb-sh4.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

(limited to 'arch/sh/mm/tlb-sh4.c')

diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index f0c7b7397fa..cf50082d243 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -21,27 +21,26 @@ void update_mmu_cache(struct vm_area_struct * vma,
         unsigned long flags;
         unsigned long pteval;
         unsigned long vpn;
+        unsigned long pfn = pte_pfn(pte);
+        struct page *page;
 
         /* Ptrace may call this routine. */
         if (vma && current->active_mm != vma->vm_mm)
                 return;
 
-#ifndef CONFIG_CACHE_OFF
-        {
-                unsigned long pfn = pte_pfn(pte);
+        page = pfn_to_page(pfn);
+        if (pfn_valid(pfn) && page_mapping(page)) {
+#ifndef CONFIG_SMP
+                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+                if (dirty) {
 
-                if (pfn_valid(pfn)) {
-                        struct page *page = pfn_to_page(pfn);
+                        unsigned long addr = (unsigned long)page_address(page);
 
-                        if (!test_bit(PG_mapped, &page->flags)) {
-                                unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-                                __flush_wback_region((void *)P1SEGADDR(phys),
-                                                     PAGE_SIZE);
-                                __set_bit(PG_mapped, &page->flags);
-                        }
+                        if (pages_do_alias(addr, address & PAGE_MASK))
+                                __flush_wback_region((void *)addr, PAGE_SIZE);
                 }
-        }
 #endif
+        }
 
         local_irq_save(flags);
--
cgit v1.2.3


From 9cef7492696a416663b4edb953a4eade8517ebeb Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 29 Jul 2009 00:12:17 +0900
Subject: sh: update_mmu_cache() consolidation.

This splits out a separate __update_cache()/__update_tlb() for
update_mmu_cache() to wrap in to.

This lets us share the common __update_cache() bits while keeping
special __update_tlb() handling broken out.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlb-sh4.c | 29 ++++++-----------------------
 1 file changed, 6 insertions(+), 23 deletions(-)

(limited to 'arch/sh/mm/tlb-sh4.c')

diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index cf50082d243..81199f1e594 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -15,33 +15,16 @@
 #include
 #include
 
-void update_mmu_cache(struct vm_area_struct * vma,
-                      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
-        unsigned long flags;
-        unsigned long pteval;
-        unsigned long vpn;
-        unsigned long pfn = pte_pfn(pte);
-        struct page *page;
+        unsigned long flags, pteval, vpn;
 
-        /* Ptrace may call this routine. */
-        if (vma && current->active_mm != vma->vm_mm)
+        /*
+         * Handle debugger faulting in for debugee.
+         */
+        if (current->active_mm != vma->vm_mm)
                 return;
 
-        page = pfn_to_page(pfn);
-        if (pfn_valid(pfn) && page_mapping(page)) {
-#ifndef CONFIG_SMP
-                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-                if (dirty) {
-
-                        unsigned long addr = (unsigned long)page_address(page);
-
-                        if (pages_do_alias(addr, address & PAGE_MASK))
-                                __flush_wback_region((void *)addr, PAGE_SIZE);
-                }
-#endif
-        }
-
         local_irq_save(flags);
 
         /* Set PTEH register */
--
cgit v1.2.3


From 3ed6e129390fb872c3b7e05a232e5d380fbdfb48 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 29 Jul 2009 22:06:58 +0900
Subject: sh: Handle a NULL vma in __update_tlb() for the fast-path.

The TLB miss fast-path presently calls in to update_mmu_cache() to set
up the entry, and does so with a NULL vma. Check for vma validity in
the __update_tlb() ptrace checks.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlb-sh4.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/sh/mm/tlb-sh4.c')

diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 81199f1e594..7d3c63e707a 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -22,7 +22,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
         /*
          * Handle debugger faulting in for debugee.
          */
-        if (current->active_mm != vma->vm_mm)
+        if (vma && current->active_mm != vma->vm_mm)
                 return;
 
         local_irq_save(flags);
--
cgit v1.2.3


From 6503fe4a6508673c15a509ec4ac3ca5979ae9593 Mon Sep 17 00:00:00 2001
From: Michael Trimarchi
Date: Thu, 20 Aug 2009 13:27:44 +0900
Subject: sh: Better description of SH-4 PTEA register update.

Signed-off-by: Michael Trimarchi
Signed-off-by: Paul Mundt
---
 arch/sh/mm/tlb-sh4.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'arch/sh/mm/tlb-sh4.c')

diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index f0c7b7397fa..fd0d11f1a81 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -61,9 +61,12 @@ void update_mmu_cache(struct vm_area_struct * vma,
          */
         ctrl_outl(pte.pte_high, MMU_PTEA);
 #else
-        if (cpu_data->flags & CPU_HAS_PTEA)
-                /* TODO: make this look less hacky */
-                ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+        if (cpu_data->flags & CPU_HAS_PTEA) {
+                /* The last 3 bits and the first one of pteval contains
+                 * the PTEA timing control and space attribute bits
+                 */
+                ctrl_outl(copy_ptea_attributes(pteval), MMU_PTEA);
+        }
 #endif
 
         /* Set PTEL register */
--
cgit v1.2.3
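
A note on the last patch: copy_ptea_attributes() is defined outside this
file, so its body does not appear in the diff above. The sketch below is
only a reader's aid reproducing the bit repacking that the removed
open-coded expression performed; the helper name and its placement here
are illustrative, not the in-tree definition.

    /*
     * Illustrative sketch only -- mirrors the removed expression
     * ((pteval >> 28) & 0xe) | (pteval & 0x1): PTE bits 31:29 move into
     * PTEA bits 3:1, and PTE bit 0 moves into PTEA bit 0, i.e. the
     * timing control and space attribute bits named in the patch comment.
     */
    static inline unsigned long sh4_ptea_attributes(unsigned long pteval)
    {
            return ((pteval >> 28) & 0xe) | (pteval & 0x1);
    }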