author	Arnd Bergmann <arnd@arndb.de>	2006-11-20 18:45:06 +0100
committer	Paul Mackerras <paulus@samba.org>	2006-12-04 20:39:59 +1100
commit	932f535dd4c83dc3eb631c2cee1dfd6ae289b88c (patch)
tree	a5fcb59665a421867de33f53b9f63ed1fefa5268 /arch/powerpc
parent	5c3ecd659bd20cda214a402a3132c790cc886cd2 (diff)
[POWERPC] spufs: Always map local store non-guarded
When fixing spufs to map the 'mem' file backing store cacheable, I
incorrectly set the physical mapping to use both cache-inhibited
and guarded mapping, which resulted in a serious performance
degradation.

Debugged-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
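Background on the two flags involved: on PowerPC, _PAGE_NO_CACHE sets the
cache-inhibit ('I') storage attribute, while _PAGE_GUARDED sets the guarded
('G') attribute, which additionally forbids speculative and prefetched
accesses to the page. The live local store only needs the former; leaving G
set serializes every access, which is the slowdown this patch removes. A
minimal sketch of the resulting logic, using a hypothetical helper
spufs_ls_prot() invented for illustration (not actual kernel code):

	/*
	 * Illustrative sketch only: pick the page protection for an SPU
	 * local-store mapping. spufs_ls_prot() is a hypothetical name.
	 */
	#include <asm/page.h>		/* pgprot_t, __pgprot, pgprot_val */
	#include <asm/pgtable.h>	/* _PAGE_NO_CACHE, _PAGE_GUARDED */

	static pgprot_t spufs_ls_prot(pgprot_t prot, int context_saved)
	{
		if (context_saved)
			/* Saved state lives in ordinary cacheable kernel
			 * memory, so drop the cache-inhibit bit. */
			return __pgprot(pgprot_val(prot) & ~_PAGE_NO_CACHE);

		/* Live local store must be cache-inhibited so accesses
		 * reach the SPU, but NOT guarded: the G bit forbids
		 * speculative access and serializes loads. */
		return __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE);
	}

The saved-state path can be fully cacheable because the context is backed by
vmalloc'ed memory; after this patch, G is never set on either path, so only
the I bit needs to be toggled.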
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/file.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 55d7e0f4bb3..1c1af71d19c 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -105,11 +105,11 @@ spufs_mem_mmap_nopage(struct vm_area_struct *vma,
 	if (ctx->state == SPU_STATE_SAVED) {
 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-					& ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
+					& ~_PAGE_NO_CACHE);
 		page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
 	} else {
 		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-					     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+					     | _PAGE_NO_CACHE);
 		page = pfn_to_page((ctx->spu->local_store_phys + offset)
 				>> PAGE_SHIFT);
 	}