author    Linus Torvalds <torvalds@g5.osdl.org>  2005-09-21 07:53:38 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-09-21 07:53:38 -0700
commit    2fe9f798ba3cf7c939e638b78f46975e79039978
tree      f65153c806f68bd6e965d65d7c7e4104b27fff90 /arch
parent    76abf3e71c5a4d315ec95e23e5aedd15fbfcd6df
parent    a131430c200f6bda313bf5d0a8e238c41afdfe0a

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
Diffstat (limited to 'arch')

 -rw-r--r--  arch/sparc64/kernel/entry.S     | 39
 -rw-r--r--  arch/sparc64/kernel/ptrace.c    |  7
 -rw-r--r--  arch/sparc64/kernel/una_asm.S   |  2
 -rw-r--r--  arch/sparc64/kernel/unaligned.c | 64

 4 files changed, 81 insertions(+), 31 deletions(-)
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 3e0badb820c..b4834952785 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -42,19 +42,15 @@
* executing (see inherit_locked_prom_mappings() rant).
*/
sparc64_vpte_nucleus:
- /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
- mov 0xf, %g5
- sllx %g5, 28, %g5
-
- /* Is addr >= LOW_OBP_ADDRESS? */
+ /* Note that kvmap below has verified that the address is
+ * in the range MODULES_VADDR --> VMALLOC_END already. So
+ * here we need only check if it is an OBP address or not.
+ */
+ sethi %hi(LOW_OBP_ADDRESS), %g5
cmp %g4, %g5
blu,pn %xcc, sparc64_vpte_patchme1
mov 0x1, %g5
-
- /* Load 0x100000000, which is HI_OBP_ADDRESS. */
sllx %g5, 32, %g5
-
- /* Is addr < HI_OBP_ADDRESS? */
cmp %g4, %g5
blu,pn %xcc, obp_iaddr_patch
nop
@@ -156,26 +152,29 @@ obp_daddr_patch:
* rather, use information saved during inherit_prom_mappings() using 8k
* pagesize.
*/
+ .align 32
kvmap:
- /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
- mov 0xf, %g5
- sllx %g5, 28, %g5
+ sethi %hi(MODULES_VADDR), %g5
+ cmp %g4, %g5
+ blu,pn %xcc, longpath
+ mov (VMALLOC_END >> 24), %g5
+ sllx %g5, 24, %g5
+ cmp %g4, %g5
+ bgeu,pn %xcc, longpath
+ nop
- /* Is addr >= LOW_OBP_ADDRESS? */
+kvmap_check_obp:
+ sethi %hi(LOW_OBP_ADDRESS), %g5
cmp %g4, %g5
- blu,pn %xcc, vmalloc_addr
+ blu,pn %xcc, kvmap_vmalloc_addr
mov 0x1, %g5
-
- /* Load 0x100000000, which is HI_OBP_ADDRESS. */
sllx %g5, 32, %g5
-
- /* Is addr < HI_OBP_ADDRESS? */
cmp %g4, %g5
blu,pn %xcc, obp_daddr_patch
nop
-vmalloc_addr:
- /* If we get here, a vmalloc addr accessed, load kernel VPTE. */
+kvmap_vmalloc_addr:
+ /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
ldxa [%g3 + %g6] ASI_N, %g5
brgez,pn %g5, longpath
nop
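The rewritten nucleus path relies on the check that the new kvmap code performs first: by the time execution reaches sparc64_vpte_nucleus, the address is already known to lie in MODULES_VADDR --> VMALLOC_END, so only the OBP window needs testing. A minimal C sketch of that classification, using the constants named in the removed comments (LOW_OBP_ADDRESS = 0xf0000000, HI_OBP_ADDRESS = 0x100000000); the helper name is illustrative, not kernel code:

    #include <stdbool.h>

    /* Constants as documented in the removed comments above. */
    #define LOW_OBP_ADDRESS 0x00000000f0000000UL /* 0xf0000000  */
    #define HI_OBP_ADDRESS  0x0000000100000000UL /* 0x100000000 */

    /* kvmap has already bounded addr to MODULES_VADDR --> VMALLOC_END,
     * so the two cmp/blu pairs in the assembly reduce to this test.
     */
    static bool is_obp_address(unsigned long addr)
    {
            return addr >= LOW_OBP_ADDRESS && addr < HI_OBP_ADDRESS;
    }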
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 23ad839d113..5efbff90d66 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -30,6 +30,7 @@
#include <asm/psrcompat.h>
#include <asm/visasm.h>
#include <asm/spitfire.h>
+#include <asm/page.h>

/* Returning from ptrace is a bit tricky because the syscall return
* low level code assumes any value returned which is negative and
@@ -128,20 +129,20 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
* is mapped to in the user's address space, we can skip the
* D-cache flush.
*/
- if ((uaddr ^ kaddr) & (1UL << 13)) {
+ if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
unsigned long start = __pa(kaddr);
unsigned long end = start + len;
if (tlb_type == spitfire) {
for (; start < end; start += 32)
- spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
+ spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
} else {
for (; start < end; start += 32)
__asm__ __volatile__(
"stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (va),
+ : "r" (start),
"i" (ASI_DCACHE_INVALIDATE));
}
}
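Three fixes land in this hunk: kaddr needs a cast before the XOR, and the two flush loops were using a stale variable (va) instead of the loop cursor start, which walks the physical range __pa(kaddr) .. __pa(kaddr) + len in 32-byte D-cache lines. The alias test itself reads naturally as C; a sketch, with the bit-13 mask taken verbatim from the diff and the helper name purely illustrative:

    /* If the user and kernel virtual addresses differ in bit 13, the
     * two mappings can occupy different D-cache aliases, so the kernel
     * alias must be invalidated line by line (32-byte stride, as in
     * the loops above).  Otherwise the flush can be skipped entirely.
     */
    static int needs_dcache_flush(unsigned long uaddr, void *kaddr)
    {
            return ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) != 0;
    }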
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S
index cbb40585253..da48400bcc9 100644
--- a/arch/sparc64/kernel/una_asm.S
+++ b/arch/sparc64/kernel/una_asm.S
@@ -17,7 +17,7 @@ kernel_unaligned_trap_fault:
__do_int_store:
rd %asi, %o4
wr %o3, 0, %asi
- ldx [%o2], %g3
+ mov %o2, %g3
cmp %o1, 2
be,pn %icc, 2f
cmp %o1, 4
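This one-instruction change tracks the new calling convention introduced in unaligned.c below: the third argument of __do_int_store is now the store value itself rather than a pointer to it, so %o2 is copied with mov instead of dereferenced with ldx. The prototype change, as it appears in the next file:

    /* Old convention (removed): the caller passed a pointer.
     *
     *   extern void __do_int_store(unsigned long *dst_addr, int size,
     *                              unsigned long *src_val, int asi);
     */

    /* New convention: the value arrives by value, which lets the caller
     * byte-swap it beforehand when emulating a little-endian ASI.
     */
    extern void __do_int_store(unsigned long *dst_addr, int size,
                               unsigned long src_val, int asi);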
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index da9739f0d43..42718f6a7d3 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -184,13 +184,14 @@ extern void do_int_load(unsigned long *dest_reg, int size,
unsigned long *saddr, int is_signed, int asi);
extern void __do_int_store(unsigned long *dst_addr, int size,
- unsigned long *src_val, int asi);
+ unsigned long src_val, int asi);
static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
- struct pt_regs *regs, int asi)
+ struct pt_regs *regs, int asi, int orig_asi)
{
unsigned long zero = 0;
- unsigned long *src_val = &zero;
+ unsigned long *src_val_p = &zero;
+ unsigned long src_val;
if (size == 16) {
size = 8;
@@ -198,7 +199,25 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
(unsigned)fetch_reg(reg_num + 1, regs);
} else if (reg_num) {
- src_val = fetch_reg_addr(reg_num, regs);
+ src_val_p = fetch_reg_addr(reg_num, regs);
+ }
+ src_val = *src_val_p;
+ if (unlikely(asi != orig_asi)) {
+ switch (size) {
+ case 2:
+ src_val = swab16(src_val);
+ break;
+ case 4:
+ src_val = swab32(src_val);
+ break;
+ case 8:
+ src_val = swab64(src_val);
+ break;
+ case 16:
+ default:
+ BUG();
+ break;
+ };
}
__do_int_store(dst_addr, size, src_val, asi);
}
@@ -276,6 +295,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
kernel_mna_trap_fault();
} else {
unsigned long addr;
+ int orig_asi, asi;
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
@@ -285,18 +305,48 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
regs->tpc, dirstrings[dir], addr, size,
regs->u_regs[UREG_RETPC]);
#endif
+ orig_asi = asi = decode_asi(insn, regs);
+ switch (asi) {
+ case ASI_NL:
+ case ASI_AIUPL:
+ case ASI_AIUSL:
+ case ASI_PL:
+ case ASI_SL:
+ case ASI_PNFL:
+ case ASI_SNFL:
+ asi &= ~0x08;
+ break;
+ };
switch (dir) {
case load:
do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
size, (unsigned long *) addr,
- decode_signedness(insn),
- decode_asi(insn, regs));
+ decode_signedness(insn), asi);
+ if (unlikely(asi != orig_asi)) {
+ unsigned long val_in = *(unsigned long *) addr;
+ switch (size) {
+ case 2:
+ val_in = swab16(val_in);
+ break;
+ case 4:
+ val_in = swab32(val_in);
+ break;
+ case 8:
+ val_in = swab64(val_in);
+ break;
+ case 16:
+ default:
+ BUG();
+ break;
+ };
+ *(unsigned long *) addr = val_in;
+ }
break;
case store:
do_int_store(((insn>>25)&0x1f), size,
(unsigned long *) addr, regs,
- decode_asi(insn, regs));
+ asi, orig_asi);
break;
default:
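Both the load and store paths above apply the same fixup: a little-endian ASI is mapped to its big-endian counterpart by clearing bit 3 (asi &= ~0x08), the access is performed big-endian, and the transferred value is byte-swapped in software to preserve the little-endian semantics the instruction requested. A self-contained sketch of that swap, using portable stand-ins for the kernel's swab16/swab32/swab64 helpers:

    #include <stdint.h>

    /* Stand-ins for the kernel's swab16/swab32/swab64. */
    static uint16_t swab16(uint16_t x) { return (uint16_t)((x >> 8) | (x << 8)); }
    static uint32_t swab32(uint32_t x) { return __builtin_bswap32(x); }
    static uint64_t swab64(uint64_t x) { return __builtin_bswap64(x); }

    /* Mirror of the patch's switch statements: sizes 2, 4 and 8 are
     * swapped; size 16 never reaches this point, the kernel BUG()s on
     * it first, exactly as in the hunks above.
     */
    static unsigned long fixup_le_value(unsigned long val, int size)
    {
            switch (size) {
            case 2: return swab16(val);
            case 4: return swab32(val);
            case 8: return swab64(val);
            default: return val;
            }
    }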