From 485f0720c3e0f57deac403acfbf078a89baeb6ba Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Thu, 20 Aug 2009 14:02:20 +0900
Subject: sh: DSP save/restore ordering and a0 sign extension fixups.

As an excellent indicator of how much testing the DSP code gets, a
couple of rather glaring bugs in the DSP save/restore paths were found:

 - In the DSP restore case a0 needs to be popped off before a0g, or
   the value of a0g is clobbered by the MSB of a0 in the case of sign
   extension.

 - Beyond that, the save and restore orders were out of sync, so this
   fixes that up as well.

At the same time, we switch over to using movs.l for both the save and
restore of the general DSP registers as opposed to using sts.l (which
was initially put in place to work around a bug in ancient binutils
versions which the kernel no longer supports).

Reported-by: Chee Soon Yip
Cc: Chu Lih Kwek,
Cc: General Lai,
Cc: Robert Cozens
Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/system_32.h | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

(limited to 'arch/sh/include/asm/system_32.h')

diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index 6c68a51f1cc..81e0d668eef 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -14,12 +14,12 @@ do {						\
 		(u32 *)&tsk->thread.dsp_status;		\
 	__asm__ __volatile__ (				\
 		".balign 4\n\t"				\
+		"movs.l @r2+, a0\n\t"			\
 		"movs.l @r2+, a1\n\t"			\
 		"movs.l @r2+, a0g\n\t"			\
 		"movs.l @r2+, a1g\n\t"			\
 		"movs.l @r2+, m0\n\t"			\
 		"movs.l @r2+, m1\n\t"			\
-		"movs.l @r2+, a0\n\t"			\
 		"movs.l @r2+, x0\n\t"			\
 		"movs.l @r2+, x1\n\t"			\
 		"movs.l @r2+, y0\n\t"			\
@@ -39,20 +39,20 @@ do {						\
 							\
 	__asm__ __volatile__ (				\
 		".balign 4\n\t"				\
-		"stc.l mod, @-r2\n\t"				\
+		"stc.l mod, @-r2\n\t"			\
 		"stc.l re, @-r2\n\t"			\
 		"stc.l rs, @-r2\n\t"			\
-		"sts.l dsr, @-r2\n\t"				\
-		"sts.l y1, @-r2\n\t"				\
-		"sts.l y0, @-r2\n\t"				\
-		"sts.l x1, @-r2\n\t"				\
-		"sts.l x0, @-r2\n\t"				\
-		"sts.l a0, @-r2\n\t"				\
-		".word 0xf653 ! movs.l a1, @-r2\n\t"		\
-		".word 0xf6f3 ! movs.l a0g, @-r2\n\t"		\
-		".word 0xf6d3 ! movs.l a1g, @-r2\n\t"		\
-		".word 0xf6c3 ! movs.l m0, @-r2\n\t"		\
-		".word 0xf6e3 ! movs.l m1, @-r2\n\t"		\
+		"sts.l dsr, @-r2\n\t"			\
+		"movs.l y1, @-r2\n\t"			\
+		"movs.l y0, @-r2\n\t"			\
+		"movs.l x1, @-r2\n\t"			\
+		"movs.l x0, @-r2\n\t"			\
+		"movs.l m1, @-r2\n\t"			\
+		"movs.l m0, @-r2\n\t"			\
+		"movs.l a1g, @-r2\n\t"			\
+		"movs.l a0g, @-r2\n\t"			\
+		"movs.l a1, @-r2\n\t"			\
+		"movs.l a0, @-r2\n\t"			\
 		: : "r" (__ts2));			\
 } while (0)
--
cgit v1.2.3

From 4aa5ac4ef44dd8c986241c54298abd7910b78b3f Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Fri, 28 Aug 2009 21:37:20 +0000
Subject: sh: Only shout about fixing up unexpected unaligned accesses

Some unaligned accesses are completely expected. For example, the
trapped_io code uses the unaligned access fixup code path, so there's
no need to warn about having to fix up the unaligned access.

Signed-off-by: Matt Fleming
Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/system_32.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/sh/include/asm/system_32.h')

diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index 81e0d668eef..d3ab269386b 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -199,7 +199,7 @@ do {						\
 #endif
 
 int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
-			    struct mem_access *ma);
+			    struct mem_access *ma, int);
 
 asmlinkage void do_address_error(struct pt_regs *regs,
 				 unsigned long writeaccess,
--
cgit v1.2.3
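
Why the restore ordering in the first patch matters: per the commit
message, a load into the a0 accumulator also rewrites the a0g guard
bits with a0's sign extension, so a0 has to be restored before a0g or
the freshly restored guard bits are lost. Below is a minimal userspace
C model of that hazard; the struct layout, helper names, and exact
guard-byte width are illustrative assumptions, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the SH-DSP a0/a0g pair: a 32-bit write to a0
 * sign-extends into the guard byte a0g. */
struct dsp_acc {
	uint32_t a0;	/* 32-bit accumulator */
	uint8_t  a0g;	/* guard bits */
};

/* Models "movs.l @r2+, a0": loading a0 also rewrites a0g. */
static void load_a0(struct dsp_acc *r, uint32_t val)
{
	r->a0  = val;
	r->a0g = (val & 0x80000000u) ? 0xffu : 0x00u;
}

/* Models "movs.l @r2+, a0g": only the guard byte is written. */
static void load_a0g(struct dsp_acc *r, uint8_t val)
{
	r->a0g = val;
}

int main(void)
{
	struct dsp_acc r = { 0, 0 };
	const uint32_t saved_a0  = 0x80000000u;	/* MSB set */
	const uint8_t  saved_a0g = 0x01u;

	/* Old (buggy) order: a0g first, then a0. The sign extension
	 * from loading a0 clobbers the just-restored guard bits. */
	load_a0g(&r, saved_a0g);
	load_a0(&r, saved_a0);
	printf("buggy order: a0g = 0x%02x (want 0x01)\n", r.a0g);

	/* Fixed order: a0 first, so the restored a0g survives. */
	load_a0(&r, saved_a0);
	load_a0g(&r, saved_a0g);
	printf("fixed order: a0g = 0x%02x\n", r.a0g);
	return 0;
}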
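
And a rough sketch of the calling convention the second patch
introduces: the new int argument tells the fixup path whether the
unaligned access was anticipated (as with trapped_io), so only
unexpected accesses produce a warning. Only the prototype change
itself comes from the patch; the parameter name and the toy body
below are assumptions for illustration.

#include <stdio.h>

/* Hypothetical userspace model of the updated prototype; the real
 * function lives in the kernel and emulates the faulting insn. */
static int handle_unaligned_access_model(unsigned short insn, int expected)
{
	if (!expected)
		fprintf(stderr,
			"fixing up unexpected unaligned access (insn=0x%04x)\n",
			(unsigned int)insn);
	/* ... perform the actual fixup here ... */
	return 0;
}

int main(void)
{
	handle_unaligned_access_model(0x6522, 1);	/* expected (e.g. trapped_io): silent */
	handle_unaligned_access_model(0x6522, 0);	/* unexpected: warns */
	return 0;
}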