Diffstat (limited to 'arch/sh/include/asm')
39 files changed, 1029 insertions, 158 deletions
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 43910cdf78a..e121c30f797 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -1,6 +1,6 @@ include include/asm-generic/Kbuild.asm -header-y += cpu-features.h +header-y += cachectl.h cpu-features.h unifdef-y += unistd_32.h unifdef-y += unistd_64.h diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h index c0171804016..d02c01b3e6b 100644 --- a/arch/sh/include/asm/bug.h +++ b/arch/sh/include/asm/bug.h @@ -2,6 +2,7 @@ #define __ASM_SH_BUG_H #define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */ +#define BUGFLAG_UNWINDER (1 << 1) #ifdef CONFIG_GENERIC_BUG #define HAVE_ARCH_BUG @@ -72,6 +73,36 @@ do { \ unlikely(__ret_warn_on); \ }) +#define UNWINDER_BUG() \ +do { \ + __asm__ __volatile__ ( \ + "1:\t.short %O0\n" \ + _EMIT_BUG_ENTRY \ + : \ + : "n" (TRAPA_BUG_OPCODE), \ + "i" (__FILE__), \ + "i" (__LINE__), \ + "i" (BUGFLAG_UNWINDER), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) + +#define UNWINDER_BUG_ON(x) ({ \ + int __ret_unwinder_on = !!(x); \ + if (__builtin_constant_p(__ret_unwinder_on)) { \ + if (__ret_unwinder_on) \ + UNWINDER_BUG(); \ + } else { \ + if (unlikely(__ret_unwinder_on)) \ + UNWINDER_BUG(); \ + } \ + unlikely(__ret_unwinder_on); \ +}) + +#else + +#define UNWINDER_BUG BUG +#define UNWINDER_BUG_ON BUG_ON + #endif /* CONFIG_GENERIC_BUG */ #include <asm-generic/bug.h> diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h index 4924ff6f543..46260fcbdf4 100644 --- a/arch/sh/include/asm/bugs.h +++ b/arch/sh/include/asm/bugs.h @@ -21,25 +21,25 @@ static void __init check_bugs(void) current_cpu_data.loops_per_jiffy = loops_per_jiffy; - switch (current_cpu_data.type) { - case CPU_SH7619: + switch (current_cpu_data.family) { + case CPU_FAMILY_SH2: *p++ = '2'; break; - case CPU_SH7201 ... CPU_MXG: + case CPU_FAMILY_SH2A: *p++ = '2'; *p++ = 'a'; break; - case CPU_SH7705 ... CPU_SH7729: + case CPU_FAMILY_SH3: *p++ = '3'; break; - case CPU_SH7750 ... CPU_SH4_501: + case CPU_FAMILY_SH4: *p++ = '4'; break; - case CPU_SH7763 ... CPU_SHX3: + case CPU_FAMILY_SH4A: *p++ = '4'; *p++ = 'a'; break; - case CPU_SH7343 ... CPU_SH7366: + case CPU_FAMILY_SH4AL_DSP: *p++ = '4'; *p++ = 'a'; *p++ = 'l'; @@ -48,15 +48,15 @@ static void __init check_bugs(void) *p++ = 's'; *p++ = 'p'; break; - case CPU_SH5_101 ... CPU_SH5_103: + case CPU_FAMILY_SH5: *p++ = '6'; *p++ = '4'; break; - case CPU_SH_NONE: + case CPU_FAMILY_UNKNOWN: /* - * Specifically use CPU_SH_NONE rather than default:, - * so we're able to have the compiler whine about - * unhandled enumerations. + * Specifically use CPU_FAMILY_UNKNOWN rather than + * default:, so we're able to have the compiler whine + * about unhandled enumerations. */ break; } diff --git a/arch/sh/include/asm/cachectl.h b/arch/sh/include/asm/cachectl.h new file mode 100644 index 00000000000..6ffb4b7a212 --- /dev/null +++ b/arch/sh/include/asm/cachectl.h @@ -0,0 +1,19 @@ +#ifndef _SH_CACHECTL_H +#define _SH_CACHECTL_H + +/* Definitions for the cacheflush system call. 
*/ + +#define CACHEFLUSH_D_INVAL 0x1 /* invalidate (without write back) */ +#define CACHEFLUSH_D_WB 0x2 /* write back (without invalidate) */ +#define CACHEFLUSH_D_PURGE 0x3 /* writeback and invalidate */ + +#define CACHEFLUSH_I 0x4 + +/* + * Options for cacheflush system call + */ +#define ICACHE CACHEFLUSH_I /* flush instruction cache */ +#define DCACHE CACHEFLUSH_D_PURGE /* writeback and flush data cache */ +#define BCACHE (ICACHE|DCACHE) /* flush both caches */ + +#endif /* _SH_CACHECTL_H */ diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index 4c5462daa74..c29918f3c81 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -3,45 +3,65 @@ #ifdef __KERNEL__ -#ifdef CONFIG_CACHE_OFF +#include <linux/mm.h> + /* - * Nothing to do when the cache is disabled, initial flush and explicit - * disabling is handled at CPU init time. + * Cache flushing: + * + * - flush_cache_all() flushes entire cache + * - flush_cache_mm(mm) flushes the specified mm context's cache lines + * - flush_cache_dup mm(mm) handles cache flushing when forking + * - flush_cache_page(mm, vmaddr, pfn) flushes a single page + * - flush_cache_range(vma, start, end) flushes a range of pages * - * See arch/sh/kernel/cpu/init.c:cache_init(). + * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache + * - flush_icache_range(start, end) flushes(invalidates) a range for icache + * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache + * - flush_cache_sigtramp(vaddr) flushes the signal trampoline */ -#define p3_cache_init() do { } while (0) -#define flush_cache_all() do { } while (0) -#define flush_cache_mm(mm) do { } while (0) -#define flush_cache_dup_mm(mm) do { } while (0) -#define flush_cache_range(vma, start, end) do { } while (0) -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) -#define flush_dcache_page(page) do { } while (0) -#define flush_icache_range(start, end) do { } while (0) -#define flush_icache_page(vma,pg) do { } while (0) -#define flush_dcache_mmap_lock(mapping) do { } while (0) -#define flush_dcache_mmap_unlock(mapping) do { } while (0) -#define flush_cache_sigtramp(vaddr) do { } while (0) -#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) -#define __flush_wback_region(start, size) do { (void)(start); } while (0) -#define __flush_purge_region(start, size) do { (void)(start); } while (0) -#define __flush_invalidate_region(start, size) do { (void)(start); } while (0) -#else -#include <cpu/cacheflush.h> +extern void (*local_flush_cache_all)(void *args); +extern void (*local_flush_cache_mm)(void *args); +extern void (*local_flush_cache_dup_mm)(void *args); +extern void (*local_flush_cache_page)(void *args); +extern void (*local_flush_cache_range)(void *args); +extern void (*local_flush_dcache_page)(void *args); +extern void (*local_flush_icache_range)(void *args); +extern void (*local_flush_icache_page)(void *args); +extern void (*local_flush_cache_sigtramp)(void *args); -/* - * Consistent DMA requires that the __flush_xxx() primitives must be set - * for any of the enabled non-coherent caches (most of the UP CPUs), - * regardless of PIPT or VIPT cache configurations. 
- */ +static inline void cache_noop(void *args) { } + +extern void (*__flush_wback_region)(void *start, int size); +extern void (*__flush_purge_region)(void *start, int size); +extern void (*__flush_invalidate_region)(void *start, int size); + +extern void flush_cache_all(void); +extern void flush_cache_mm(struct mm_struct *mm); +extern void flush_cache_dup_mm(struct mm_struct *mm); +extern void flush_cache_page(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn); +extern void flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +extern void flush_dcache_page(struct page *page); +extern void flush_icache_range(unsigned long start, unsigned long end); +extern void flush_icache_page(struct vm_area_struct *vma, + struct page *page); +extern void flush_cache_sigtramp(unsigned long address); + +struct flusher_data { + struct vm_area_struct *vma; + unsigned long addr1, addr2; +}; -/* Flush (write-back only) a region (smaller than a page) */ -extern void __flush_wback_region(void *start, int size); -/* Flush (write-back & invalidate) a region (smaller than a page) */ -extern void __flush_purge_region(void *start, int size); -/* Flush (invalidate only) a region (smaller than a page) */ -extern void __flush_invalidate_region(void *start, int size); -#endif +#define ARCH_HAS_FLUSH_ANON_PAGE +extern void __flush_anon_page(struct page *page, unsigned long); + +static inline void flush_anon_page(struct vm_area_struct *vma, + struct page *page, unsigned long vmaddr) +{ + if (boot_cpu_data.dcache.n_aliases && PageAnon(page)) + __flush_anon_page(page, vmaddr); +} #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE static inline void flush_kernel_dcache_page(struct page *page) @@ -49,7 +69,6 @@ static inline void flush_kernel_dcache_page(struct page *page) flush_dcache_page(page); } -#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_CACHE_OFF) extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len); @@ -57,23 +76,20 @@ extern void copy_to_user_page(struct vm_area_struct *vma, extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len); -#else -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - flush_cache_page(vma, vaddr, page_to_pfn(page));\ - memcpy(dst, src, len); \ - flush_icache_user_range(vma, page, vaddr, len); \ - } while (0) - -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - flush_cache_page(vma, vaddr, page_to_pfn(page));\ - memcpy(dst, src, len); \ - } while (0) -#endif #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) + +void kmap_coherent_init(void); +void *kmap_coherent(struct page *page, unsigned long addr); +void kunmap_coherent(void *kvaddr); + +#define PG_dcache_dirty PG_arch_1 + +void cpu_cache_init(void); + #endif /* __KERNEL__ */ #endif /* __ASM_SH_CACHEFLUSH_H */ diff --git a/arch/sh/include/asm/device.h b/arch/sh/include/asm/device.h index 8688a88303e..b16debfe8c1 100644 --- a/arch/sh/include/asm/device.h +++ b/arch/sh/include/asm/device.h @@ -3,7 +3,9 @@ * * This file is released under the GPLv2 */ -#include <asm-generic/device.h> + +struct dev_archdata { +}; struct platform_device; /* allocate contiguous memory chunk and fill in struct resource */ 
@@ -12,3 +14,15 @@ int platform_resource_setup_memory(struct platform_device *pdev, void plat_early_device_setup(void); +#define PDEV_ARCHDATA_FLAG_INIT 0 +#define PDEV_ARCHDATA_FLAG_IDLE 1 +#define PDEV_ARCHDATA_FLAG_SUSP 2 + +struct pdev_archdata { + int hwblk_id; +#ifdef CONFIG_PM_RUNTIME + unsigned long flags; + struct list_head entry; + struct mutex mutex; +#endif +}; diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h index 0c8f8e14622..68a5f4cb034 100644 --- a/arch/sh/include/asm/dma-sh.h +++ b/arch/sh/include/asm/dma-sh.h @@ -16,6 +16,7 @@ /* DMAOR contorl: The DMAOR access size is different by CPU.*/ #if defined(CONFIG_CPU_SUBTYPE_SH7723) || \ + defined(CONFIG_CPU_SUBTYPE_SH7724) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) #define dmaor_read_reg(n) \ diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h new file mode 100644 index 00000000000..ced6795891a --- /dev/null +++ b/arch/sh/include/asm/dwarf.h @@ -0,0 +1,398 @@ +/* + * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + */ +#ifndef __ASM_SH_DWARF_H +#define __ASM_SH_DWARF_H + +#ifdef CONFIG_DWARF_UNWINDER + +/* + * DWARF expression operations + */ +#define DW_OP_addr 0x03 +#define DW_OP_deref 0x06 +#define DW_OP_const1u 0x08 +#define DW_OP_const1s 0x09 +#define DW_OP_const2u 0x0a +#define DW_OP_const2s 0x0b +#define DW_OP_const4u 0x0c +#define DW_OP_const4s 0x0d +#define DW_OP_const8u 0x0e +#define DW_OP_const8s 0x0f +#define DW_OP_constu 0x10 +#define DW_OP_consts 0x11 +#define DW_OP_dup 0x12 +#define DW_OP_drop 0x13 +#define DW_OP_over 0x14 +#define DW_OP_pick 0x15 +#define DW_OP_swap 0x16 +#define DW_OP_rot 0x17 +#define DW_OP_xderef 0x18 +#define DW_OP_abs 0x19 +#define DW_OP_and 0x1a +#define DW_OP_div 0x1b +#define DW_OP_minus 0x1c +#define DW_OP_mod 0x1d +#define DW_OP_mul 0x1e +#define DW_OP_neg 0x1f +#define DW_OP_not 0x20 +#define DW_OP_or 0x21 +#define DW_OP_plus 0x22 +#define DW_OP_plus_uconst 0x23 +#define DW_OP_shl 0x24 +#define DW_OP_shr 0x25 +#define DW_OP_shra 0x26 +#define DW_OP_xor 0x27 +#define DW_OP_skip 0x2f +#define DW_OP_bra 0x28 +#define DW_OP_eq 0x29 +#define DW_OP_ge 0x2a +#define DW_OP_gt 0x2b +#define DW_OP_le 0x2c +#define DW_OP_lt 0x2d +#define DW_OP_ne 0x2e +#define DW_OP_lit0 0x30 +#define DW_OP_lit1 0x31 +#define DW_OP_lit2 0x32 +#define DW_OP_lit3 0x33 +#define DW_OP_lit4 0x34 +#define DW_OP_lit5 0x35 +#define DW_OP_lit6 0x36 +#define DW_OP_lit7 0x37 +#define DW_OP_lit8 0x38 +#define DW_OP_lit9 0x39 +#define DW_OP_lit10 0x3a +#define DW_OP_lit11 0x3b +#define DW_OP_lit12 0x3c +#define DW_OP_lit13 0x3d +#define DW_OP_lit14 0x3e +#define DW_OP_lit15 0x3f +#define DW_OP_lit16 0x40 +#define DW_OP_lit17 0x41 +#define DW_OP_lit18 0x42 +#define DW_OP_lit19 0x43 +#define DW_OP_lit20 0x44 +#define DW_OP_lit21 0x45 +#define DW_OP_lit22 0x46 +#define DW_OP_lit23 0x47 +#define DW_OP_lit24 0x48 +#define DW_OP_lit25 0x49 +#define DW_OP_lit26 0x4a +#define DW_OP_lit27 0x4b +#define DW_OP_lit28 0x4c +#define DW_OP_lit29 0x4d +#define DW_OP_lit30 0x4e +#define DW_OP_lit31 0x4f +#define DW_OP_reg0 0x50 +#define DW_OP_reg1 0x51 +#define DW_OP_reg2 0x52 +#define DW_OP_reg3 0x53 +#define DW_OP_reg4 0x54 +#define DW_OP_reg5 0x55 +#define DW_OP_reg6 0x56 +#define DW_OP_reg7 0x57 +#define DW_OP_reg8 0x58 +#define DW_OP_reg9 0x59 +#define DW_OP_reg10 
0x5a +#define DW_OP_reg11 0x5b +#define DW_OP_reg12 0x5c +#define DW_OP_reg13 0x5d +#define DW_OP_reg14 0x5e +#define DW_OP_reg15 0x5f +#define DW_OP_reg16 0x60 +#define DW_OP_reg17 0x61 +#define DW_OP_reg18 0x62 +#define DW_OP_reg19 0x63 +#define DW_OP_reg20 0x64 +#define DW_OP_reg21 0x65 +#define DW_OP_reg22 0x66 +#define DW_OP_reg23 0x67 +#define DW_OP_reg24 0x68 +#define DW_OP_reg25 0x69 +#define DW_OP_reg26 0x6a +#define DW_OP_reg27 0x6b +#define DW_OP_reg28 0x6c +#define DW_OP_reg29 0x6d +#define DW_OP_reg30 0x6e +#define DW_OP_reg31 0x6f +#define DW_OP_breg0 0x70 +#define DW_OP_breg1 0x71 +#define DW_OP_breg2 0x72 +#define DW_OP_breg3 0x73 +#define DW_OP_breg4 0x74 +#define DW_OP_breg5 0x75 +#define DW_OP_breg6 0x76 +#define DW_OP_breg7 0x77 +#define DW_OP_breg8 0x78 +#define DW_OP_breg9 0x79 +#define DW_OP_breg10 0x7a +#define DW_OP_breg11 0x7b +#define DW_OP_breg12 0x7c +#define DW_OP_breg13 0x7d +#define DW_OP_breg14 0x7e +#define DW_OP_breg15 0x7f +#define DW_OP_breg16 0x80 +#define DW_OP_breg17 0x81 +#define DW_OP_breg18 0x82 +#define DW_OP_breg19 0x83 +#define DW_OP_breg20 0x84 +#define DW_OP_breg21 0x85 +#define DW_OP_breg22 0x86 +#define DW_OP_breg23 0x87 +#define DW_OP_breg24 0x88 +#define DW_OP_breg25 0x89 +#define DW_OP_breg26 0x8a +#define DW_OP_breg27 0x8b +#define DW_OP_breg28 0x8c +#define DW_OP_breg29 0x8d +#define DW_OP_breg30 0x8e +#define DW_OP_breg31 0x8f +#define DW_OP_regx 0x90 +#define DW_OP_fbreg 0x91 +#define DW_OP_bregx 0x92 +#define DW_OP_piece 0x93 +#define DW_OP_deref_size 0x94 +#define DW_OP_xderef_size 0x95 +#define DW_OP_nop 0x96 +#define DW_OP_push_object_address 0x97 +#define DW_OP_call2 0x98 +#define DW_OP_call4 0x99 +#define DW_OP_call_ref 0x9a +#define DW_OP_form_tls_address 0x9b +#define DW_OP_call_frame_cfa 0x9c +#define DW_OP_bit_piece 0x9d +#define DW_OP_lo_user 0xe0 +#define DW_OP_hi_user 0xff + +/* + * Addresses used in FDE entries in the .eh_frame section may be encoded + * using one of the following encodings. + */ +#define DW_EH_PE_absptr 0x00 +#define DW_EH_PE_omit 0xff +#define DW_EH_PE_uleb128 0x01 +#define DW_EH_PE_udata2 0x02 +#define DW_EH_PE_udata4 0x03 +#define DW_EH_PE_udata8 0x04 +#define DW_EH_PE_sleb128 0x09 +#define DW_EH_PE_sdata2 0x0a +#define DW_EH_PE_sdata4 0x0b +#define DW_EH_PE_sdata8 0x0c +#define DW_EH_PE_signed 0x09 + +#define DW_EH_PE_pcrel 0x10 + +/* + * The architecture-specific register number that contains the return + * address in the .debug_frame table. + */ +#define DWARF_ARCH_RA_REG 17 + +#ifndef __ASSEMBLY__ +/* + * Read either the frame pointer (r14) or the stack pointer (r15). + * NOTE: this MUST be inlined. + */ +static __always_inline unsigned long dwarf_read_arch_reg(unsigned int reg) +{ + unsigned long value = 0; + + switch (reg) { + case 14: + __asm__ __volatile__("mov r14, %0\n" : "=r" (value)); + break; + case 15: + __asm__ __volatile__("mov r15, %0\n" : "=r" (value)); + break; + default: + BUG(); + } + + return value; +} + +/** + * dwarf_cie - Common Information Entry + */ +struct dwarf_cie { + unsigned long length; + unsigned long cie_id; + unsigned char version; + const char *augmentation; + unsigned int code_alignment_factor; + int data_alignment_factor; + + /* Which column in the rule table represents return addr of func. 
*/ + unsigned int return_address_reg; + + unsigned char *initial_instructions; + unsigned char *instructions_end; + + unsigned char encoding; + + unsigned long cie_pointer; + + struct list_head link; + + unsigned long flags; +#define DWARF_CIE_Z_AUGMENTATION (1 << 0) +}; + +/** + * dwarf_fde - Frame Description Entry + */ +struct dwarf_fde { + unsigned long length; + unsigned long cie_pointer; + struct dwarf_cie *cie; + unsigned long initial_location; + unsigned long address_range; + unsigned char *instructions; + unsigned char *end; + struct list_head link; +}; + +/** + * dwarf_frame - DWARF information for a frame in the call stack + */ +struct dwarf_frame { + struct dwarf_frame *prev, *next; + + unsigned long pc; + + struct list_head reg_list; + + unsigned long cfa; + + /* Valid when DW_FRAME_CFA_REG_OFFSET is set in flags */ + unsigned int cfa_register; + unsigned int cfa_offset; + + /* Valid when DW_FRAME_CFA_REG_EXP is set in flags */ + unsigned char *cfa_expr; + unsigned int cfa_expr_len; + + unsigned long flags; +#define DWARF_FRAME_CFA_REG_OFFSET (1 << 0) +#define DWARF_FRAME_CFA_REG_EXP (1 << 1) + + unsigned long return_addr; +}; + +/** + * dwarf_reg - DWARF register + * @flags: Describes how to calculate the value of this register + */ +struct dwarf_reg { + struct list_head link; + + unsigned int number; + + unsigned long addr; + unsigned long flags; +#define DWARF_REG_OFFSET (1 << 0) +#define DWARF_VAL_OFFSET (1 << 1) +#define DWARF_UNDEFINED (1 << 2) +}; + +/* + * Call Frame instruction opcodes. + */ +#define DW_CFA_advance_loc 0x40 +#define DW_CFA_offset 0x80 +#define DW_CFA_restore 0xc0 +#define DW_CFA_nop 0x00 +#define DW_CFA_set_loc 0x01 +#define DW_CFA_advance_loc1 0x02 +#define DW_CFA_advance_loc2 0x03 +#define DW_CFA_advance_loc4 0x04 +#define DW_CFA_offset_extended 0x05 +#define DW_CFA_restore_extended 0x06 +#define DW_CFA_undefined 0x07 +#define DW_CFA_same_value 0x08 +#define DW_CFA_register 0x09 +#define DW_CFA_remember_state 0x0a +#define DW_CFA_restore_state 0x0b +#define DW_CFA_def_cfa 0x0c +#define DW_CFA_def_cfa_register 0x0d +#define DW_CFA_def_cfa_offset 0x0e +#define DW_CFA_def_cfa_expression 0x0f +#define DW_CFA_expression 0x10 +#define DW_CFA_offset_extended_sf 0x11 +#define DW_CFA_def_cfa_sf 0x12 +#define DW_CFA_def_cfa_offset_sf 0x13 +#define DW_CFA_val_offset 0x14 +#define DW_CFA_val_offset_sf 0x15 +#define DW_CFA_val_expression 0x16 +#define DW_CFA_lo_user 0x1c +#define DW_CFA_hi_user 0x3f + +/* GNU extension opcodes */ +#define DW_CFA_GNU_args_size 0x2e +#define DW_CFA_GNU_negative_offset_extended 0x2f + +/* + * Some call frame instructions encode their operands in the opcode. We + * need some helper functions to extract both the opcode and operands + * from an instruction. + */ +static inline unsigned int DW_CFA_opcode(unsigned long insn) +{ + return (insn & 0xc0); +} + +static inline unsigned int DW_CFA_operand(unsigned long insn) +{ + return (insn & 0x3f); +} + +#define DW_EH_FRAME_CIE 0 /* .eh_frame CIE IDs are 0 */ +#define DW_CIE_ID 0xffffffff +#define DW64_CIE_ID 0xffffffffffffffffULL + +/* + * DWARF FDE/CIE length field values. 
+ */ +#define DW_EXT_LO 0xfffffff0 +#define DW_EXT_HI 0xffffffff +#define DW_EXT_DWARF64 DW_EXT_HI + +extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, + struct dwarf_frame *); +#endif /* !__ASSEMBLY__ */ + +#define CFI_STARTPROC .cfi_startproc +#define CFI_ENDPROC .cfi_endproc +#define CFI_DEF_CFA .cfi_def_cfa +#define CFI_REGISTER .cfi_register +#define CFI_REL_OFFSET .cfi_rel_offset +#define CFI_UNDEFINED .cfi_undefined + +#else + +/* + * Use the asm comment character to ignore the rest of the line. + */ +#define CFI_IGNORE ! + +#define CFI_STARTPROC CFI_IGNORE +#define CFI_ENDPROC CFI_IGNORE +#define CFI_DEF_CFA CFI_IGNORE +#define CFI_REGISTER CFI_IGNORE +#define CFI_REL_OFFSET CFI_IGNORE +#define CFI_UNDEFINED CFI_IGNORE + +#ifndef __ASSEMBLY__ +static inline void dwarf_unwinder_init(void) +{ +} +#endif + +#endif /* CONFIG_DWARF_UNWINDER */ + +#endif /* __ASM_SH_DWARF_H */ diff --git a/arch/sh/include/asm/entry-macros.S b/arch/sh/include/asm/entry-macros.S index 3a4752a6572..cc43a55e1fc 100644 --- a/arch/sh/include/asm/entry-macros.S +++ b/arch/sh/include/asm/entry-macros.S @@ -7,7 +7,7 @@ .endm .macro sti - mov #0xf0, r11 + mov #0xfffffff0, r11 extu.b r11, r11 not r11, r11 stc sr, r10 @@ -31,8 +31,92 @@ #endif .endm +#ifdef CONFIG_TRACE_IRQFLAGS + + .macro TRACE_IRQS_ON + mov.l r0, @-r15 + mov.l r1, @-r15 + mov.l r2, @-r15 + mov.l r3, @-r15 + mov.l r4, @-r15 + mov.l r5, @-r15 + mov.l r6, @-r15 + mov.l r7, @-r15 + + mov.l 7834f, r0 + jsr @r0 + nop + + mov.l @r15+, r7 + mov.l @r15+, r6 + mov.l @r15+, r5 + mov.l @r15+, r4 + mov.l @r15+, r3 + mov.l @r15+, r2 + mov.l @r15+, r1 + mov.l @r15+, r0 + mov.l 7834f, r0 + + bra 7835f + nop + .balign 4 +7834: .long trace_hardirqs_on +7835: + .endm + .macro TRACE_IRQS_OFF + + mov.l r0, @-r15 + mov.l r1, @-r15 + mov.l r2, @-r15 + mov.l r3, @-r15 + mov.l r4, @-r15 + mov.l r5, @-r15 + mov.l r6, @-r15 + mov.l r7, @-r15 + + mov.l 7834f, r0 + jsr @r0 + nop + + mov.l @r15+, r7 + mov.l @r15+, r6 + mov.l @r15+, r5 + mov.l @r15+, r4 + mov.l @r15+, r3 + mov.l @r15+, r2 + mov.l @r15+, r1 + mov.l @r15+, r0 + mov.l 7834f, r0 + + bra 7835f + nop + .balign 4 +7834: .long trace_hardirqs_off +7835: + .endm + +#else + .macro TRACE_IRQS_ON + .endm + + .macro TRACE_IRQS_OFF + .endm +#endif + #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4) # define PREF(x) pref @x #else # define PREF(x) nop #endif + + /* + * Macro for use within assembly. Because the DWARF unwinder + * needs to use the frame register to unwind the stack, we + * need to setup r14 with the value of the stack pointer as + * the return address is usually on the stack somewhere. 
+ */ + .macro setup_frame_reg +#ifdef CONFIG_DWARF_UNWINDER + mov r15, r14 +#endif + .endm diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 8fea7d8c825..12f3a31f20a 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -4,6 +4,7 @@ #ifdef CONFIG_FUNCTION_TRACER #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ +#define FTRACE_SYSCALL_MAX NR_syscalls #ifndef __ASSEMBLY__ extern void mcount(void); @@ -11,10 +12,13 @@ extern void mcount(void); #define MCOUNT_ADDR ((long)(mcount)) #ifdef CONFIG_DYNAMIC_FTRACE -#define CALLER_ADDR ((long)(ftrace_caller)) +#define CALL_ADDR ((long)(ftrace_call)) #define STUB_ADDR ((long)(ftrace_stub)) +#define GRAPH_ADDR ((long)(ftrace_graph_call)) +#define CALLER_ADDR ((long)(ftrace_caller)) -#define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALLER_ADDR) >> 1) +#define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALL_ADDR) - 4) +#define GRAPH_INSN_OFFSET ((CALLER_ADDR - GRAPH_ADDR) - 4) struct dyn_arch_ftrace { /* No extra data needed on sh */ diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h index 715ee237fc7..a5be4afa790 100644 --- a/arch/sh/include/asm/hardirq.h +++ b/arch/sh/include/asm/hardirq.h @@ -1,16 +1,9 @@ #ifndef __ASM_SH_HARDIRQ_H #define __ASM_SH_HARDIRQ_H -#include <linux/threads.h> -#include <linux/irq.h> - -/* entry.S is sensitive to the offsets of these fields */ -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ - extern void ack_bad_irq(unsigned int irq); +#define ack_bad_irq ack_bad_irq + +#include <asm-generic/hardirq.h> #endif /* __ASM_SH_HARDIRQ_H */ diff --git a/arch/sh/include/asm/heartbeat.h b/arch/sh/include/asm/heartbeat.h index 724a43ed245..caaafe5a3ef 100644 --- a/arch/sh/include/asm/heartbeat.h +++ b/arch/sh/include/asm/heartbeat.h @@ -11,6 +11,7 @@ struct heartbeat_data { unsigned int nr_bits; struct timer_list timer; unsigned int regsize; + unsigned int mask; unsigned long flags; }; diff --git a/arch/sh/include/asm/hwblk.h b/arch/sh/include/asm/hwblk.h new file mode 100644 index 00000000000..5d3ccae4202 --- /dev/null +++ b/arch/sh/include/asm/hwblk.h @@ -0,0 +1,72 @@ +#ifndef __ASM_SH_HWBLK_H +#define __ASM_SH_HWBLK_H + +#include <asm/clock.h> +#include <asm/io.h> + +#define HWBLK_CNT_USAGE 0 +#define HWBLK_CNT_IDLE 1 +#define HWBLK_CNT_DEVICES 2 +#define HWBLK_CNT_NR 3 + +#define HWBLK_AREA_FLAG_PARENT (1 << 0) /* valid parent */ + +#define HWBLK_AREA(_flags, _parent) \ +{ \ + .flags = _flags, \ + .parent = _parent, \ +} + +struct hwblk_area { + int cnt[HWBLK_CNT_NR]; + unsigned char parent; + unsigned char flags; +}; + +#define HWBLK(_mstp, _bit, _area) \ +{ \ + .mstp = (void __iomem *)_mstp, \ + .bit = _bit, \ + .area = _area, \ +} + +struct hwblk { + void __iomem *mstp; + unsigned char bit; + unsigned char area; + int cnt[HWBLK_CNT_NR]; +}; + +struct hwblk_info { + struct hwblk_area *areas; + int nr_areas; + struct hwblk *hwblks; + int nr_hwblks; +}; + +/* Should be defined by processor-specific code */ +int arch_hwblk_init(void); +int arch_hwblk_sleep_mode(void); + +int hwblk_register(struct hwblk_info *info); +int hwblk_init(void); + +void hwblk_enable(struct hwblk_info *info, int hwblk); +void hwblk_disable(struct hwblk_info *info, int hwblk); + +void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int cnt); +void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int cnt); + +/* allow clocks to enable and disable hardware blocks */ 
+#define SH_HWBLK_CLK(_name, _id, _parent, _hwblk, _flags) \ +{ \ + .name = _name, \ + .id = _id, \ + .parent = _parent, \ + .arch_flags = _hwblk, \ + .flags = _flags, \ +} + +int sh_hwblk_clk_register(struct clk *clks, int nr); + +#endif /* __ASM_SH_HWBLK_H */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 25348141674..5be45ea4dfe 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -92,8 +92,12 @@ static inline void ctrl_delay(void) { -#ifdef P2SEG +#ifdef CONFIG_CPU_SH4 + __raw_readw(CCN_PVR); +#elif defined(P2SEG) __raw_readw(P2SEG); +#else +#error "Need a dummy address for delay" #endif } @@ -146,6 +150,7 @@ __BUILD_MEMORY_STRING(q, u64) #define readl_relaxed(a) readl(a) #define readq_relaxed(a) readq(a) +#ifndef CONFIG_GENERIC_IOMAP /* Simple MMIO */ #define ioread8(a) __raw_readb(a) #define ioread16(a) __raw_readw(a) @@ -166,6 +171,15 @@ __BUILD_MEMORY_STRING(q, u64) #define iowrite8_rep(a, s, c) __raw_writesb((a), (s), (c)) #define iowrite16_rep(a, s, c) __raw_writesw((a), (s), (c)) #define iowrite32_rep(a, s, c) __raw_writesl((a), (s), (c)) +#endif + +#define mmio_insb(p,d,c) __raw_readsb(p,d,c) +#define mmio_insw(p,d,c) __raw_readsw(p,d,c) +#define mmio_insl(p,d,c) __raw_readsl(p,d,c) + +#define mmio_outsb(p,s,c) __raw_writesb(p,s,c) +#define mmio_outsw(p,s,c) __raw_writesw(p,s,c) +#define mmio_outsl(p,s,c) __raw_writesl(p,s,c) /* synco on SH-4A, otherwise a nop */ #define mmiowb() wmb() diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h index 0b9f896f203..985219f9759 100644 --- a/arch/sh/include/asm/kdebug.h +++ b/arch/sh/include/asm/kdebug.h @@ -4,6 +4,7 @@ /* Grossly misnamed. */ enum die_val { DIE_TRAP, + DIE_NMI, DIE_OOPS, }; diff --git a/arch/sh/include/asm/kgdb.h b/arch/sh/include/asm/kgdb.h index 72704ed725e..4235e228d92 100644 --- a/arch/sh/include/asm/kgdb.h +++ b/arch/sh/include/asm/kgdb.h @@ -30,9 +30,6 @@ static inline void arch_kgdb_breakpoint(void) __asm__ __volatile__ ("trapa #0x3c\n"); } -/* State info */ -extern char in_nmi; /* Debounce flag to prevent NMI reentry*/ - #define BUFMAX 2048 #define CACHE_FLUSH_IS_SAFE 1 diff --git a/arch/sh/include/asm/lmb.h b/arch/sh/include/asm/lmb.h new file mode 100644 index 00000000000..9b437f657ff --- /dev/null +++ b/arch/sh/include/asm/lmb.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH_LMB_H +#define __ASM_SH_LMB_H + +#define LMB_REAL_LIMIT 0 + +#endif /* __ASM_SH_LMB_H */ diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 67d8946db19..41080b173a7 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -69,7 +69,7 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) * We exhaust ASID of this version. * Flush all TLB and start new cycle. 
*/ - flush_tlb_all(); + local_flush_tlb_all(); #ifdef CONFIG_SUPERH64 /* diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 49592c780a6..81bffc0d686 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -50,26 +50,24 @@ extern unsigned long shm_align_mask; extern unsigned long max_low_pfn, min_low_pfn; extern unsigned long memory_start, memory_end; -extern void clear_page(void *to); +static inline unsigned long +pages_do_alias(unsigned long addr1, unsigned long addr2) +{ + return (addr1 ^ addr2) & shm_align_mask; +} + + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, void *from); -#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ - (defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \ - defined(CONFIG_SH7705_CACHE_32KB)) struct page; struct vm_area_struct; -extern void clear_user_page(void *to, unsigned long address, struct page *page); -extern void copy_user_page(void *to, void *from, unsigned long address, - struct page *page); -#if defined(CONFIG_CPU_SH4) + extern void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma); #define __HAVE_ARCH_COPY_USER_HIGHPAGE -#endif -#else -#define clear_user_page(page, vaddr, pg) clear_page(page) -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) -#endif +extern void clear_user_highpage(struct page *page, unsigned long vaddr); +#define clear_user_highpage clear_user_highpage /* * These are used to make use of C type-checking.. diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 2a011b18090..4f3efa7d5a6 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -36,6 +36,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define NEFF_SIGN (1LL << (NEFF - 1)) #define NEFF_MASK (-1LL << NEFF) +static inline unsigned long long neff_sign_extend(unsigned long val) +{ + unsigned long long extended = val; + return (extended & NEFF_SIGN) ? 
(extended | NEFF_MASK) : extended; +} + #ifdef CONFIG_29BIT #define NPHYS 29 #else @@ -133,27 +139,25 @@ typedef pte_t *pte_addr_t; */ #define pgtable_cache_init() do { } while (0) -#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ - defined(CONFIG_SH7705_CACHE_32KB)) -struct mm_struct; -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR -pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -#endif - struct vm_area_struct; -extern void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte); + +extern void __update_cache(struct vm_area_struct *vma, + unsigned long address, pte_t pte); +extern void __update_tlb(struct vm_area_struct *vma, + unsigned long address, pte_t pte); + +static inline void +update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) +{ + __update_cache(vma, address, pte); + __update_tlb(vma, address, pte); +} + extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern void paging_init(void); extern void page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd); -#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_CPU_SH4) && defined(CONFIG_MMU) -extern void kmap_coherent_init(void); -#else -#define kmap_coherent_init() do { } while (0) -#endif - /* arch/sh/mm/mmap.c */ #define HAVE_ARCH_UNMAPPED_AREA #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h index 72ea209195b..c0d359ce337 100644 --- a/arch/sh/include/asm/pgtable_32.h +++ b/arch/sh/include/asm/pgtable_32.h @@ -20,7 +20,7 @@ * - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE. * * - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages. - * Bit 10 is used for _PAGE_ACCESSED, bit 11 remains unused. + * Bit 10 is used for _PAGE_ACCESSED, and bit 11 is used for _PAGE_SPECIAL. * * - On 29 bit platforms, bits 31 to 29 are used for the space attributes * and timing control which (together with bit 0) are moved into the @@ -52,6 +52,7 @@ #define _PAGE_PROTNONE 0x200 /* software: if not present */ #define _PAGE_ACCESSED 0x400 /* software: page referenced */ #define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */ +#define _PAGE_SPECIAL 0x800 /* software: special page */ #define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1) #define _PAGE_PR_MASK (_PAGE_RW | _PAGE_USER) @@ -86,6 +87,14 @@ #define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */ #define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */ +#ifndef CONFIG_X2TLB +/* copy the ptea attributes */ +static inline unsigned long copy_ptea_attributes(unsigned long x) +{ + return ((x >> 28) & 0xe) | (x & 0x1); +} +#endif + /* Mask which drops unused bits from the PTEL value */ #if defined(CONFIG_CPU_SH3) #define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED| \ @@ -148,8 +157,12 @@ # define _PAGE_SZHUGE (_PAGE_FLAGS_HARD) #endif +/* + * Mask of bits that are to be preserved accross pgprot changes. 
+ */ #define _PAGE_CHG_MASK \ - (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY) + (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \ + _PAGE_DIRTY | _PAGE_SPECIAL) #ifndef __ASSEMBLY__ @@ -328,7 +341,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) #define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY) #define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED) #define pte_file(pte) ((pte).pte_low & _PAGE_FILE) -#define pte_special(pte) (0) +#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL) #ifdef CONFIG_X2TLB #define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) @@ -358,8 +371,9 @@ PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY); PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY); PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED); PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED); +PTE_BIT_FUNC(low, mkspecial, |= _PAGE_SPECIAL); -static inline pte_t pte_mkspecial(pte_t pte) { return pte; } +#define __HAVE_ARCH_PTE_SPECIAL /* * Macro and implementation to make a page protection as uncachable. @@ -394,13 +408,19 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) /* to find an entry in a page-table-directory. */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) +#define __pgd_offset(address) pgd_index(address) /* to find an entry in a kernel page-table-directory */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) +#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) +#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + /* Find an entry in the third-level page table.. */ #define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define __pte_offset(address) pte_index(address) + #define pte_offset_kernel(dir, address) \ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) #define pte_offset_map(dir, address) pte_offset_kernel(dir, address) diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h index c78990cda55..17cdbecc3ad 100644 --- a/arch/sh/include/asm/pgtable_64.h +++ b/arch/sh/include/asm/pgtable_64.h @@ -60,6 +60,9 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep) /* To find an entry in a kernel PGD. */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) +#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) +#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) + /* * PMD level access routines. Same notes as above. 
*/ @@ -80,6 +83,8 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep) #define pte_index(address) \ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +#define __pte_offset(address) pte_index(address) + #define pte_offset_kernel(dir, addr) \ ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr))) diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index ff7daaf9a62..017e0c1807b 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -32,7 +32,7 @@ enum cpu_type { /* SH-4A types */ CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SH7786, - CPU_SH7723, CPU_SH7724, CPU_SHX3, + CPU_SH7723, CPU_SH7724, CPU_SH7757, CPU_SHX3, /* SH4AL-DSP types */ CPU_SH7343, CPU_SH7722, CPU_SH7366, @@ -44,6 +44,17 @@ enum cpu_type { CPU_SH_NONE }; +enum cpu_family { + CPU_FAMILY_SH2, + CPU_FAMILY_SH2A, + CPU_FAMILY_SH3, + CPU_FAMILY_SH4, + CPU_FAMILY_SH4A, + CPU_FAMILY_SH4AL_DSP, + CPU_FAMILY_SH5, + CPU_FAMILY_UNKNOWN, +}; + /* * TLB information structure * @@ -61,7 +72,7 @@ struct tlb_info { }; struct sh_cpuinfo { - unsigned int type; + unsigned int type, family; int cut_major, cut_minor; unsigned long loops_per_jiffy; unsigned long asid_cache; diff --git a/arch/sh/include/asm/romimage-macros.h b/arch/sh/include/asm/romimage-macros.h new file mode 100644 index 00000000000..ae17a150bb5 --- /dev/null +++ b/arch/sh/include/asm/romimage-macros.h @@ -0,0 +1,73 @@ +#ifndef __ROMIMAGE_MACRO_H +#define __ROMIMAGE_MACRO_H + +/* The LIST command is used to include comments in the script */ +.macro LIST comment +.endm + +/* The ED command is used to write a 32-bit word */ +.macro ED, addr, data + mov.l 1f, r1 + mov.l 2f, r0 + mov.l r0, @r1 + bra 3f + nop + .align 2 +1 : .long \addr +2 : .long \data +3 : +.endm + +/* The EW command is used to write a 16-bit word */ +.macro EW, addr, data + mov.l 1f, r1 + mov.l 2f, r0 + mov.w r0, @r1 + bra 3f + nop + .align 2 +1 : .long \addr +2 : .long \data +3 : +.endm + +/* The EB command is used to write an 8-bit word */ +.macro EB, addr, data + mov.l 1f, r1 + mov.l 2f, r0 + mov.b r0, @r1 + bra 3f + nop + .align 2 +1 : .long \addr +2 : .long \data +3 : +.endm + +/* The WAIT command is used to delay the execution */ +.macro WAIT, time + mov.l 2f, r3 +1 : + nop + tst r3, r3 + bf/s 1b + dt r3 + bra 3f + nop + .align 2 +2 : .long \time * 100 +3 : +.endm + +/* The DD command is used to read a 32-bit word */ +.macro DD, addr, addr2, nr + mov.l 1f, r1 + mov.l @r1, r0 + bra 2f + nop + .align 2 +1 : .long \addr +2 : +.endm + +#endif /* __ROMIMAGE_MACRO_H */ diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index 01a4076a371..a78701da775 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h @@ -7,6 +7,7 @@ extern void __nosave_begin, __nosave_end; extern long __machvec_start, __machvec_end; extern char __uncached_start, __uncached_end; extern char _ebss[]; +extern char __start_eh_frame[], __stop_eh_frame[]; #endif /* __ASM_SH_SECTIONS_H */ diff --git a/arch/sh/include/asm/sh_keysc.h b/arch/sh/include/asm/sh_keysc.h index b5a4dd5a972..4a65b1e40ea 100644 --- a/arch/sh/include/asm/sh_keysc.h +++ b/arch/sh/include/asm/sh_keysc.h @@ -7,6 +7,7 @@ struct sh_keysc_info { enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3 } mode; int scan_timing; /* 0 -> 7, see KYCR1, SCN[2:0] */ int delay; + int kycr2_delay; int keycodes[SH_KEYSC_MAXKEYS]; }; diff --git a/arch/sh/include/asm/stacktrace.h b/arch/sh/include/asm/stacktrace.h new file mode 100644 index 
00000000000..79701821371 --- /dev/null +++ b/arch/sh/include/asm/stacktrace.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2009 Matt Fleming + * + * Based on: + * The x86 implementation - arch/x86/include/asm/stacktrace.h + */ +#ifndef _ASM_SH_STACKTRACE_H +#define _ASM_SH_STACKTRACE_H + +/* Generic stack tracer with callbacks */ + +struct stacktrace_ops { + void (*warning)(void *data, char *msg); + /* msg must contain %s for the symbol */ + void (*warning_symbol)(void *data, char *msg, unsigned long symbol); + void (*address)(void *data, unsigned long address, int reliable); + /* On negative return stop dumping */ + int (*stack)(void *data, char *name); +}; + +void dump_trace(struct task_struct *tsk, struct pt_regs *regs, + unsigned long *stack, + const struct stacktrace_ops *ops, void *data); + +#endif /* _ASM_SH_STACKTRACE_H */ diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index b1b995370e7..5c8ea28ff7a 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -10,6 +10,15 @@ struct swsusp_arch_regs { struct pt_regs user_regs; unsigned long bank1_regs[8]; }; + +void sh_mobile_call_standby(unsigned long mode); + +#ifdef CONFIG_CPU_IDLE +void sh_mobile_setup_cpuidle(void); +#else +static inline void sh_mobile_setup_cpuidle(void) {} +#endif + #endif /* flags passed to assembly suspend code */ diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 6f83f2cc45c..7d80df4f09c 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h @@ -65,6 +65,7 @@ static inline void syscall_get_arguments(struct task_struct *task, case 3: args[2] = regs->regs[6]; case 2: args[1] = regs->regs[5]; case 1: args[0] = regs->regs[4]; + case 0: break; default: BUG(); diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index ab79e1f4fbe..b5c5acdc8c0 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -14,18 +14,6 @@ #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ -#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) -#define __icbi() \ -{ \ - unsigned long __addr; \ - __addr = 0xa8000000; \ - __asm__ __volatile__( \ - "icbi %0\n\t" \ - : /* no output */ \ - : "m" (__m(__addr))); \ -} -#endif - /* * A brief note on ctrl_barrier(), the control register write barrier. 
* @@ -44,7 +32,7 @@ #define mb() __asm__ __volatile__ ("synco": : :"memory") #define rmb() mb() #define wmb() __asm__ __volatile__ ("synco": : :"memory") -#define ctrl_barrier() __icbi() +#define ctrl_barrier() __icbi(0xa8000000) #define read_barrier_depends() do { } while(0) #else #define mb() __asm__ __volatile__ ("": : :"memory") @@ -181,6 +169,11 @@ BUILD_TRAP_HANDLER(breakpoint); BUILD_TRAP_HANDLER(singlestep); BUILD_TRAP_HANDLER(fpu_error); BUILD_TRAP_HANDLER(fpu_state_restore); +BUILD_TRAP_HANDLER(nmi); + +#ifdef CONFIG_BUG +extern void handle_BUG(struct pt_regs *); +#endif #define arch_align_stack(x) (x) diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h index 6c68a51f1cc..607d413f616 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/system_32.h @@ -14,12 +14,12 @@ do { \ (u32 *)&tsk->thread.dsp_status; \ __asm__ __volatile__ ( \ ".balign 4\n\t" \ + "movs.l @r2+, a0\n\t" \ "movs.l @r2+, a1\n\t" \ "movs.l @r2+, a0g\n\t" \ "movs.l @r2+, a1g\n\t" \ "movs.l @r2+, m0\n\t" \ "movs.l @r2+, m1\n\t" \ - "movs.l @r2+, a0\n\t" \ "movs.l @r2+, x0\n\t" \ "movs.l @r2+, x1\n\t" \ "movs.l @r2+, y0\n\t" \ @@ -39,20 +39,20 @@ do { \ \ __asm__ __volatile__ ( \ ".balign 4\n\t" \ - "stc.l mod, @-r2\n\t" \ + "stc.l mod, @-r2\n\t" \ "stc.l re, @-r2\n\t" \ "stc.l rs, @-r2\n\t" \ - "sts.l dsr, @-r2\n\t" \ - "sts.l y1, @-r2\n\t" \ - "sts.l y0, @-r2\n\t" \ - "sts.l x1, @-r2\n\t" \ - "sts.l x0, @-r2\n\t" \ - "sts.l a0, @-r2\n\t" \ - ".word 0xf653 ! movs.l a1, @-r2\n\t" \ - ".word 0xf6f3 ! movs.l a0g, @-r2\n\t" \ - ".word 0xf6d3 ! movs.l a1g, @-r2\n\t" \ - ".word 0xf6c3 ! movs.l m0, @-r2\n\t" \ - ".word 0xf6e3 ! movs.l m1, @-r2\n\t" \ + "sts.l dsr, @-r2\n\t" \ + "movs.l y1, @-r2\n\t" \ + "movs.l y0, @-r2\n\t" \ + "movs.l x1, @-r2\n\t" \ + "movs.l x0, @-r2\n\t" \ + "movs.l m1, @-r2\n\t" \ + "movs.l m0, @-r2\n\t" \ + "movs.l a1g, @-r2\n\t" \ + "movs.l a0g, @-r2\n\t" \ + "movs.l a1, @-r2\n\t" \ + "movs.l a0, @-r2\n\t" \ : : "r" (__ts2)); \ } while (0) @@ -63,6 +63,16 @@ do { \ #define __restore_dsp(tsk) do { } while (0) #endif +#if defined(CONFIG_CPU_SH4A) +#define __icbi(addr) __asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr)) +#else +#define __icbi(addr) mb() +#endif + +#define __ocbp(addr) __asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr)) +#define __ocbi(addr) __asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr)) +#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr)) + struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next); @@ -198,8 +208,13 @@ do { \ }) #endif +static inline reg_size_t register_align(void *val) +{ + return (unsigned long)(signed long)val; +} + int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, - struct mem_access *ma); + struct mem_access *ma, int); asmlinkage void do_address_error(struct pt_regs *regs, unsigned long writeaccess, diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h index 943acf5ea07..8e4a03e7966 100644 --- a/arch/sh/include/asm/system_64.h +++ b/arch/sh/include/asm/system_64.h @@ -37,4 +37,14 @@ do { \ #define jump_to_uncached() do { } while (0) #define back_to_cached() do { } while (0) +#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr)) +#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr)) +#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr)) +#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr)) + +static inline 
reg_size_t register_align(void *val) +{ + return (unsigned long long)(signed long long)(signed long)val; +} + #endif /* __ASM_SH_SYSTEM_64_H */ diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index d570ac2e5cb..bdeb9d46d17 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void) extern struct thread_info *alloc_thread_info(struct task_struct *tsk); extern void free_thread_info(struct thread_info *ti); - + #endif /* THREAD_SHIFT < PAGE_SHIFT */ #endif /* __ASSEMBLY__ */ @@ -116,6 +116,7 @@ extern void free_thread_info(struct thread_info *ti); #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SECCOMP 6 /* secure computing */ #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ +#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 @@ -129,25 +130,27 @@ extern void free_thread_info(struct thread_info *ti); #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_USEDFPU (1 << TIF_USEDFPU) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_FREEZE (1 << TIF_FREEZE) /* - * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within a byte, or we + * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we * blow the tst immediate size constraints and need to fix up * arch/sh/kernel/entry-common.S. */ /* work to do in syscall trace */ #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ - _TIF_SYSCALL_AUDIT | _TIF_SECCOMP) + _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ + _TIF_SYSCALL_TRACEPOINT) /* work to do on any return to u-space */ #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \ - _TIF_NOTIFY_RESUME) + _TIF_NOTIFY_RESUME | _TIF_SYSCALL_TRACEPOINT) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ diff --git a/arch/sh/include/asm/types.h b/arch/sh/include/asm/types.h index c7f3c94837d..f8421f7ad63 100644 --- a/arch/sh/include/asm/types.h +++ b/arch/sh/include/asm/types.h @@ -11,8 +11,10 @@ #ifdef CONFIG_SUPERH32 typedef u16 insn_size_t; +typedef u32 reg_size_t; #else typedef u32 insn_size_t; +typedef u64 reg_size_t; #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h index 61d6ad93d78..925dd40d9d5 100644 --- a/arch/sh/include/asm/unistd_32.h +++ b/arch/sh/include/asm/unistd_32.h @@ -132,7 +132,7 @@ #define __NR_clone 120 #define __NR_setdomainname 121 #define __NR_uname 122 -#define __NR_modify_ldt 123 +#define __NR_cacheflush 123 #define __NR_adjtimex 124 #define __NR_mprotect 125 #define __NR_sigprocmask 126 diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h index a751699afda..2b84bc916bc 100644 --- a/arch/sh/include/asm/unistd_64.h +++ b/arch/sh/include/asm/unistd_64.h @@ -137,7 +137,7 @@ #define __NR_clone 120 #define __NR_setdomainname 121 #define __NR_uname 122 -#define __NR_modify_ldt 123 +#define __NR_cacheflush 123 #define __NR_adjtimex 124 #define 
__NR_mprotect 125 #define __NR_sigprocmask 126 diff --git a/arch/sh/include/asm/unwinder.h b/arch/sh/include/asm/unwinder.h new file mode 100644 index 00000000000..1e65c07b3e1 --- /dev/null +++ b/arch/sh/include/asm/unwinder.h @@ -0,0 +1,31 @@ +#ifndef _LINUX_UNWINDER_H +#define _LINUX_UNWINDER_H + +#include <asm/stacktrace.h> + +struct unwinder { + const char *name; + struct list_head list; + int rating; + void (*dump)(struct task_struct *, struct pt_regs *, + unsigned long *, const struct stacktrace_ops *, void *); +}; + +extern int unwinder_init(void); +extern int unwinder_register(struct unwinder *); + +extern void unwind_stack(struct task_struct *, struct pt_regs *, + unsigned long *, const struct stacktrace_ops *, + void *); + +extern void stack_reader_dump(struct task_struct *, struct pt_regs *, + unsigned long *, const struct stacktrace_ops *, + void *); + +/* + * Used by fault handling code to signal to the unwinder code that it + * should switch to a different unwinder. + */ +extern int unwinder_faulted; + +#endif /* _LINUX_UNWINDER_H */ diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h new file mode 100644 index 00000000000..244ec4ad9a7 --- /dev/null +++ b/arch/sh/include/asm/vmlinux.lds.h @@ -0,0 +1,17 @@ +#ifndef __ASM_SH_VMLINUX_LDS_H +#define __ASM_SH_VMLINUX_LDS_H + +#include <asm-generic/vmlinux.lds.h> + +#ifdef CONFIG_DWARF_UNWINDER +#define DWARF_EH_FRAME \ + .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_eh_frame) = .; \ + *(.eh_frame) \ + VMLINUX_SYMBOL(__stop_eh_frame) = .; \ + } +#else +#define DWARF_EH_FRAME +#endif + +#endif /* __ASM_SH_VMLINUX_LDS_H */ diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h index f024fed00a7..2fe7cee9e43 100644 --- a/arch/sh/include/asm/watchdog.h +++ b/arch/sh/include/asm/watchdog.h @@ -13,10 +13,18 @@ #ifdef __KERNEL__ #include <linux/types.h> +#include <linux/io.h> + +#define WTCNT_HIGH 0x5a +#define WTCSR_HIGH 0xa5 + +#define WTCSR_CKS2 0x04 +#define WTCSR_CKS1 0x02 +#define WTCSR_CKS0 0x01 + #include <cpu/watchdog.h> -#include <asm/io.h> -/* +/* * See cpu-sh2/watchdog.h for explanation of this stupidity.. */ #ifndef WTCNT_R @@ -27,13 +35,6 @@ # define WTCSR_R WTCSR #endif -#define WTCNT_HIGH 0x5a -#define WTCSR_HIGH 0xa5 - -#define WTCSR_CKS2 0x04 -#define WTCSR_CKS1 0x02 -#define WTCSR_CKS0 0x01 - /* * CKS0-2 supports a number of clock division ratios. At the time the watchdog * is enabled, it defaults to a 41 usec overflow period .. we overload this to |
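
The new asm/cachectl.h (exported through the Kbuild change above) and the repurposing of syscall slot 123 from __NR_modify_ldt to __NR_cacheflush in unistd_32.h/unistd_64.h together expose a userspace cache-maintenance call. The sketch below shows how the exported constants might be used from userspace; the (addr, len, op) argument order and the sh_cacheflush() wrapper name are assumptions for illustration only, since the syscall implementation itself is outside this header-only diff.

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>         /* __NR_cacheflush (slot 123 on sh) */
#include <asm/cachectl.h>       /* ICACHE, DCACHE, BCACHE */

/* Thin wrapper; the (addr, len, op) ordering is assumed, not taken from this diff. */
static int sh_cacheflush(void *addr, unsigned long len, int op)
{
        return syscall(__NR_cacheflush, addr, len, op);
}

int main(void)
{
        static unsigned short code[16]; /* pretend opcodes were just copied here */

        /* write back the D-cache and invalidate the I-cache for the new code */
        return sh_cacheflush(code, sizeof(code), BCACHE);
}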
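
cacheflush.h also replaces the old per-CPU-family flush macros with a set of local_flush_*() function pointers (left at cache_noop() until the CPU probe installs real handlers) and a struct flusher_data bundle for their arguments. A minimal sketch of how a generic entry point could dispatch through that table is shown below; the in-tree implementation presumably also broadcasts the operation to the other CPUs on SMP, which is omitted here.

#include <linux/mm.h>
#include <asm/cacheflush.h>

/*
 * Illustrative only: pack the arguments into a flusher_data and call the
 * CPU-specific hook that the cache probe installed.  A real version would
 * likely run the hook on every online CPU as well.
 */
void flush_cache_page(struct vm_area_struct *vma,
                      unsigned long addr, unsigned long pfn)
{
        struct flusher_data data;

        data.vma   = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        local_flush_cache_page(&data);
}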
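
bug.h gains BUGFLAG_UNWINDER together with UNWINDER_BUG()/UNWINDER_BUG_ON(), and unwinder.h notes that unwinder_faulted lets the fault-handling code tell the unwinder core to switch implementations. A hypothetical use inside an unwinder backend is sketched below; dwarf_check_frame() and its particular check are invented purely for illustration.

#include <asm/bug.h>
#include <asm/dwarf.h>

/*
 * Hypothetical helper: behaves like BUG_ON(), but the emitted bug entry
 * carries BUGFLAG_UNWINDER, presumably so the fault path can flag the
 * failure (see unwinder_faulted) and switch to a different unwinder
 * instead of treating it as a fatal BUG().
 */
static void dwarf_check_frame(struct dwarf_frame *frame)
{
        UNWINDER_BUG_ON(!frame->return_addr);
}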