Diffstat (limited to 'include/asm-xtensa')
-rw-r--r--  include/asm-xtensa/cacheflush.h           |   2
-rw-r--r--  include/asm-xtensa/coprocessor.h          | 214
-rw-r--r--  include/asm-xtensa/elf.h                  | 110
-rw-r--r--  include/asm-xtensa/module.h               |   4
-rw-r--r--  include/asm-xtensa/pgalloc.h              |   2
-rw-r--r--  include/asm-xtensa/pgtable.h              |   8
-rw-r--r--  include/asm-xtensa/processor.h            |  13
-rw-r--r--  include/asm-xtensa/ptrace.h               |  44
-rw-r--r--  include/asm-xtensa/regs.h                 |   9
-rw-r--r--  include/asm-xtensa/sigcontext.h           |   4
-rw-r--r--  include/asm-xtensa/stat.h                 |  36
-rw-r--r--  include/asm-xtensa/system.h               |  39
-rw-r--r--  include/asm-xtensa/thread_info.h          |  21
-rw-r--r--  include/asm-xtensa/timex.h                |   8
-rw-r--r--  include/asm-xtensa/uaccess.h              |  99
-rw-r--r--  include/asm-xtensa/variant-fsf/tie-asm.h  |  70
-rw-r--r--  include/asm-xtensa/variant-fsf/tie.h      |  75
17 files changed, 440 insertions, 318 deletions
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
index b773c57e75a..94c4c53a099 100644
--- a/include/asm-xtensa/cacheflush.h
+++ b/include/asm-xtensa/cacheflush.h
@@ -70,6 +70,8 @@ extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
 #endif
 #if (ICACHE_WAY_SIZE > PAGE_SIZE)
 extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
+#else
+# define __invalidate_icache_page_alias(v,p)  do { } while(0)
 #endif
 
 /*
diff --git a/include/asm-xtensa/coprocessor.h b/include/asm-xtensa/coprocessor.h
index aa212103455..1cbcf9001a4 100644
--- a/include/asm-xtensa/coprocessor.h
+++ b/include/asm-xtensa/coprocessor.h
@@ -5,81 +5,173 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
  */
+
 #ifndef _XTENSA_COPROCESSOR_H
 #define _XTENSA_COPROCESSOR_H
 
-#include <asm/variant/core.h>
+#include <linux/stringify.h>
 #include <asm/variant/tie.h>
+#include <asm/types.h>
+
+#ifdef __ASSEMBLY__
+# include <asm/variant/tie-asm.h>
+
+.macro  xchal_sa_start  a b
+        .set .Lxchal_pofs_, 0
+        .set .Lxchal_ofs_, 0
+.endm
+
+.macro  xchal_sa_align  ptr minofs maxofs ofsalign totalign
+        .set    .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
+        .set    .Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
+.endm
+
+#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+                | XTHAL_SAS_CC \
+                | XTHAL_SAS_CALR | XTHAL_SAS_CALE )
+
+.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
+        .if XTREGS_OPT_SIZE > 0
+                addi    \clb, \ptr, \offset
+                xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+        .endif
+.endm
+
+.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
+        .if XTREGS_OPT_SIZE > 0
+                addi    \clb, \ptr, \offset
+                xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+        .endif
+.endm
+#undef _SELECT
+
+#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
+                | XTHAL_SAS_NOCC \
+                | XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
+
+.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
+        .if XTREGS_USER_SIZE > 0
+                addi    \clb, \ptr, \offset
+                xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
+        .endif
+.endm
+
+.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
+        .if XTREGS_USER_SIZE > 0
+                addi    \clb, \ptr, \offset
+                xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
+        .endif
+.endm
+#undef _SELECT
+
+
+
+#endif  /* __ASSEMBLY__ */
 
-#if !XCHAL_HAVE_CP
-
-#define XTENSA_CP_EXTRA_OFFSET  0
-#define XTENSA_CP_EXTRA_ALIGN   1       /* must be a power of 2 */
-#define XTENSA_CP_EXTRA_SIZE    0
-
-#else
-
-#define XTOFS(last_start,last_size,align) \
-	((last_start+last_size+align-1) & -align)
-
-#define XTENSA_CP_EXTRA_OFFSET  0
-#define XTENSA_CP_EXTRA_ALIGN   XCHAL_EXTRA_SA_ALIGN
-
-#define XTENSA_CPE_CP0_OFFSET \
-	XTOFS(XTENSA_CP_EXTRA_OFFSET, XCHAL_EXTRA_SA_SIZE, XCHAL_CP0_SA_ALIGN)
-#define XTENSA_CPE_CP1_OFFSET \
-	XTOFS(XTENSA_CPE_CP0_OFFSET, XCHAL_CP0_SA_SIZE, XCHAL_CP1_SA_ALIGN)
-#define XTENSA_CPE_CP2_OFFSET \
-	XTOFS(XTENSA_CPE_CP1_OFFSET, XCHAL_CP1_SA_SIZE, XCHAL_CP2_SA_ALIGN)
-#define XTENSA_CPE_CP3_OFFSET \
-	XTOFS(XTENSA_CPE_CP2_OFFSET, XCHAL_CP2_SA_SIZE, XCHAL_CP3_SA_ALIGN)
-#define XTENSA_CPE_CP4_OFFSET \
-	XTOFS(XTENSA_CPE_CP3_OFFSET, XCHAL_CP3_SA_SIZE, XCHAL_CP4_SA_ALIGN)
-#define XTENSA_CPE_CP5_OFFSET \
-	XTOFS(XTENSA_CPE_CP4_OFFSET, XCHAL_CP4_SA_SIZE, XCHAL_CP5_SA_ALIGN)
-#define XTENSA_CPE_CP6_OFFSET \
-	XTOFS(XTENSA_CPE_CP5_OFFSET, XCHAL_CP5_SA_SIZE, XCHAL_CP6_SA_ALIGN)
-#define XTENSA_CPE_CP7_OFFSET \
-	XTOFS(XTENSA_CPE_CP6_OFFSET, XCHAL_CP6_SA_SIZE, XCHAL_CP7_SA_ALIGN)
-#define XTENSA_CP_EXTRA_SIZE \
-	XTOFS(XTENSA_CPE_CP7_OFFSET, XCHAL_CP7_SA_SIZE, 16)
-
-#if XCHAL_CP_NUM > 0
-# ifndef __ASSEMBLY__
 /*
- * Tasks that own contents of (last user) each coprocessor.
- * Entries are 0 for not-owned or non-existent coprocessors.
- * Note: The size of this structure is fixed to 8 bytes in entry.S
+ * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
+ *
+ * XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
+ *
  */
-typedef struct {
-	struct task_struct *owner;	/* owner */
-	int offset;			/* offset in cpextra space. */
-} coprocessor_info_t;
-# else
-# define COPROCESSOR_INFO_OWNER 0
-# define COPROCESSOR_INFO_OFFSET 4
-# define COPROCESSOR_INFO_SIZE 8
-# endif
-#endif
-#endif	/* XCHAL_HAVE_CP */
 
+#define XTENSA_HAVE_COPROCESSOR(x)				\
+	((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
+#define XTENSA_HAVE_COPROCESSORS				\
+	(XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
+#define XTENSA_HAVE_IO_PORT(x)					\
+	(XCHAL_CP_PORT_MASK & (1 << (x)))
+#define XTENSA_HAVE_IO_PORTS					\
+	XCHAL_CP_PORT_MASK
 
 #ifndef __ASSEMBLY__
-# if XCHAL_CP_NUM > 0
-struct task_struct;
-extern void release_coprocessors (struct task_struct*);
-extern void save_coprocessor_registers(void*, int);
-# else
-# define release_coprocessors(task)
-# endif
-typedef unsigned char cp_state_t[XTENSA_CP_EXTRA_SIZE]
-	__attribute__ ((aligned (XTENSA_CP_EXTRA_ALIGN)));
-#endif	/* !__ASSEMBLY__ */
+
+#if XCHAL_HAVE_CP
+
+#define RSR_CPENABLE(x)	do {						   \
+	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
+	} while(0);
+#define WSR_CPENABLE(x)	do {						   \
+	__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync"	   \
+			     :: "a" (x));				   \
+	} while(0);
+
+#endif /* XCHAL_HAVE_CP */
+
+/*
+ * Additional registers.
+ * We define three types of additional registers:
+ *  ext: extra registers that are used by the compiler
+ *  cpn: optional registers that can be used by a user application
+ *  cpX: coprocessor registers that can only be used if the corresponding
+ *       CPENABLE bit is set.
+ */
+
+#define XCHAL_SA_REG(list,cc,abi,type,y,name,z,align,size,...)	\
+	__REG ## list (cc, abi, type, name, size, align)
+
+#define __REG0(cc,abi,t,name,s,a)	__REG0_ ## cc (abi,name)
+#define __REG1(cc,abi,t,name,s,a)	__REG1_ ## cc (name)
+#define __REG2(cc,abi,type,...)		__REG2_ ## type (__VA_ARGS__)
+
+#define __REG0_0(abi,name)
+#define __REG0_1(abi,name)		__REG0_1 ## abi (name)
+#define __REG0_10(name)			__u32 name;
+#define __REG0_11(name)			__u32 name;
+#define __REG0_12(name)
+
+#define __REG1_0(name)			__u32 name;
+#define __REG1_1(name)
+
+#define __REG2_0(n,s,a)			__u32 name;
+#define __REG2_1(n,s,a)			unsigned char n[s] __attribute__ ((aligned(a)));
+#define __REG2_2(n,s,a)			unsigned char n[s] __attribute__ ((aligned(a)));
+
+typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
+	__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
+	__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
+
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
+	__attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
+typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
+	__attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
+typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
+	__attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
+typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
+	__attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
+typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
+	__attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
+typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
+	__attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
+typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
+	__attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
+typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
+	__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
+
+extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
+extern void coprocessor_save(void*, int);
+extern void coprocessor_load(void*, int);
+extern void coprocessor_flush(struct thread_info*, int);
+extern void coprocessor_restore(struct thread_info*, int);
+
+extern void coprocessor_release_all(struct thread_info*);
+extern void coprocessor_flush_all(struct thread_info*);
+
+static inline void coprocessor_clear_cpenable(void)
+{
+	unsigned long i = 0;
+	WSR_CPENABLE(i);
+}
+
+#endif	/* XTENSA_HAVE_COPROCESSORS */
+
+#endif	/* !__ASSEMBLY__ */
 
 #endif	/* _XTENSA_COPROCESSOR_H */
diff --git a/include/asm-xtensa/elf.h b/include/asm-xtensa/elf.h
index 46738454250..ca6e5101a2c 100644
--- a/include/asm-xtensa/elf.h
+++ b/include/asm-xtensa/elf.h
@@ -72,115 +72,32 @@
 
 /* ELF register definitions. This is needed for core dump support.  */
 
-/*
- * elf_gregset_t contains the application-level state in the following order:
- * Processor info: 	config_version, cpuxy
- * Processor state:	pc, ps, exccause, excvaddr, wb, ws,
- *			lbeg, lend, lcount, sar
- * GP regs:		ar0 - arXX
- */
-
 typedef unsigned long elf_greg_t;
 
 typedef struct {
-	elf_greg_t xchal_config_id0;
-	elf_greg_t xchal_config_id1;
-	elf_greg_t cpux;
-	elf_greg_t cpuy;
 	elf_greg_t pc;
 	elf_greg_t ps;
-	elf_greg_t exccause;
-	elf_greg_t excvaddr;
-	elf_greg_t windowbase;
-	elf_greg_t windowstart;
 	elf_greg_t lbeg;
 	elf_greg_t lend;
 	elf_greg_t lcount;
 	elf_greg_t sar;
-	elf_greg_t syscall;
-	elf_greg_t ar[64];
+	elf_greg_t windowstart;
+	elf_greg_t windowbase;
+	elf_greg_t reserved[8+48];
+	elf_greg_t a[64];
 } xtensa_gregset_t;
 
 #define ELF_NGREG	(sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
 
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
-/*
- * Compute the size of the coprocessor and extra state layout (register info)
- * table (in bytes).
- * This is actually the maximum size of the table, as opposed to the size,
- * which is available from the _xtensa_reginfo_table_size global variable.
- *
- * (See also arch/xtensa/kernel/coprocessor.S)
- *
- */
-
-#ifndef XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM
-# define XTENSA_CPE_LTABLE_SIZE		0
-#else
-# define XTENSA_CPE_SEGMENT(num)	(num ? (1+num) : 0)
-# define XTENSA_CPE_LTABLE_ENTRIES	\
-		( XTENSA_CPE_SEGMENT(XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM)	\
-		+ XTENSA_CPE_SEGMENT(XCHAL_CP0_SA_CONTENTS_LIBDB_NUM)	\
-		+ XTENSA_CPE_SEGMENT(XCHAL_CP1_SA_CONTENTS_LIBDB_NUM)	\
-		+ XTENSA_CPE_SEGMENT(XCHAL_CP2_SA_CONTENTS_LIBDB_NUM)	\
-		+ XTENSA_CPE_SEGMENT(XCHAL_CP3_SA_CONTENTS_LIBDB_NUM)	\
-		+ XTENSA_CPE_SEGMENT(XCHAL_CP4_SA_CONTENTS_LIBDB_NUM)	\
-		+ XTENSA_CPE_SEGMENT(XCHAL_CP5_SA_CONTENTS_LIBDB_NUM)	\
-		+ XTENSA_CPE_SEGMENT(XCHAL_CP6_SA_CONTENTS_LIBDB_NUM)	\
-		+ XTENSA_CPE_SEGMENT(XCHAL_CP7_SA_CONTENTS_LIBDB_NUM)	\
-		+ 1		/* final entry */			\
-		)
-# define XTENSA_CPE_LTABLE_SIZE		(XTENSA_CPE_LTABLE_ENTRIES * 8)
-#endif
-
-
-/*
- * Instantiations of the elf_fpregset_t type contain, in most
- * architectures, the floating point (FPU) register set.
- * For Xtensa, this type is extended to contain all custom state,
- * ie. coprocessor and "extra" (non-coprocessor) state (including,
- * for example, TIE-defined states and register files; as well
- * as other optional processor state).
- * This includes FPU state if a floating-point coprocessor happens
- * to have been configured within the Xtensa processor.
- *
- * TOTAL_FPREGS_SIZE is the required size (without rounding)
- * of elf_fpregset_t.  It provides space for the following:
- *
- *  a)  32-bit mask of active coprocessors for this task (similar
- *	to CPENABLE in single-threaded Xtensa processor systems)
- *
- *  b)  table describing the layout of custom states (ie. of
- *      individual registers, etc) within the save areas
- *
- *  c)  save areas for each coprocessor and for non-coprocessor
- *      ("extra") state
- *
- * Note that save areas may require up to 16-byte alignment when
- * accessed by save/restore sequences.  We do not need to ensure
- * such alignment in an elf_fpregset_t structure because custom
- * state is not directly loaded/stored into it; rather, save area
- * contents are copied to elf_fpregset_t from the active save areas
- * (see 'struct task_struct' definition in processor.h for that)
- * using memcpy().  But we do allow space for such alignment,
- * to allow optimizations of layout and copying.
- */
-#if 0
-#define TOTAL_FPREGS_SIZE \
-	(4 + XTENSA_CPE_LTABLE_SIZE + XTENSA_CP_EXTRA_SIZE)
-#define ELF_NFPREG \
-	((TOTAL_FPREGS_SIZE + sizeof(elf_fpreg_t) - 1) / sizeof(elf_fpreg_t))
-#else
-#define TOTAL_FPREGS_SIZE	0
-#define ELF_NFPREG		0
-#endif
+#define ELF_NFPREG	18
 
 typedef unsigned int elf_fpreg_t;
 typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 #define ELF_CORE_COPY_REGS(_eregs, _pregs) 				\
-	xtensa_elf_core_copy_regs (&_eregs, _pregs);
+	xtensa_elf_core_copy_regs ((xtensa_gregset_t*)&(_eregs), _pregs);
 
 extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
 
@@ -257,6 +174,21 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
 	_r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
 	} while (0)
 
+typedef struct {
+	xtregs_opt_t	opt;
+	xtregs_user_t	user;
+#if XTENSA_HAVE_COPROCESSORS
+	xtregs_cp0_t	cp0;
+	xtregs_cp1_t	cp1;
+	xtregs_cp2_t	cp2;
+	xtregs_cp3_t	cp3;
+	xtregs_cp4_t	cp4;
+	xtregs_cp5_t	cp5;
+	xtregs_cp6_t	cp6;
+	xtregs_cp7_t	cp7;
+#endif
+} elf_xtregs_t;
+
 #define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
 
 struct task_struct;
diff --git a/include/asm-xtensa/module.h b/include/asm-xtensa/module.h
index ffb25bfdf6a..d9b34bee4d4 100644
--- a/include/asm-xtensa/module.h
+++ b/include/asm-xtensa/module.h
@@ -15,9 +15,11 @@
 
 struct mod_arch_specific
 {
-	/* Module support is not completely implemented. */
+	/* No special elements, yet. */
 };
 
+#define MODULE_ARCH_VERMAGIC "xtensa-" __stringify(XCHAL_CORE_ID) " "
+
 #define Elf_Shdr Elf32_Shdr
 #define Elf_Sym Elf32_Sym
 #define Elf_Ehdr Elf32_Ehdr
diff --git a/include/asm-xtensa/pgalloc.h b/include/asm-xtensa/pgalloc.h
index 8d1544eb461..4f4a7987ede 100644
--- a/include/asm-xtensa/pgalloc.h
+++ b/include/asm-xtensa/pgalloc.h
@@ -47,7 +47,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline pte_token_t pte_alloc_one(struct mm_struct *mm,
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 					unsigned long addr)
 {
 	struct page *page;
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index c0fcc1c9660..c8b024a48b4 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -66,11 +66,9 @@
  */
 #define VMALLOC_START		0xC0000000
-#define VMALLOC_END		0xC6FEFFFF
-#define TLBTEMP_BASE_1		0xC6FF0000
-#define TLBTEMP_BASE_2		0xC6FF8000
-#define MODULE_START		0xC7000000
-#define MODULE_END		0xC7FFFFFF
+#define VMALLOC_END		0xC7FEFFFF
+#define TLBTEMP_BASE_1		0xC7FF0000
+#define TLBTEMP_BASE_2		0xC7FF8000
 
 /*
  * Xtensa Linux config PTE layout (when present):
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h
index 96408f43662..4918a4e96d4 100644
--- a/include/asm-xtensa/processor.h
+++ b/include/asm-xtensa/processor.h
@@ -103,10 +103,6 @@ struct thread_struct {
 	unsigned long dbreaka[XCHAL_NUM_DBREAK];
 	unsigned long dbreakc[XCHAL_NUM_DBREAK];
 
-	/* Allocate storage for extra state and coprocessor state. */
-	unsigned char cp_save[XTENSA_CP_EXTRA_SIZE]
-		__attribute__ ((aligned(XTENSA_CP_EXTRA_ALIGN)));
-
 	/* Make structure 16 bytes aligned. */
 	int align[0] __attribute__ ((aligned(16)));
 };
@@ -162,21 +158,16 @@ struct thread_struct {
 struct task_struct;
 struct mm_struct;
 
-// FIXME: do we need release_thread for CP??
 /* Free all resources held by a thread. */
 #define release_thread(thread) do { } while(0)
 
-// FIXME: do we need prepare_to_copy (lazy status) for CP??
 /* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk)	do { } while (0)
+extern void prepare_to_copy(struct task_struct*);
 
-/*
- * create a kernel thread without removing it from tasklists
- */
+/* Create a kernel thread without removing it from tasklists */
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 /* Copy and release all segment info associated with a VM */
-
 #define copy_segments(p, mm)	do { } while(0)
 #define release_segments(mm)	do { } while(0)
 #define forget_segments()	do { } while (0)
diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h
index 77ff02d307b..422c73e2693 100644
--- a/include/asm-xtensa/ptrace.h
+++ b/include/asm-xtensa/ptrace.h
@@ -53,33 +53,30 @@
 
 /* Registers used by strace */
 
-#define REG_A_BASE	0xfc000000
-#define REG_AR_BASE	0x04000000
-#define REG_PC		0x14000000
-#define REG_PS		0x080000e6
-#define REG_WB		0x08000048
-#define REG_WS		0x08000049
-#define REG_LBEG	0x08000000
-#define REG_LEND	0x08000001
-#define REG_LCOUNT	0x08000002
-#define REG_SAR		0x08000003
-#define REG_DEPC	0x080000c0
-#define REG_EXCCAUSE	0x080000e8
-#define REG_EXCVADDR	0x080000ee
-#define SYSCALL_NR	0x1
-
-#define AR_REGNO_TO_A_REGNO(ar, wb) (ar - wb*4) & ~(XCHAL_NUM_AREGS - 1)
+#define REG_A_BASE	0x0000
+#define REG_AR_BASE	0x0100
+#define REG_PC		0x0020
+#define REG_PS		0x02e6
+#define REG_WB		0x0248
+#define REG_WS		0x0249
+#define REG_LBEG	0x0200
+#define REG_LEND	0x0201
+#define REG_LCOUNT	0x0202
+#define REG_SAR		0x0203
+
+#define SYSCALL_NR	0x00ff
 
 /* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
 
-#define PTRACE_GETREGS            12
-#define PTRACE_SETREGS            13
-#define PTRACE_GETFPREGS          14
-#define PTRACE_SETFPREGS          15
-#define PTRACE_GETFPREGSIZE       18
+#define PTRACE_GETREGS		12
+#define PTRACE_SETREGS		13
+#define PTRACE_GETXTREGS	18
+#define PTRACE_SETXTREGS	19
 
 #ifndef __ASSEMBLY__
 
+#ifdef __KERNEL__
+
 /*
  * This struct defines the way the registers are stored on the
  * kernel stack during a system call or other kernel entry.
@@ -102,6 +99,9 @@ struct pt_regs {
 	unsigned long icountlevel;	/*  60 */
 	int reserved[1];		/*  64 */
 
+	/* Additional configurable registers that are used by the compiler. */
+	xtregs_opt_t xtregs_opt;
+
 	/* Make sure the areg field is 16 bytes aligned. */
 	int align[0] __attribute__ ((aligned(16)));
 
@@ -111,8 +111,6 @@ struct pt_regs {
 	unsigned long areg[16];		/* 128 (64) */
 };
 
-#ifdef __KERNEL__
-
 #include <asm/variant/core.h>
 
 # define task_pt_regs(tsk) ((struct pt_regs*) \
diff --git a/include/asm-xtensa/regs.h b/include/asm-xtensa/regs.h
index c913d259faa..d4baed24692 100644
--- a/include/asm-xtensa/regs.h
+++ b/include/asm-xtensa/regs.h
@@ -100,7 +100,14 @@
 #define EXCCAUSE_DTLB_SIZE_RESTRICTION		27
 #define EXCCAUSE_LOAD_CACHE_ATTRIBUTE		28
 #define EXCCAUSE_STORE_CACHE_ATTRIBUTE		29
-#define EXCCAUSE_FLOATING_POINT			40
+#define EXCCAUSE_COPROCESSOR0_DISABLED		32
+#define EXCCAUSE_COPROCESSOR1_DISABLED		33
+#define EXCCAUSE_COPROCESSOR2_DISABLED		34
+#define EXCCAUSE_COPROCESSOR3_DISABLED		35
+#define EXCCAUSE_COPROCESSOR4_DISABLED		36
+#define EXCCAUSE_COPROCESSOR5_DISABLED		37
+#define EXCCAUSE_COPROCESSOR6_DISABLED		38
+#define EXCCAUSE_COPROCESSOR7_DISABLED		39
 
 /*  PS register fields.
  */
diff --git a/include/asm-xtensa/sigcontext.h b/include/asm-xtensa/sigcontext.h
index e3381cee505..03383af8c3b 100644
--- a/include/asm-xtensa/sigcontext.h
+++ b/include/asm-xtensa/sigcontext.h
@@ -13,9 +13,6 @@
 
 struct sigcontext {
-	unsigned long oldmask;
-
-	/* CPU registers */
 	unsigned long sc_pc;
 	unsigned long sc_ps;
 	unsigned long sc_lbeg;
@@ -25,6 +22,7 @@ struct sigcontext {
 	unsigned long sc_acclo;
 	unsigned long sc_acchi;
 	unsigned long sc_a[16];
+	void *sc_xtregs;
 };
 
 #endif /* _XTENSA_SIGCONTEXT_H */
diff --git a/include/asm-xtensa/stat.h b/include/asm-xtensa/stat.h
index 149f4bce092..c4992038cee 100644
--- a/include/asm-xtensa/stat.h
+++ b/include/asm-xtensa/stat.h
@@ -5,25 +5,23 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
  */
 
 #ifndef _XTENSA_STAT_H
 #define _XTENSA_STAT_H
 
-#include <linux/types.h>
-
 #define STAT_HAVE_NSEC 1
 
 struct stat {
 	unsigned long	st_dev;
-	ino_t		st_ino;
-	mode_t		st_mode;
-	nlink_t		st_nlink;
-	uid_t		st_uid;
-	gid_t		st_gid;
-	unsigned int	st_rdev;
-	off_t		st_size;
+	unsigned long	st_ino;
+	unsigned int	st_mode;
+	unsigned int	st_nlink;
+	unsigned int	st_uid;
+	unsigned int	st_gid;
+	unsigned long	st_rdev;
+	long		st_size;
 	unsigned long	st_blksize;
 	unsigned long	st_blocks;
 	unsigned long	st_atime;
@@ -36,8 +34,6 @@ struct stat {
 	unsigned long	__unused5;
 };
 
-/* This matches struct stat64 in glibc-2.3 */
-
 struct stat64  {
 	unsigned long long st_dev;	/* Device */
 	unsigned long long st_ino;	/* File serial number */
@@ -47,20 +43,14 @@ struct stat64 {
 	unsigned int  st_gid;		/* Group ID of the file's group. */
 	unsigned long long st_rdev;	/* Device number, if device. */
 	long long st_size;		/* Size of file, in bytes. */
-	long st_blksize;		/* Optimal block size for I/O. */
+	unsigned long st_blksize;	/* Optimal block size for I/O. */
 	unsigned long __unused2;
-#ifdef __XTENSA_EB__
-	unsigned long __unused3;
-	long st_blocks;			/* Number 512-byte blocks allocated. */
-#else
-	long st_blocks;			/* Number 512-byte blocks allocated. */
-	unsigned long __unused3;
-#endif
-	long st_atime;			/* Time of last access. */
+	unsigned long long st_blocks;	/* Number 512-byte blocks allocated. */
+	unsigned long st_atime;		/* Time of last access. */
 	unsigned long st_atime_nsec;
-	long st_mtime;			/* Time of last modification. */
+	unsigned long st_mtime;		/* Time of last modification. */
 	unsigned long st_mtime_nsec;
-	long st_ctime;			/* Time of last status change. */
+	unsigned long st_ctime;		/* Time of last status change. */
 	unsigned long st_ctime_nsec;
 	unsigned long __unused4;
 	unsigned long __unused5;
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
index e0cb9116d8a..62b1e8f3c13 100644
--- a/include/asm-xtensa/system.h
+++ b/include/asm-xtensa/system.h
@@ -46,42 +46,6 @@ static inline int irqs_disabled(void)
 	return flags & 0xf;
 }
 
-#define RSR_CPENABLE(x)	do {						  \
-	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
-	} while(0);
-#define WSR_CPENABLE(x)	do {						  \
-	__asm__ __volatile__("wsr %0," __stringify(CPENABLE)";rsync"	  \
-			     :: "a" (x));} while(0);
-
-#define clear_cpenable() __clear_cpenable()
-
-static inline void __clear_cpenable(void)
-{
-#if XCHAL_HAVE_CP
-	unsigned long i = 0;
-	WSR_CPENABLE(i);
-#endif
-}
-
-static inline void enable_coprocessor(int i)
-{
-#if XCHAL_HAVE_CP
-	int cp;
-	RSR_CPENABLE(cp);
-	cp |= 1 << i;
-	WSR_CPENABLE(cp);
-#endif
-}
-
-static inline void disable_coprocessor(int i)
-{
-#if XCHAL_HAVE_CP
-	int cp;
-	RSR_CPENABLE(cp);
-	cp &= ~(1 << i);
-	WSR_CPENABLE(cp);
-#endif
-}
 
 #define smp_read_barrier_depends() do { } while(0)
 #define read_barrier_depends() do { } while(0)
@@ -111,7 +75,6 @@ extern void *_switch_to(void *last, void *next);
 
 #define switch_to(prev,next,last)		\
 	do {					\
-		clear_cpenable();		\
 		(last) = _switch_to(prev, next);	\
 	} while(0)
 
@@ -244,7 +207,7 @@ static inline void spill_registers(void)
 		"wsr	a13," __stringify(SAR) "\n\t"
 		"wsr	a14," __stringify(PS) "\n\t"
 		:: "a" (&a0), "a" (&ps)
-		: "a2", "a3", "a12", "a13", "a14", "a15", "memory");
+		: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
 }
 
 #define arch_align_stack(x) (x)
diff --git a/include/asm-xtensa/thread_info.h b/include/asm-xtensa/thread_info.h
index 52c958285bc..a2c640682ed 100644
--- a/include/asm-xtensa/thread_info.h
+++ b/include/asm-xtensa/thread_info.h
@@ -27,6 +27,21 @@
 
 #ifndef __ASSEMBLY__
 
+#if XTENSA_HAVE_COPROCESSORS
+
+typedef struct xtregs_coprocessor {
+	xtregs_cp0_t cp0;
+	xtregs_cp1_t cp1;
+	xtregs_cp2_t cp2;
+	xtregs_cp3_t cp3;
+	xtregs_cp4_t cp4;
+	xtregs_cp5_t cp5;
+	xtregs_cp6_t cp6;
+	xtregs_cp7_t cp7;
+} xtregs_coprocessor_t;
+
+#endif
+
 struct thread_info {
 	struct task_struct	*task;		/* main task structure */
 	struct exec_domain	*exec_domain;	/* execution domain */
@@ -38,7 +53,13 @@ struct thread_info {
 	mm_segment_t		addr_limit;	/* thread address space */
 	struct restart_block	restart_block;
 
+	unsigned long		cpenable;
+	/* Allocate storage for extra user states and coprocessor states. */
+#if XTENSA_HAVE_COPROCESSORS
+	xtregs_coprocessor_t	xtregs_cp;
+#endif
+	xtregs_user_t		xtregs_user;
 };
 
 #else /* !__ASSEMBLY__ */
diff --git a/include/asm-xtensa/timex.h b/include/asm-xtensa/timex.h
index a5fca59fba9..b83a8181d44 100644
--- a/include/asm-xtensa/timex.h
+++ b/include/asm-xtensa/timex.h
@@ -63,10 +63,10 @@ extern cycles_t cacheflush_time;
  * Register access.
  */
 
-#define WSR_CCOUNT(r)	  __asm__("wsr %0,"__stringify(CCOUNT) :: "a" (r))
-#define RSR_CCOUNT(r)	  __asm__("rsr %0,"__stringify(CCOUNT) : "=a" (r))
-#define WSR_CCOMPARE(x,r) __asm__("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r))
-#define RSR_CCOMPARE(x,r) __asm__("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r))
+#define WSR_CCOUNT(r)	  asm volatile ("wsr %0,"__stringify(CCOUNT) :: "a" (r))
+#define RSR_CCOUNT(r)	  asm volatile ("rsr %0,"__stringify(CCOUNT) : "=a" (r))
+#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r))
+#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r))
 
 static inline unsigned long get_ccount (void)
 {
diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h
index d6352da05b1..b8528426ab1 100644
--- a/include/asm-xtensa/uaccess.h
+++ b/include/asm-xtensa/uaccess.h
@@ -26,6 +26,7 @@
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
+#include <asm/types.h>
 
 /*
  * These assembly macros mirror the C macros that follow below.  They
@@ -118,7 +119,7 @@
 *		<at> destroyed (actually, (TASK_SIZE + 1 - size))
 */
 	.macro	user_ok	aa, as, at, error
-	movi	\at, (TASK_SIZE+1)
+	movi	\at, __XTENSA_UL_CONST(TASK_SIZE)
 	bgeu	\as, \at, \error
 	sub	\at, \at, \as
 	bgeu	\aa, \at, \error
@@ -226,20 +227,21 @@ extern long __put_user_bad(void);
 	__pu_err;						\
 })
 
-#define __put_user_size(x,ptr,size,retval)			\
-do {								\
-	retval = 0;						\
-	switch (size) {						\
-	case 1: __put_user_asm(x,ptr,retval,1,"s8i");  break;	\
-	case 2: __put_user_asm(x,ptr,retval,2,"s16i"); break;	\
-	case 4: __put_user_asm(x,ptr,retval,4,"s32i"); break;	\
-	case 8: {						\
-		     __typeof__(*ptr) __v64 = x;		\
-		     retval = __copy_to_user(ptr,&__v64,8);	\
-		     break;					\
-	        }						\
-	default: __put_user_bad();				\
-	}							\
+#define __put_user_size(x,ptr,size,retval)				\
+do {									\
+	int __cb;							\
+	retval = 0;							\
+	switch (size) {							\
+	case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb);  break;	\
+	case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break;	\
+	case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break;	\
+	case 8: {							\
+		     __typeof__(*ptr) __v64 = x;			\
+		     retval = __copy_to_user(ptr,&__v64,8);		\
+		     break;						\
+	        }							\
+	default: __put_user_bad();					\
+	}								\
 } while (0)
 
 
@@ -267,14 +269,14 @@ do {									\
 #define __check_align_1  ""
 
 #define __check_align_2				\
-	"   _bbci.l %2,  0, 1f		\n"	\
-	"   movi    %0, %3		\n"	\
+	"   _bbci.l %3,  0, 1f		\n"	\
+	"   movi    %0, %4		\n"	\
 	"   _j      2f			\n"
 
 #define __check_align_4				\
-	"   _bbsi.l %2,  0, 0f		\n"	\
-	"   _bbci.l %2,  1, 1f		\n"	\
-	"0: movi    %0, %3		\n"	\
+	"   _bbsi.l %3,  0, 0f		\n"	\
+	"   _bbci.l %3,  1, 1f		\n"	\
+	"0: movi    %0, %4		\n"	\
 	"   _j      2f			\n"
 
 
@@ -286,24 +288,24 @@ do {									\
  * WARNING: If you modify this macro at all, verify that the
  * __check_align_* macros still work.
  */
-#define __put_user_asm(x, addr, err, align, insn) \
-   __asm__ __volatile__(			\
-	__check_align_##align			\
-	"1: "insn"  %1, %2, 0		\n"	\
-	"2:				\n"	\
-	"   .section  .fixup,\"ax\"	\n"	\
-	"   .align 4			\n"	\
-	"4:				\n"	\
-	"   .long  2b			\n"	\
-	"5:				\n"	\
-	"   l32r   %2, 4b		\n"	\
-	"   movi   %0, %3		\n"	\
-	"   jx     %2			\n"	\
-	"   .previous			\n"	\
-	"   .section  __ex_table,\"a\"	\n"	\
-	"   .long	1b, 5b		\n"	\
-	"   .previous"				\
-	:"=r" (err)				\
+#define __put_user_asm(x, addr, err, align, insn, cb)	\
+   __asm__ __volatile__(				\
+	__check_align_##align				\
+	"1: "insn"  %2, %3, 0		\n"		\
+	"2:				\n"		\
+	"   .section  .fixup,\"ax\"	\n"		\
+	"   .align 4			\n"		\
+	"4:				\n"		\
+	"   .long  2b			\n"		\
+	"5:				\n"		\
+	"   l32r   %1, 4b		\n"		\
+	"   movi   %0, %4		\n"		\
+	"   jx     %1			\n"		\
+	"   .previous			\n"		\
+	"   .section  __ex_table,\"a\"	\n"		\
+	"   .long	1b, 5b		\n"		\
+	"   .previous"					\
+	:"=r" (err), "=r" (cb)				\
 	:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
 
 #define __get_user_nocheck(x,ptr,size)				\
@@ -328,11 +330,12 @@ extern long __get_user_bad(void);
 
 #define __get_user_size(x,ptr,size,retval)				\
 do {									\
+	int __cb;							\
 	retval = 0;							\
 	switch (size) {							\
-	  case 1: __get_user_asm(x,ptr,retval,1,"l8ui");  break;	\
-	  case 2: __get_user_asm(x,ptr,retval,2,"l16ui"); break;	\
-	  case 4: __get_user_asm(x,ptr,retval,4,"l32i");  break;	\
+	  case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb);  break;	\
+	  case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break;	\
+	  case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb);  break;	\
 	  case 8: retval = __copy_from_user(&x,ptr,8);    break;	\
 	  default: (x) = __get_user_bad();				\
 	}								\
@@ -343,25 +346,25 @@ do {									\
  * WARNING: If you modify this macro at all, verify that the
  * __check_align_* macros still work.
  */
-#define __get_user_asm(x, addr, err, align, insn) \
+#define __get_user_asm(x, addr, err, align, insn, cb) \
    __asm__ __volatile__(			\
 	__check_align_##align			\
-	"1: "insn"  %1, %2, 0		\n"	\
+	"1: "insn"  %2, %3, 0		\n"	\
 	"2:				\n"	\
 	"   .section  .fixup,\"ax\"	\n"	\
 	"   .align 4			\n"	\
 	"4:				\n"	\
 	"   .long  2b			\n"	\
 	"5:				\n"	\
-	"   l32r   %2, 4b		\n"	\
-	"   movi   %1, 0		\n"	\
-	"   movi   %0, %3		\n"	\
-	"   jx     %2			\n"	\
+	"   l32r   %1, 4b		\n"	\
+	"   movi   %2, 0		\n"	\
+	"   movi   %0, %4		\n"	\
+	"   jx     %1			\n"	\
 	"   .previous			\n"	\
 	"   .section  __ex_table,\"a\"	\n"	\
 	"   .long	1b, 5b		\n"	\
 	"   .previous"				\
-	:"=r" (err), "=r" (x)			\
+	:"=r" (err), "=r" (cb), "=r" (x)	\
 	:"r" (addr), "i" (-EFAULT), "0" (err))
diff --git a/include/asm-xtensa/variant-fsf/tie-asm.h b/include/asm-xtensa/variant-fsf/tie-asm.h
new file mode 100644
index 00000000000..68a73bf4ffc
--- /dev/null
+++ b/include/asm-xtensa/variant-fsf/tie-asm.h
@@ -0,0 +1,70 @@
+/*
+ * This header file contains assembly-language definitions (assembly
+ * macros, etc.) for this specific Xtensa processor's TIE extensions
+ * and options.  It is customized to this Xtensa processor configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2008 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CORE_TIE_ASM_H
+#define _XTENSA_CORE_TIE_ASM_H
+
+/*  Selection parameter values for save-area save/restore macros:  */
+/*  Option vs. TIE:  */
+#define XTHAL_SAS_TIE	0x0001	/* custom extension or coprocessor */
+#define XTHAL_SAS_OPT	0x0002	/* optional (and not a coprocessor) */
+/*  Whether used automatically by compiler:  */
+#define XTHAL_SAS_NOCC	0x0004	/* not used by compiler w/o special opts/code */
+#define XTHAL_SAS_CC	0x0008	/* used by compiler without special opts/code */
+/*  ABI handling across function calls:  */
+#define XTHAL_SAS_CALR	0x0010	/* caller-saved */
+#define XTHAL_SAS_CALE	0x0020	/* callee-saved */
+#define XTHAL_SAS_GLOB	0x0040	/* global across function calls (in thread) */
+/*  Misc  */
+#define XTHAL_SAS_ALL	0xFFFF	/* include all default NCP contents */
+
+
+
+/* Macro to save all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Save area ptr (clobbered):  ptr  (1 byte aligned)
+ * Scratch regs  (clobbered):  at1..at4  (only first XCHAL_NCP_NUM_ATMPS needed)
+ */
+	.macro xchal_ncp_store  ptr at1 at2 at3 at4  continue=0 ofs=-1 select=XTHAL_SAS_ALL
+	xchal_sa_start	\continue, \ofs
+	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
+	xchal_sa_align	\ptr, 0, 1024-4, 4, 4
+	rur	\at1, THREADPTR		// threadptr option
+	s32i	\at1, \ptr, .Lxchal_ofs_ + 0
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 4
+	.endif
+	.endm	// xchal_ncp_store
+
+/* Macro to save all non-coprocessor (extra) custom TIE and optional state
+ * (not including zero-overhead loop registers).
+ * Save area ptr (clobbered):  ptr  (1 byte aligned)
+ * Scratch regs  (clobbered):  at1..at4  (only first XCHAL_NCP_NUM_ATMPS needed)
+ */
+	.macro xchal_ncp_load  ptr at1 at2 at3 at4  continue=0 ofs=-1 select=XTHAL_SAS_ALL
+	xchal_sa_start	\continue, \ofs
+	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
+	xchal_sa_align	\ptr, 0, 1024-4, 4, 4
+	l32i	\at1, \ptr, .Lxchal_ofs_ + 0
+	wur	\at1, THREADPTR		// threadptr option
+	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 4
+	.endif
+	.endm	// xchal_ncp_load
+
+
+
+#define XCHAL_NCP_NUM_ATMPS	1
+
+
+#define XCHAL_SA_NUM_ATMPS	1
+
+#endif /*_XTENSA_CORE_TIE_ASM_H*/
+
diff --git a/include/asm-xtensa/variant-fsf/tie.h b/include/asm-xtensa/variant-fsf/tie.h
index a73c7166491..bf4020116df 100644
--- a/include/asm-xtensa/variant-fsf/tie.h
+++ b/include/asm-xtensa/variant-fsf/tie.h
@@ -1,22 +1,77 @@
 /*
- * Xtensa processor core configuration information.
+ * This header file describes this specific Xtensa processor's TIE extensions
+ * that extend basic Xtensa core functionality.  It is customized to this
+ * Xtensa processor configuration.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1999-2006 Tensilica Inc.
+ * Copyright (C) 1999-2007 Tensilica Inc.
  */
 
-#ifndef XTENSA_TIE_H
-#define XTENSA_TIE_H
-
-/*----------------------------------------------------------------------
-			COPROCESSORS and EXTRA STATE
-  ----------------------------------------------------------------------*/
+#ifndef _XTENSA_CORE_TIE_H
+#define _XTENSA_CORE_TIE_H
 
 #define XCHAL_CP_NUM			0	/* number of coprocessors */
-#define XCHAL_CP_MASK			0x00
+#define XCHAL_CP_MAX			0	/* max CP ID + 1 (0 if none) */
+#define XCHAL_CP_MASK			0x00	/* bitmask of all CPs by ID */
+#define XCHAL_CP_PORT_MASK		0x00	/* bitmask of only port CPs */
+
+/*  Basic parameters of each coprocessor:  */
+#define XCHAL_CP7_NAME			"XTIOP"
+#define XCHAL_CP7_IDENT			XTIOP
+#define XCHAL_CP7_SA_SIZE		0	/* size of state save area */
+#define XCHAL_CP7_SA_ALIGN		1	/* min alignment of save area */
+#define XCHAL_CP_ID_XTIOP		7	/* coprocessor ID (0..7) */
+
+/*  Filler info for unassigned coprocessors, to simplify arrays etc:  */
+#define XCHAL_NCP_SA_SIZE		0
+#define XCHAL_NCP_SA_ALIGN		1
+#define XCHAL_CP0_SA_SIZE		0
+#define XCHAL_CP0_SA_ALIGN		1
+#define XCHAL_CP1_SA_SIZE		0
+#define XCHAL_CP1_SA_ALIGN		1
+#define XCHAL_CP2_SA_SIZE		0
+#define XCHAL_CP2_SA_ALIGN		1
+#define XCHAL_CP3_SA_SIZE		0
+#define XCHAL_CP3_SA_ALIGN		1
+#define XCHAL_CP4_SA_SIZE		0
+#define XCHAL_CP4_SA_ALIGN		1
+#define XCHAL_CP5_SA_SIZE		0
+#define XCHAL_CP5_SA_ALIGN		1
+#define XCHAL_CP6_SA_SIZE		0
+#define XCHAL_CP6_SA_ALIGN		1
+
+/*  Save area for non-coprocessor optional and custom (TIE) state:  */
+#define XCHAL_NCP_SA_SIZE		0
+#define XCHAL_NCP_SA_ALIGN		1
+
+/*  Total save area for optional and custom state (NCP + CPn):  */
+#define XCHAL_TOTAL_SA_SIZE		0	/* with 16-byte align padding */
+#define XCHAL_TOTAL_SA_ALIGN		1	/* actual minimum alignment */
+
+#define XCHAL_NCP_SA_NUM	0
+#define XCHAL_NCP_SA_LIST(s)
+#define XCHAL_CP0_SA_NUM	0
+#define XCHAL_CP0_SA_LIST(s)
+#define XCHAL_CP1_SA_NUM	0
+#define XCHAL_CP1_SA_LIST(s)
+#define XCHAL_CP2_SA_NUM	0
+#define XCHAL_CP2_SA_LIST(s)
+#define XCHAL_CP3_SA_NUM	0
+#define XCHAL_CP3_SA_LIST(s)
+#define XCHAL_CP4_SA_NUM	0
+#define XCHAL_CP4_SA_LIST(s)
+#define XCHAL_CP5_SA_NUM	0
+#define XCHAL_CP5_SA_LIST(s)
+#define XCHAL_CP6_SA_NUM	0
+#define XCHAL_CP6_SA_LIST(s)
+#define XCHAL_CP7_SA_NUM	0
+#define XCHAL_CP7_SA_LIST(s)
+
+/* Byte length of instruction from its first nibble (op0 field), per FLIX.  */
+#define XCHAL_OP0_FORMAT_LENGTHS	3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
 
-#endif /*XTENSA_CONFIG_TIE_H*/
+#endif /*_XTENSA_CORE_TIE_H*/
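
As a usage sketch (not part of the patch): the CPENABLE access macros RSR_CPENABLE()/WSR_CPENABLE() now live in <asm/coprocessor.h>, so the enable_coprocessor() helper deleted from system.h above could be rebuilt as shown below. The name enable_coprocessor_bit() is illustrative only; the patch itself no longer provides such a helper.

#include <asm/coprocessor.h>

/* Sketch only, for a configuration where XCHAL_HAVE_CP is set. */
static inline void enable_coprocessor_bit(int cp)
{
#if XCHAL_HAVE_CP
	unsigned long val;

	RSR_CPENABLE(val);	/* read the current CPENABLE mask */
	val |= 1UL << cp;	/* allow access to coprocessor 'cp' */
	WSR_CPENABLE(val);	/* write back; the macro issues rsync */
#endif
}

The new configuration tests in coprocessor.h are plain mask arithmetic; the mask values below are assumed for illustration (a real variant header supplies them, and the fsf variant in this patch defines both as 0):

#define XCHAL_CP_MASK		0x82	/* assumed: coprocessor IDs 1 and 7 populated */
#define XCHAL_CP_PORT_MASK	0x80	/* assumed: ID 7 is an XTIOP-style port */

#define XTENSA_HAVE_COPROCESSOR(x) \
	((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
#define XTENSA_HAVE_IO_PORT(x) \
	(XCHAL_CP_PORT_MASK & (1 << (x)))

/*
 * XTENSA_HAVE_COPROCESSOR(1) -> 0x02 (true: a real coprocessor)
 * XTENSA_HAVE_COPROCESSOR(7) -> 0x00 (false: only an I/O port)
 * XTENSA_HAVE_IO_PORT(7)     -> 0x80 (true)
 */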