Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile       |   2
-rw-r--r--  arch/arm/kernel/arch.c         |  46
-rw-r--r--  arch/arm/kernel/ecard.c        |  12
-rw-r--r--  arch/arm/kernel/entry-armv.S   | 272
-rw-r--r--  arch/arm/kernel/entry-header.S |   7
-rw-r--r--  arch/arm/kernel/head.S         |  44
-rw-r--r--  arch/arm/kernel/irq.c          |  14
-rw-r--r--  arch/arm/kernel/setup.c        |  77
-rw-r--r--  arch/arm/kernel/signal.c       |  31
-rw-r--r--  arch/arm/kernel/signal.h       |  12
-rw-r--r--  arch/arm/kernel/smp.c          | 110
-rw-r--r--  arch/arm/kernel/time.c         | 103
-rw-r--r--  arch/arm/kernel/traps.c        |  58
13 files changed, 585 insertions, 203 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 4a2af55e134..3e1b0327e4d 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -6,7 +6,7 @@ AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR) -DDATAADDR=$(DATAADDR)
# Object file lists.
-obj-y := arch.o compat.o dma.o entry-armv.o entry-common.o irq.o \
+obj-y := compat.o dma.o entry-armv.o entry-common.o irq.o \
 process.o ptrace.o semaphore.o setup.o signal.o sys_arm.o \
 time.o traps.o
diff --git a/arch/arm/kernel/arch.c b/arch/arm/kernel/arch.c
deleted file mode 100644
index 4e02fbeb10a..00000000000
--- a/arch/arm/kernel/arch.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * linux/arch/arm/kernel/arch.c
- *
- * Architecture specific fixups.
- */
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include <asm/elf.h>
-#include <asm/page.h>
-#include <asm/setup.h>
-#include <asm/mach/arch.h>
-
-unsigned int vram_size;
-
-#ifdef CONFIG_ARCH_ACORN
-
-unsigned int memc_ctrl_reg;
-unsigned int number_mfm_drives;
-
-static int __init parse_tag_acorn(const struct tag *tag)
-{
- memc_ctrl_reg = tag->u.acorn.memc_control_reg;
- number_mfm_drives = tag->u.acorn.adfsdrives;
-
- switch (tag->u.acorn.vram_pages) {
- case 512:
- vram_size += PAGE_SIZE * 256;
- case 256:
- vram_size += PAGE_SIZE * 256;
- default:
- break;
- }
-#if 0
- if (vram_size) {
- desc->video_start = 0x02000000;
- desc->video_end = 0x02000000 + vram_size;
- }
-#endif
- return 0;
-}
-
-__tagtable(ATAG_ACORN, parse_tag_acorn);
-
-#endif
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index 3dc15b131f5..6540db69133 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -866,19 +866,19 @@ static struct expansion_card *__init ecard_alloc_card(int type, int slot)
 return ec;
}
-static ssize_t ecard_show_irq(struct device *dev, char *buf)
+static ssize_t ecard_show_irq(struct device *dev, struct device_attribute *attr, char *buf)
{
 struct expansion_card *ec = ECARD_DEV(dev);
 return sprintf(buf, "%u\n", ec->irq);
}
-static ssize_t ecard_show_dma(struct device *dev, char *buf)
+static ssize_t ecard_show_dma(struct device *dev, struct device_attribute *attr, char *buf)
{
 struct expansion_card *ec = ECARD_DEV(dev);
 return sprintf(buf, "%u\n", ec->dma);
}
-static ssize_t ecard_show_resources(struct device *dev, char *buf)
+static ssize_t ecard_show_resources(struct device *dev, struct device_attribute *attr, char *buf)
{
 struct expansion_card *ec = ECARD_DEV(dev);
 char *str = buf;
@@ -893,19 +893,19 @@ static ssize_t ecard_show_resources(struct device *dev, char *buf)
 return str - buf;
}
-static ssize_t ecard_show_vendor(struct device *dev, char *buf)
+static ssize_t ecard_show_vendor(struct device *dev, struct device_attribute *attr, char *buf)
{
 struct expansion_card *ec = ECARD_DEV(dev);
 return sprintf(buf, "%u\n", ec->cid.manufacturer);
}
-static ssize_t ecard_show_device(struct device *dev, char *buf)
+static ssize_t ecard_show_device(struct device *dev, struct device_attribute *attr, char *buf)
{
 struct expansion_card *ec = ECARD_DEV(dev);
 return sprintf(buf, "%u\n", ec->cid.product);
}
-static ssize_t ecard_show_type(struct device *dev, char *buf)
+static ssize_t ecard_show_type(struct device *dev, struct device_attribute *attr, char *buf)
{
 struct expansion_card *ec = ECARD_DEV(dev);
 return sprintf(buf, "%s\n", ec->type == ECARD_EASI ? "EASI" : "IOC");
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 4eb36155dc9..39a6c1b0b9a 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -24,48 +24,91 @@
#include "entry-header.S"
/*
+ * Interrupt handling. Preserves r7, r8, r9
+ */
+ .macro irq_handler
+1: get_irqnr_and_base r0, r6, r5, lr
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adrne lr, 1b
+ bne asm_do_IRQ
+
+#ifdef CONFIG_SMP
+ /*
+ * XXX
+ *
+ * this macro assumes that irqstat (r6) and base (r5) are
+ * preserved from get_irqnr_and_base above
+ */
+ test_for_ipi r0, r6, r5, lr
+ movne r0, sp
+ adrne lr, 1b
+ bne do_IPI
+#endif
+
+ .endm
+
+/*
 * Invalid mode handlers
 */
- .macro inv_entry, sym, reason
- sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
- stmia sp, {r0 - lr} @ Save XXX r0 - lr
- ldr r4, .LC\sym
+ .macro inv_entry, reason
+ sub sp, sp, #S_FRAME_SIZE
+ stmib sp, {r1 - lr}
 mov r1, #\reason
 .endm
__pabt_invalid:
- inv_entry abt, BAD_PREFETCH
- b 1f
+ inv_entry BAD_PREFETCH
+ b common_invalid
__dabt_invalid:
- inv_entry abt, BAD_DATA
- b 1f
+ inv_entry BAD_DATA
+ b common_invalid
__irq_invalid:
- inv_entry irq, BAD_IRQ
- b 1f
+ inv_entry BAD_IRQ
+ b common_invalid
__und_invalid:
- inv_entry und, BAD_UNDEFINSTR
+ inv_entry BAD_UNDEFINSTR
+
+ @
+ @ XXX fall through to common_invalid
+ @
+
+@
+@ common_invalid - generic code for failed exception (re-entrant version of handlers)
+@
+common_invalid:
+ zero_fp
+
+ ldmia r0, {r4 - r6}
+ add r0, sp, #S_PC @ here for interlock avoidance
+ mov r7, #-1 @ "" "" "" ""
+ str r4, [sp] @ save preserved r0
+ stmia r0, {r5 - r7} @ lr_<exception>,
+ @ cpsr_<exception>, "old_r0"
-1: zero_fp
- ldmia r4, {r5 - r7} @ Get XXX pc, cpsr, old_r0
- add r4, sp, #S_PC
- stmia r4, {r5 - r7} @ Save XXX pc, cpsr, old_r0
 mov r0, sp
- and r2, r6, #31 @ int mode
+ and r2, r6, #0x1f
 b bad_mode
/*
 * SVC mode handlers
 */
- .macro svc_entry, sym
+ .macro svc_entry
 sub sp, sp, #S_FRAME_SIZE
- stmia sp, {r0 - r12} @ save r0 - r12
- ldr r2, .LC\sym
- add r0, sp, #S_FRAME_SIZE
- ldmia r2, {r2 - r4} @ get pc, cpsr
- add r5, sp, #S_SP
+ stmib sp, {r1 - r12}
+
+ ldmia r0, {r1 - r3}
+ add r5, sp, #S_SP @ here for interlock avoidance
+ mov r4, #-1 @ "" "" "" ""
+ add r0, sp, #S_FRAME_SIZE @ "" "" "" ""
+ str r1, [sp] @ save the "real" r0 copied
+ @ from the exception stack
+
 mov r1, lr
 @
@@ -82,7 +125,7 @@ __und_invalid:
 .align 5
__dabt_svc:
- svc_entry abt
+ svc_entry
 @
 @ get ready to re-enable interrupts if appropriate
 @
@@ -129,28 +172,24 @@ __dabt_svc:
 .align 5
__irq_svc:
- svc_entry irq
+ svc_entry
+
#ifdef CONFIG_PREEMPT
- get_thread_info r8
- ldr r9, [r8, #TI_PREEMPT] @ get preempt count
- add r7, r9, #1 @ increment it
- str r7, [r8, #TI_PREEMPT]
+ get_thread_info tsk
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+ add r7, r8, #1 @ increment it
+ str r7, [tsk, #TI_PREEMPT]
#endif
-1: get_irqnr_and_base r0, r6, r5, lr
- movne r1, sp
- @
- @ routine called with r0 = irq number, r1 = struct pt_regs *
- @
- adrne lr, 1b
- bne asm_do_IRQ
+
+ irq_handler
#ifdef CONFIG_PREEMPT
- ldr r0, [r8, #TI_FLAGS] @ get flags
+ ldr r0, [tsk, #TI_FLAGS] @ get flags
 tst r0, #_TIF_NEED_RESCHED
 blne svc_preempt
preempt_return:
- ldr r0, [r8, #TI_PREEMPT] @ read preempt value
+ ldr r0, [tsk, #TI_PREEMPT] @ read preempt value
+ str r8, [tsk, #TI_PREEMPT] @ restore preempt count
 teq r0, r7
- str r9, [r8, #TI_PREEMPT] @ restore preempt count
 strne r0, [r0, -r0] @ bug()
#endif
 ldr r0, [sp, #S_PSR] @ irqs are already disabled
@@ -161,7 +200,7 @@ preempt_return:
#ifdef CONFIG_PREEMPT
svc_preempt:
- teq r9, #0 @ was preempt count = 0
+ teq r8, #0 @ was preempt count = 0
 ldreq r6, .LCirq_stat
 movne pc, lr @ no
 ldr r0, [r6, #4] @ local_irq_count
@@ -169,9 +208,9 @@ svc_preempt:
 adds r0, r0, r1
 movne pc, lr
 mov r7, #0 @ preempt_schedule_irq
- str r7, [r8, #TI_PREEMPT] @ expects preempt_count == 0
+ str r7, [tsk, #TI_PREEMPT] @ expects preempt_count == 0
1: bl preempt_schedule_irq @ irq en/disable is done inside
- ldr r0, [r8, #TI_FLAGS] @ get new tasks TI_FLAGS
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
 tst r0, #_TIF_NEED_RESCHED
 beq preempt_return @ go again
 b 1b
@@ -179,7 +218,7 @@ svc_preempt:
 .align 5
__und_svc:
- svc_entry und
+ svc_entry
 @
 @ call emulation code, which returns using r9 if it has emulated
@@ -209,7 +248,7 @@ __und_svc:
 .align 5
__pabt_svc:
- svc_entry abt
+ svc_entry
 @
 @ re-enable interrupts if appropriate
@@ -242,12 +281,8 @@ __pabt_svc:
 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
 .align 5
-.LCirq:
- .word __temp_irq
-.LCund:
- .word __temp_und
-.LCabt:
- .word __temp_abt
+.LCcralign:
+ .word cr_alignment
#ifdef MULTI_ABORT
.LCprocfns:
 .word processor
@@ -262,14 +297,18 @@ __pabt_svc:
/*
 * User mode handlers
 */
- .macro usr_entry, sym
- sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
- stmia sp, {r0 - r12} @ save r0 - r12
- ldr r7, .LC\sym
- add r5, sp, #S_PC
- ldmia r7, {r2 - r4} @ Get USR pc, cpsr
-
-#if __LINUX_ARM_ARCH__ < 6
+ .macro usr_entry
+ sub sp, sp, #S_FRAME_SIZE
+ stmib sp, {r1 - r12}
+
+ ldmia r0, {r1 - r3}
+ add r0, sp, #S_PC @ here for interlock avoidance
+ mov r4, #-1 @ "" "" "" ""
+
+ str r1, [sp] @ save the "real" r0 copied
+ @ from the exception stack
+
+#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 @ make sure our user space atomic helper is aborted
 cmp r2, #VIRT_OFFSET
 bichs r3, r3, #PSR_Z_BIT
@@ -284,13 +323,13 @@ __pabt_svc:
 @
 @ Also, separately save sp_usr and lr_usr
 @
- stmia r5, {r2 - r4}
- stmdb r5, {sp, lr}^
+ stmia r0, {r2 - r4}
+ stmdb r0, {sp, lr}^
 @
 @ Enable the alignment trap while in kernel mode
 @
- alignment_trap r7, r0, __temp_\sym
+ alignment_trap r0
 @
 @ Clear FP to mark the first stack frame
@@ -300,7 +339,7 @@ __pabt_svc:
 .align 5
__dabt_usr:
- usr_entry abt
+ usr_entry
 @
 @ Call the processor-specific abort handler:
@@ -329,30 +368,23 @@ __dabt_usr:
 .align 5
__irq_usr:
- usr_entry irq
+ usr_entry
+ get_thread_info tsk
#ifdef CONFIG_PREEMPT
- get_thread_info r8
- ldr r9, [r8, #TI_PREEMPT] @ get preempt count
- add r7, r9, #1 @ increment it
- str r7, [r8, #TI_PREEMPT]
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+ add r7, r8, #1 @ increment it
+ str r7, [tsk, #TI_PREEMPT]
#endif
-1: get_irqnr_and_base r0, r6, r5, lr
- movne r1, sp
- adrne lr, 1b
- @
- @ routine called with r0 = irq number, r1 = struct pt_regs *
- @
- bne asm_do_IRQ
+
+ irq_handler
#ifdef CONFIG_PREEMPT
- ldr r0, [r8, #TI_PREEMPT]
+ ldr r0, [tsk, #TI_PREEMPT]
+ str r8, [tsk, #TI_PREEMPT]
 teq r0, r7
- str r9, [r8, #TI_PREEMPT]
 strne r0, [r0, -r0]
- mov tsk, r8
-#else
- get_thread_info tsk
#endif
+
 mov why, #0
 b ret_to_user
@@ -360,7 +392,7 @@ __irq_usr:
 .align 5
__und_usr:
- usr_entry und
+ usr_entry
 tst r3, #PSR_T_BIT @ Thumb mode?
 bne fpundefinstr @ ignore FP
@@ -476,7 +508,7 @@ fpundefinstr:
 .align 5
__pabt_usr:
- usr_entry abt
+ usr_entry
 enable_irq @ Enable interrupts
 mov r0, r2 @ address (pc)
@@ -616,11 +648,17 @@ __kuser_helper_start:
__kuser_cmpxchg: @ 0xffff0fc0
-#if __LINUX_ARM_ARCH__ < 6
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-#ifdef CONFIG_SMP /* sanity check */
-#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
-#endif
+ /*
+ * Poor you. No fast solution possible...
+ * The kernel itself must perform the operation.
+ * A special ghost syscall is used for that (see traps.c).
+ */
+ swi #0x9ffff0
+ mov pc, lr
+
+#elif __LINUX_ARM_ARCH__ < 6
 /*
 * Theory of operation:
@@ -735,29 +773,41 @@ __kuser_helper_end:
 *
 * Common stub entry macro:
 * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
 */
- .macro vector_stub, name, sym, correction=0
+ .macro vector_stub, name, correction=0
 .align 5
vector_\name:
- ldr r13, .LCs\sym
 .if \correction
 sub lr, lr, #\correction
 .endif
- str lr, [r13] @ save lr_IRQ
+
+ @
+ @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+ @ (parent CPSR)
+ @
+ stmia sp, {r0, lr} @ save r0, lr
 mrs lr, spsr
- str lr, [r13, #4] @ save spsr_IRQ
+ str lr, [sp, #8] @ save spsr
+
 @
- @ now branch to the relevant MODE handling routine
+ @ Prepare for SVC32 mode. IRQs remain disabled.
 @
- mrs r13, cpsr
- bic r13, r13, #MODE_MASK
- orr r13, r13, #SVC_MODE
- msr spsr_cxsf, r13 @ switch to SVC_32 mode
+ mrs r0, cpsr
+ bic r0, r0, #MODE_MASK
+ orr r0, r0, #SVC_MODE
+ msr spsr_cxsf, r0
- and lr, lr, #15
+ @
+ @ the branch table must immediately follow this code
+ @
+ mov r0, sp
+ and lr, lr, #0x0f
 ldr lr, [pc, lr, lsl #2]
- movs pc, lr @ Changes mode and branches
+ movs pc, lr @ branch to handler in SVC mode
 .endm
 .globl __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
- vector_stub irq, irq, 4
+ vector_stub irq, 4
 .long __irq_usr @ 0 (USR_26 / USR_32)
 .long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -788,7 +838,7 @@ __stubs_start:
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
- vector_stub dabt, abt, 8
+ vector_stub dabt, 8
 .long __dabt_usr @ 0 (USR_26 / USR_32)
 .long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -811,7 +861,7 @@ __stubs_start:
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
- vector_stub pabt, abt, 4
+ vector_stub pabt, 4
 .long __pabt_usr @ 0 (USR_26 / USR_32)
 .long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -834,7 +884,7 @@ __stubs_start:
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
- vector_stub und, und
+ vector_stub und
 .long __und_usr @ 0 (USR_26 / USR_32)
 .long __und_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -888,13 +938,6 @@ vector_addrexcptn:
.LCvswi:
 .word vector_swi
-.LCsirq:
- .word __temp_irq
-.LCsund:
- .word __temp_und
-.LCsabt:
- .word __temp_abt
-
 .globl __stubs_end
__stubs_end:
@@ -916,23 +959,6 @@ __vectors_end:
 .data
-/*
- * Do not reorder these, and do not insert extra data between...
- */
-
-__temp_irq:
- .word 0 @ saved lr_irq
- .word 0 @ saved spsr_irq
- .word -1 @ old_r0
-__temp_und:
- .word 0 @ Saved lr_und
- .word 0 @ Saved spsr_und
- .word -1 @ old_r0
-__temp_abt:
- .word 0 @ Saved lr_abt
- .word 0 @ Saved spsr_abt
- .word -1 @ old_r0
-
 .globl cr_alignment
 .globl cr_no_alignment
cr_alignment:
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index a3d40a0e2b0..afef2127396 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -59,11 +59,10 @@
 mov \rd, \rd, lsl #13
 .endm
- .macro alignment_trap, rbase, rtemp, sym
+ .macro alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
-#define OFF_CR_ALIGNMENT(x) cr_alignment - x
-
- ldr \rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
+ ldr \rtemp, .LCcralign
+ ldr \rtemp, [\rtemp]
 mcr p15, 0, \rtemp, c1, c0
#endif
 .endm
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4733877296d..bd4823c7464 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -2,6 +2,8 @@
 * linux/arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
@@ -165,6 +167,48 @@ __mmap_switched:
 stmia r6, {r0, r4} @ Save control register values
 b start_kernel
+#if defined(CONFIG_SMP)
+ .type secondary_startup, #function
+ENTRY(secondary_startup)
+ /*
+ * Common entry point for secondary CPUs.
+ *
+ * Ensure that we're in SVC mode, and IRQs are disabled. Lookup
+ * the processor type - there is no need to check the machine type
+ * as it has already been validated by the primary processor.
+ */
+ msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
+ bl __lookup_processor_type
+ movs r10, r5 @ invalid processor?
+ moveq r0, #'p' @ yes, error 'p'
+ beq __error
+
+ /*
+ * Use the page tables supplied from __cpu_up.
+ */
+ adr r4, __secondary_data
+ ldmia r4, {r5, r6, r13} @ address to jump to after
+ sub r4, r4, r5 @ mmu has been enabled
+ ldr r4, [r6, r4] @ get secondary_data.pgdir
+ adr lr, __enable_mmu @ return address
+ add pc, r10, #12 @ initialise processor
+ @ (return control reg)
+
+ /*
+ * r6 = &secondary_data
+ */
+ENTRY(__secondary_switched)
+ ldr sp, [r6, #4] @ get secondary_data.stack
+ mov fp, #0
+ b secondary_start_kernel
+
+ .type __secondary_data, %object
+__secondary_data:
+ .long .
+ .long secondary_data
+ .long __secondary_switched
+#endif /* defined(CONFIG_SMP) */
+
 /*
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ff187f4308f..395137a8fad 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -4,6 +4,10 @@
 * Copyright (C) 1992 Linus Torvalds
 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
+ * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
+ * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
+ * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
+ *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
@@ -37,6 +41,7 @@
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>
+#include <asm/mach/time.h>
/*
 * Maximum IRQ count. Currently, this is arbitary.
 However, it should
@@ -329,6 +334,15 @@ __do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
 spin_unlock(&irq_controller_lock);
+#ifdef CONFIG_NO_IDLE_HZ
+ if (!(action->flags & SA_TIMER) && system_timer->dyn_tick != NULL) {
+ write_seqlock(&xtime_lock);
+ if (system_timer->dyn_tick->state & DYN_TICK_ENABLED)
+ system_timer->dyn_tick->handler(irq, 0, regs);
+ write_sequnlock(&xtime_lock);
+ }
+#endif
+
 if (!(action->flags & SA_INTERRUPT))
 local_irq_enable();
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c2a7da3ac0f..8cf733daa80 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -92,6 +92,14 @@ struct cpu_user_fns cpu_user;
struct cpu_cache_fns cpu_cache;
#endif
+struct stack {
+ u32 irq[3];
+ u32 abt[3];
+ u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);
@@ -307,8 +315,6 @@ static void __init setup_processor(void)
 cpu_name, processor_id, (int)processor_id & 15,
 proc_arch[cpu_architecture()]);
- dump_cpu_info(smp_processor_id());
-
 sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
 sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
 elf_hwcap = list->elf_hwcap;
@@ -316,6 +322,46 @@ static void __init setup_processor(void)
 cpu_proc_init();
}
+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init dumps the cache information, initialises SMP specific
+ * information, and sets up the per-CPU stacks.
+ */
+void cpu_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct stack *stk = &stacks[cpu];
+
+ if (cpu >= NR_CPUS) {
+ printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+ BUG();
+ }
+
+ dump_cpu_info(cpu);
+
+ /*
+ * setup stacks for re-entrant exception handlers
+ */
+ __asm__ (
+ "msr cpsr_c, %1\n\t"
+ "add sp, %0, %2\n\t"
+ "msr cpsr_c, %3\n\t"
+ "add sp, %0, %4\n\t"
+ "msr cpsr_c, %5\n\t"
+ "add sp, %0, %6\n\t"
+ "msr cpsr_c, %7"
+ :
+ : "r" (stk),
+ "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ "I" (offsetof(struct stack, irq[0])),
+ "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ "I" (offsetof(struct stack, abt[0])),
+ "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ "I" (offsetof(struct stack, und[0])),
+ "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE));
+}
+
static struct machine_desc * __init setup_machine(unsigned int nr)
{
 struct machine_desc *list;
@@ -349,6 +395,20 @@ static void __init early_initrd(char **p)
}
__early_param("initrd=", early_initrd);
+static void __init add_memory(unsigned long start, unsigned long size)
+{
+ /*
+ * Ensure that start/size are aligned to a page boundary.
+ * Size is appropriately rounded down, start is rounded up.
+ */
+ size -= start & ~PAGE_MASK;
+
+ meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
+ meminfo.bank[meminfo.nr_banks].size = size & PAGE_MASK;
+ meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
+ meminfo.nr_banks += 1;
+}
+
/*
 * Pick out the memory size.
 We look for mem=size@start,
 * where start and size are "size[KkMm]"
@@ -373,10 +433,7 @@ static void __init early_mem(char **p)
 if (**p == '@')
 start = memparse(*p + 1, p);
- meminfo.bank[meminfo.nr_banks].start = start;
- meminfo.bank[meminfo.nr_banks].size = size;
- meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
- meminfo.nr_banks += 1;
+ add_memory(start, size);
}
__early_param("mem=", early_mem);
@@ -518,11 +575,7 @@ static int __init parse_tag_mem32(const struct tag *tag)
 tag->u.mem.start, tag->u.mem.size / 1024);
 return -EINVAL;
 }
- meminfo.bank[meminfo.nr_banks].start = tag->u.mem.start;
- meminfo.bank[meminfo.nr_banks].size = tag->u.mem.size;
- meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(tag->u.mem.start);
- meminfo.nr_banks += 1;
-
+ add_memory(tag->u.mem.start, tag->u.mem.size);
 return 0;
}
@@ -715,6 +768,8 @@ void __init setup_arch(char **cmdline_p)
 paging_init(&meminfo, mdesc);
 request_standard_resources(&meminfo, mdesc);
+ cpu_init();
+
 /*
 * Set up various architecture-specific pointers
 */
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 931919fd512..5e435e42dac 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -19,6 +19,7 @@
#include <asm/unistd.h>
#include "ptrace.h"
+#include "signal.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -35,7 +36,7 @@
#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-static const unsigned long retcodes[4] = {
+const unsigned long sigreturn_codes[4] = {
 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
};
@@ -500,17 +501,25 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 if (ka->sa.sa_flags & SA_SIGINFO)
 idx += 2;
- if (__put_user(retcodes[idx], rc))
+ if (__put_user(sigreturn_codes[idx], rc))
 return 1;
- /*
- * Ensure that the instruction cache sees
- * the return code written onto the stack.
- */
- flush_icache_range((unsigned long)rc,
- (unsigned long)(rc + 1));
-
- retcode = ((unsigned long)rc) + thumb;
+ if (cpsr & MODE32_BIT) {
+ /*
+ * 32-bit code can use the new high-page
+ * signal return code support.
+ */
+ retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
+ } else {
+ /*
+ * Ensure that the instruction cache sees
+ * the return code written onto the stack.
+ */
+ flush_icache_range((unsigned long)rc,
+ (unsigned long)(rc + 1));
+
+ retcode = ((unsigned long)rc) + thumb;
+ }
 }
 regs->ARM_r0 = usig;
@@ -688,7 +697,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 if (!user_mode(regs))
 return 0;
- if (try_to_freeze(0))
+ if (try_to_freeze())
 goto no_signal;
 if (current->ptrace & PT_SINGLESTEP)
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
new file mode 100644
index 00000000000..91d26faca62
--- /dev/null
+++ b/arch/arm/kernel/signal.h
@@ -0,0 +1,12 @@
+/*
+ * linux/arch/arm/kernel/signal.h
+ *
+ * Copyright (C) 2005 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define KERN_SIGRETURN_CODE 0xffff0500
+
+extern const unsigned long sigreturn_codes[4];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ecc8c333240..34892758f09 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -24,6 +24,9 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
@@ -37,6 +40,13 @@ cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;
/*
+ * as from 2.5, kernels no longer have an init_tasks structure
+ * so we need some other way of telling a new secondary core
+ * where to place its SVC stack
+ */
+struct secondary_data secondary_data;
+
+/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
@@ -71,6 +81,8 @@ static DEFINE_SPINLOCK(smp_call_function_lock);
int __init __cpu_up(unsigned int cpu)
{
 struct task_struct *idle;
+ pgd_t *pgd;
+ pmd_t *pmd;
 int ret;
 /*
@@ -84,11 +96,57 @@ int __init __cpu_up(unsigned int cpu)
 }
 /*
+ * Allocate initial page tables to allow the new CPU to
+ * enable the MMU safely. This essentially means a set
+ * of our "standard" page tables, with the addition of
+ * a 1:1 mapping for the physical address of the kernel.
+ */
+ pgd = pgd_alloc(&init_mm);
+ pmd = pmd_offset(pgd, PHYS_OFFSET);
+ *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
+ PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
+
+ /*
+ * We need to tell the secondary core where to find
+ * its stack and the page tables.
+ */
+ secondary_data.stack = (void *)idle->thread_info + THREAD_SIZE - 8;
+ secondary_data.pgdir = virt_to_phys(pgd);
+ wmb();
+
+ /*
 * Now bring the CPU into our world.
 */
 ret = boot_secondary(cpu, idle);
+ if (ret == 0) {
+ unsigned long timeout;
+
+ /*
+ * CPU was successfully started, wait for it
+ * to come online or time out.
+ */
+ timeout = jiffies + HZ;
+ while (time_before(jiffies, timeout)) {
+ if (cpu_online(cpu))
+ break;
+
+ udelay(10);
+ barrier();
+ }
+
+ if (!cpu_online(cpu))
+ ret = -EIO;
+ }
+
+ secondary_data.stack = 0;
+ secondary_data.pgdir = 0;
+
+ *pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
+ pgd_free(pgd);
+
 if (ret) {
- printk(KERN_CRIT "cpu_up: processor %d failed to boot\n", cpu);
+ printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
+
 /*
 * FIXME: We need to clean up the new idle thread. --rmk
 */
@@ -98,6 +156,56 @@ int __init __cpu_up(unsigned int cpu)
}
/*
+ * This is the secondary CPU boot entry. We're using this CPUs
+ * idle thread stack, but a set of temporary page tables.
+ */
+asmlinkage void __init secondary_start_kernel(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ printk("CPU%u: Booted secondary processor\n", cpu);
+
+ /*
+ * All kernel threads share the same mm context; grab a
+ * reference and switch to it.
+ */
+ atomic_inc(&mm->mm_users);
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+ cpu_set(cpu, mm->cpu_vm_mask);
+ cpu_switch_mm(mm->pgd, mm);
+ enter_lazy_tlb(mm, current);
+
+ cpu_init();
+
+ /*
+ * Give the platform a chance to do its own initialisation.
+ */
+ platform_secondary_init(cpu);
+
+ /*
+ * Enable local interrupts.
+ */
+ local_irq_enable();
+ local_fiq_enable();
+
+ calibrate_delay();
+
+ smp_store_cpu_info(cpu);
+
+ /*
+ * OK, now it's safe to let the boot CPU continue
+ */
+ cpu_set(cpu, cpu_online_map);
+
+ /*
+ * OK, it's off to the idle thread for us
+ */
+ cpu_idle();
+}
+
+/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index c232f24f4a6..06054c9ba07 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -381,6 +381,95 @@ static struct sysdev_class timer_sysclass = {
 .resume = timer_resume,
};
+#ifdef CONFIG_NO_IDLE_HZ
+static int timer_dyn_tick_enable(void)
+{
+ struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
+ unsigned long flags;
+ int ret = -ENODEV;
+
+ if (dyn_tick) {
+ write_seqlock_irqsave(&xtime_lock, flags);
+ ret = 0;
+ if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
+ ret = dyn_tick->enable();
+
+ if (ret == 0)
+ dyn_tick->state |= DYN_TICK_ENABLED;
+ }
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ }
+
+ return ret;
+}
+
+static int timer_dyn_tick_disable(void)
+{
+ struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
+ unsigned long flags;
+ int ret = -ENODEV;
+
+ if (dyn_tick) {
+ write_seqlock_irqsave(&xtime_lock, flags);
+ ret = 0;
+ if (dyn_tick->state & DYN_TICK_ENABLED) {
+ ret = dyn_tick->disable();
+
+ if (ret == 0)
+ dyn_tick->state &= ~DYN_TICK_ENABLED;
+ }
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ }
+
+ return ret;
+}
+
+void timer_dyn_reprogram(void)
+{
+ struct dyn_tick_timer *dyn_tick = system_timer->dyn_tick;
+ unsigned long flags;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+ if (dyn_tick->state & DYN_TICK_ENABLED)
+ dyn_tick->reprogram(next_timer_interrupt() - jiffies);
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+}
+
+static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
+{
+ return sprintf(buf, "%i\n",
+ (system_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1);
+}
+
+static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf,
+ size_t count)
+{
+ unsigned int enable = simple_strtoul(buf, NULL, 2);
+
+ if (enable)
+ timer_dyn_tick_enable();
+ else
+ timer_dyn_tick_disable();
+
+ return count;
+}
+static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick);
+
+/*
+ * dyntick=enable|disable
+ */
+static char dyntick_str[4] __initdata = "";
+
+static int __init dyntick_setup(char *str)
+{
+ if (str)
+ strlcpy(dyntick_str, str, sizeof(dyntick_str));
+ return 1;
+}
+
+__setup("dyntick=", dyntick_setup);
+#endif
+
static int __init timer_init_sysfs(void)
{
 int ret = sysdev_class_register(&timer_sysclass);
@@ -388,6 +477,20 @@ static int __init timer_init_sysfs(void)
 system_timer->dev.cls = &timer_sysclass;
 ret = sysdev_register(&system_timer->dev);
 }
+
+#ifdef CONFIG_NO_IDLE_HZ
+ if (ret == 0 && system_timer->dyn_tick) {
+ ret = sysdev_create_file(&system_timer->dev, &attr_dyn_tick);
+
+ /*
+ * Turn on dynamic tick after calibrate delay
+ * for correct bogomips
+ */
+ if (ret == 0 && dyntick_str[0] == 'e')
+ ret = timer_dyn_tick_enable();
+ }
+#endif
+
 return ret;
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 14df16b983f..2fb0a4cfb37 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -30,6 +30,7 @@
#include <asm/traps.h>
#include "ptrace.h"
+#include "signal.h"
const char *processor_modes[]=
{ "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
@@ -464,6 +465,55 @@ asmlinkage int
arm_syscall(int no, struct pt_regs *regs)
#endif
 return 0;
+#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+ /*
+ * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
+ * Return zero in r0 if *MEM was changed or non-zero if no exchange
+ * happened. Also set the user C flag accordingly.
+ * If access permissions have to be fixed up then non-zero is
+ * returned and the operation has to be re-attempted.
+ *
+ * *NOTE*: This is a ghost syscall private to the kernel. Only the
+ * __kuser_cmpxchg code in entry-armv.S should be aware of its
+ * existence. Don't ever use this from user code.
+ */
+ case 0xfff0:
+ {
+ extern void do_DataAbort(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs);
+ unsigned long val;
+ unsigned long addr = regs->ARM_r2;
+ struct mm_struct *mm = current->mm;
+ pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+
+ regs->ARM_cpsr &= ~PSR_C_BIT;
+ spin_lock(&mm->page_table_lock);
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_present(*pgd))
+ goto bad_access;
+ pmd = pmd_offset(pgd, addr);
+ if (!pmd_present(*pmd))
+ goto bad_access;
+ pte = pte_offset_map(pmd, addr);
+ if (!pte_present(*pte) || !pte_write(*pte))
+ goto bad_access;
+ val = *(unsigned long *)addr;
+ val -= regs->ARM_r0;
+ if (val == 0) {
+ *(unsigned long *)addr = regs->ARM_r1;
+ regs->ARM_cpsr |= PSR_C_BIT;
+ }
+ spin_unlock(&mm->page_table_lock);
+ return val;
+
+ bad_access:
+ spin_unlock(&mm->page_table_lock);
+ /* simulate a read access fault */
+ do_DataAbort(addr, 15 + (1 << 11), regs);
+ return -1;
+ }
+#endif
+
 default:
 /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 if not implemented, rather than raising SIGILL. This
@@ -634,6 +684,14 @@ void __init trap_init(void)
 memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
 memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
 memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
+ /*
+ * Copy signal return handlers into the vector page, and
+ * set sigreturn to be a pointer to these.
+ */
+ memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
+ sizeof(sigreturn_codes));
+
 flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
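The entry-armv.S and setup.c hunks above work as a pair: cpu_init() points the IRQ, abort and undefined-instruction mode stack pointers at a three-word per-CPU scratch area, and vector_stub saves r0, lr_<exception> and spsr_<exception> there before switching to SVC mode and handing the scratch address to svc_entry/usr_entry in r0. A rough C view of that layout follows; the field names are invented for this sketch (the patch itself simply uses u32 irq[3] and friends), so treat it as an illustration rather than kernel API.

#include <linux/types.h>

/*
 * Illustrative only: the per-CPU, per-mode scratch area set up by
 * cpu_init() in setup.c. vector_stub (entry-armv.S) fills one of
 * these before dropping into SVC mode:
 *   [sp, #0] = r0, [sp, #4] = lr_<exception>, [sp, #8] = spsr_<exception>
 * and then passes the address to svc_entry/usr_entry in r0.
 */
struct mode_scratch {
	u32 r0;		/* interrupted r0, later stored into the pt_regs frame */
	u32 lr;		/* lr_<exception>: the interrupted PC */
	u32 spsr;	/* spsr_<exception>: the interrupted CPSR */
};

struct stack_sketch {
	struct mode_scratch irq;
	struct mode_scratch abt;
	struct mode_scratch und;
} ____cacheline_aligned;

Because each mode only ever needs these three words before the handler re-enters SVC mode, the old static __temp_irq/__temp_und/__temp_abt save areas (removed above) are no longer required, and the scheme is safe per CPU on SMP.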
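The __kuser_cmpxchg helper added above lives at the fixed address 0xffff0fc0 in the vector page; on processors that cannot implement it directly (CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) it falls back to the 0xfff0 ghost syscall handled in traps.c above. The sketch below shows how user space might call it. The typedef, macro and wrapper function are assumptions made for this example (the patch installs only the helper itself); the calling convention, r0 = old value, r1 = new value, r2 = pointer, result 0 when the store happened, is taken from the comments in the hunks above.

#include <stdio.h>

/*
 * Hypothetical user-space wrapper around the kernel-provided cmpxchg
 * helper at 0xffff0fc0 (see __kuser_cmpxchg in entry-armv.S above).
 * Returns 0 if *ptr still held oldval and has been replaced by newval.
 */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

/* Atomically add "inc" to *ctr by retrying until the exchange succeeds. */
static int atomic_add_return_user(volatile int *ctr, int inc)
{
	int old, new;

	do {
		old = *ctr;
		new = old + inc;
	} while (kuser_cmpxchg(old, new, ctr) != 0);

	return new;
}

int main(void)
{
	volatile int counter = 0;

	printf("counter = %d\n", atomic_add_return_user(&counter, 5));
	return 0;
}

A sketch like this only runs on ARM Linux with the vector page mapped at 0xffff0000; the point is that user space always calls the same fixed address and the kernel guarantees the cmpxchg semantics there, whether natively, via the load-exclusive path on ARMv6, or via the ghost syscall on older CPUs.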