Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/include/asm/exception.h | 42
-rw-r--r-- | arch/powerpc/include/asm/paca.h      |  2
-rw-r--r-- | arch/powerpc/kernel/asm-offsets.c    |  2
-rw-r--r-- | arch/powerpc/kernel/head_64.S        | 71
-rw-r--r-- | arch/powerpc/kernel/paca.c           |  2
-rw-r--r-- | arch/powerpc/kernel/prom_init.c      |  8
-rw-r--r-- | arch/powerpc/kernel/setup_64.c       |  9
7 files changed, 72 insertions, 64 deletions
diff --git a/arch/powerpc/include/asm/exception.h b/arch/powerpc/include/asm/exception.h
index 329148b5acc..d3d4534e3c7 100644
--- a/arch/powerpc/include/asm/exception.h
+++ b/arch/powerpc/include/asm/exception.h
@@ -53,14 +53,8 @@
  * low halfword of the address, but for Kdump we need the whole low
  * word.
  */
-#ifdef CONFIG_CRASH_DUMP
 #define LOAD_HANDLER(reg, label)					\
-	oris	reg,reg,(label)@h;	/* virt addr of handler ... */	\
-	ori	reg,reg,(label)@l;	/* .. and the rest */
-#else
-#define LOAD_HANDLER(reg, label)					\
-	ori	reg,reg,(label)@l;	/* virt addr of handler ... */
-#endif
+	addi	reg,reg,(label)-_stext;	/* virt addr of handler ... */
 
 #define EXCEPTION_PROLOG_1(area)					\
 	mfspr	r13,SPRN_SPRG3;		/* get paca address into r13 */	\
@@ -72,37 +66,12 @@
 	std	r9,area+EX_R13(r13);					\
 	mfcr	r9
 
-/*
- * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode.
- * The firmware calls the registered system_reset_fwnmi and
- * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run
- * a 32bit application at the time of the event.
- * This firmware bug is present on POWER4 and JS20.
- */
-#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label)		\
-	EXCEPTION_PROLOG_1(area);					\
-	clrrdi	r12,r13,32;		/* get high part of &label */	\
-	mfmsr	r10;							\
-	/* force 64bit mode */						\
-	li	r11,5;			/* MSR_SF_LG|MSR_ISF_LG */	\
-	rldimi	r10,r11,61,0;		/* insert into top 3 bits */	\
-	/* done 64bit mode */						\
-	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
-	LOAD_HANDLER(r12,label)						\
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
-	mtspr	SPRN_SRR0,r12;						\
-	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
-	mtspr	SPRN_SRR1,r10;						\
-	rfid;								\
-	b	.	/* prevent speculative execution */
-
 #define EXCEPTION_PROLOG_PSERIES(area, label)				\
 	EXCEPTION_PROLOG_1(area);					\
-	clrrdi	r12,r13,32;		/* get high part of &label */	\
-	mfmsr	r10;							\
+	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
+	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
 	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
 	LOAD_HANDLER(r12,label)						\
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;				\
 	mtspr	SPRN_SRR0,r12;						\
 	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
 	mtspr	SPRN_SRR1,r10;						\
@@ -210,11 +179,10 @@ label##_pSeries:					\
 	std	r10,PACA_EXGEN+EX_R13(r13);	\
 	std	r11,PACA_EXGEN+EX_R11(r13);	\
 	std	r12,PACA_EXGEN+EX_R12(r13);	\
-	clrrdi	r12,r13,32;		/* get high part of &label */	\
-	mfmsr	r10;			\
+	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
+	ld	r10,PACAKMSR(r13);	/* get MSR value for kernel */	\
 	mfspr	r11,SPRN_SRR0;		/* save SRR0 */			\
 	LOAD_HANDLER(r12,label##_common)	\
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI;	\
 	mtspr	SPRN_SRR0,r12;		\
 	mfspr	r12,SPRN_SRR1;		/* and SRR1 */			\
 	mtspr	SPRN_SRR1,r10;		\
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 6493a395508..082b3aedf14 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -62,6 +62,8 @@ struct paca_struct {
 	u16 paca_index;			/* Logical processor number */
 
 	u64 kernel_toc;			/* Kernel TOC address */
+	u64 kernelbase;			/* Base address of kernel */
+	u64 kernel_msr;			/* MSR while running in kernel */
 	u64 stab_real;			/* Absolute address of segment table */
 	u64 stab_addr;			/* Virtual address of segment table */
 	void *emergency_sp;		/* pointer to emergency stack */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 92768d3006f..e9c4044012b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,6 +122,8 @@ int main(void)
 	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
 	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
 	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
+	DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
+	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 27935d1ab6a..97bb6e6f67b 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -82,7 +82,11 @@ END_FTR_SECTION(0, 1)
 	/* Catch branch to 0 in real mode */
 	trap
 
-	/* Secondary processors spin on this value until it goes to 1. */
+	/* Secondary processors spin on this value until it becomes nonzero.
+	 * When it does it contains the real address of the descriptor
+	 * of the function that the cpu should jump to to continue
+	 * initialization.
+	 */
 	.globl  __secondary_hold_spinloop
 __secondary_hold_spinloop:
 	.llong	0x0
@@ -109,8 +113,11 @@ __secondary_hold_acknowledge:
  * before the bulk of the kernel has been relocated.  This code
  * is relocated to physical address 0x60 before prom_init is run.
  * All of it must fit below the first exception vector at 0x100.
+ * Use .globl here not _GLOBAL because we want __secondary_hold
+ * to be the actual text address, not a descriptor.
  */
-_GLOBAL(__secondary_hold)
+	.globl	__secondary_hold
+__secondary_hold:
 	mfmsr	r24
 	ori	r24,r24,MSR_RI
 	mtmsrd	r24			/* RI on */
@@ -126,11 +133,11 @@ _GLOBAL(__secondary_hold)
 
 	/* All secondary cpus wait here until told to start. */
 100:	ld	r4,__secondary_hold_spinloop@l(0)
-	cmpdi	0,r4,1
-	bne	100b
+	cmpdi	0,r4,0
+	beq	100b
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
-	LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
+	ld	r4,0(r4)		/* deref function descriptor */
 	mtctr	r4
 	mr	r3,r24
 	bctr
@@ -147,6 +154,10 @@ exception_marker:
 /*
  * This is the start of the interrupt handlers for pSeries
  * This code runs with relocation off.
+ * Code from here to __end_interrupts gets copied down to real
+ * address 0x100 when we are running a relocatable kernel.
+ * Therefore any relative branches in this section must only
+ * branch to labels in this section.
  */
 	. = 0x100
 	.globl __start_interrupts
@@ -200,7 +211,20 @@ data_access_slb_pSeries:
 	mfspr	r10,SPRN_SPRG1
 	std	r10,PACA_EXSLB+EX_R13(r13)
 	mfspr	r12,SPRN_SRR1		/* and SRR1 */
-	b	.slb_miss_realmode	/* Rel. branch works in real mode */
+#ifndef CONFIG_RELOCATABLE
+	b	.slb_miss_realmode
+#else
+	/*
+	 * We can't just use a direct branch to .slb_miss_realmode
+	 * because the distance from here to there depends on where
+	 * the kernel ends up being put.
+	 */
+	mfctr	r11
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10, .slb_miss_realmode)
+	mtctr	r10
+	bctr
+#endif
 
 	STD_EXCEPTION_PSERIES(0x400, instruction_access)
 
@@ -225,7 +249,15 @@ instruction_access_slb_pSeries:
 	mfspr	r10,SPRN_SPRG1
 	std	r10,PACA_EXSLB+EX_R13(r13)
 	mfspr	r12,SPRN_SRR1		/* and SRR1 */
-	b	.slb_miss_realmode	/* Rel. branch works in real mode */
+#ifndef CONFIG_RELOCATABLE
+	b	.slb_miss_realmode
+#else
+	mfctr	r11
+	ld	r10,PACAKBASE(r13)
+	LOAD_HANDLER(r10, .slb_miss_realmode)
+	mtctr	r10
+	bctr
+#endif
 
 	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
 	STD_EXCEPTION_PSERIES(0x600, alignment)
@@ -244,14 +276,12 @@ BEGIN_FTR_SECTION
 	beq-	1f
 END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	mr	r9,r13
-	mfmsr	r10
 	mfspr	r13,SPRN_SPRG3
 	mfspr	r11,SPRN_SRR0
-	clrrdi	r12,r13,32
-	oris	r12,r12,system_call_common@h
-	ori	r12,r12,system_call_common@l
+	ld	r12,PACAKBASE(r13)
+	ld	r10,PACAKMSR(r13)
+	LOAD_HANDLER(r12, system_call_entry)
 	mtspr	SPRN_SRR0,r12
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
 	mfspr	r12,SPRN_SRR1
 	mtspr	SPRN_SRR1,r10
 	rfid
@@ -379,7 +409,10 @@ __end_interrupts:
 
 /*
  * Code from here down to __end_handlers is invoked from the
- * exception prologs above.
+ * exception prologs above.  Because the prologs assemble the
+ * addresses of these handlers using the LOAD_HANDLER macro,
+ * which uses an addi instruction, these handlers must be in
+ * the first 32k of the kernel image.
  */
 
 /*** Common interrupt handlers ***/
@@ -419,6 +452,10 @@ machine_check_common:
 	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
 #endif /* CONFIG_CBE_RAS */
 
+	.align	7
+system_call_entry:
+	b	system_call_common
+
 /*
  * Here we have detected that the kernel stack pointer is bad.
  * R9 contains the saved CR, r13 points to the paca,
@@ -562,6 +599,9 @@ unrecov_user_slb:
  */
 _GLOBAL(slb_miss_realmode)
 	mflr	r10
+#ifdef CONFIG_RELOCATABLE
+	mtctr	r11
+#endif
 
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
@@ -612,11 +652,10 @@ BEGIN_FW_FTR_SECTION
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif /* CONFIG_PPC_ISERIES */
 	mfspr	r11,SPRN_SRR0
-	clrrdi	r10,r13,32
+	ld	r10,PACAKBASE(r13)
 	LOAD_HANDLER(r10,unrecov_slb)
 	mtspr	SPRN_SRR0,r10
-	mfmsr	r10
-	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
+	ld	r10,PACAKMSR(r13)
 	mtspr	SPRN_SRR1,r10
 	rfid
 	b	.
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index c9bf17eec31..623e8c3c57f 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -79,6 +79,8 @@ void __init initialise_pacas(void)
 		new_paca->lock_token = 0x8000;
 		new_paca->paca_index = cpu;
 		new_paca->kernel_toc = kernel_toc;
+		new_paca->kernelbase = KERNELBASE;
+		new_paca->kernel_msr = MSR_KERNEL;
 		new_paca->hw_cpu_id = 0xffff;
 		new_paca->slb_shadow_ptr = &slb_shadow[cpu];
 		new_paca->__current = &init_task;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index b72849ac7db..1f898858505 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1321,7 +1321,7 @@ static void __init prom_initialize_tce_table(void)
  *
  * -- Cort
  */
-extern void __secondary_hold(void);
+extern char __secondary_hold;
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 
@@ -1342,13 +1342,7 @@ static void __init prom_hold_cpus(void)
 		= (void *) LOW_ADDR(__secondary_hold_spinloop);
 	unsigned long *acknowledge
 		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
-#ifdef CONFIG_PPC64
-	/* __secondary_hold is actually a descriptor, not the text address */
-	unsigned long secondary_hold
-		= __pa(*PTRRELOC((unsigned long *)__secondary_hold));
-#else
 	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
-#endif
 
 	prom_debug("prom_hold_cpus: start...\n");
 	prom_debug("    1) spinloop       = 0x%x\n", (unsigned long)spinloop);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8b25f51f03b..843c0af210d 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -255,9 +255,11 @@ void early_setup_secondary(void)
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+extern unsigned long __secondary_hold_spinloop;
+extern void generic_secondary_smp_init(void);
+
 void smp_release_cpus(void)
 {
-	extern unsigned long __secondary_hold_spinloop;
 	unsigned long *ptr;
 
 	DBG(" -> smp_release_cpus()\n");
@@ -266,12 +268,11 @@ void smp_release_cpus(void)
 	 * all now so they can start to spin on their individual paca
 	 * spinloops. For non SMP kernels, the secondary cpus never get out
 	 * of the common spinloop.
-	 * This is useless but harmless on iSeries, secondaries are already
-	 * waiting on their paca spinloops. */
+	 */
 
 	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
 			- PHYSICAL_START);
-	*ptr = 1;
+	*ptr = __pa(generic_secondary_smp_init);
 	mb();
 
 	DBG(" <- smp_release_cpus()\n");