Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/kernel/head.S  |  7 -------
-rw-r--r--  arch/ppc64/kernel/setup.c |  2 ++
-rw-r--r--  arch/ppc64/kernel/smp.c   | 15 ---------------
-rw-r--r--  arch/ppc64/mm/stab.c      | 35 +++++++++++++++++++++++++++++++++++
 4 files changed, 37 insertions(+), 22 deletions(-)
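The change replaces the fixed 48-page stab_array reserved in head.S with per-CPU segment-table pages allocated at boot by a new stabs_alloc(), called from setup_arch(). Each page is taken from LMB below 1 << SID_SHIFT, i.e. within the first 256MB segment, so a segment miss on the table itself cannot recurse through do_stab_bolted. This lifts the 48-CPU limit on segment-table machines and stops reserving the array on SLB-capable hardware, where it was never used.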
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 93ebcac0d5a..3f447712e3f 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -2131,13 +2131,6 @@ empty_zero_page:
swapper_pg_dir:
.space 4096
-#ifdef CONFIG_SMP
-/* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */
- .globl stab_array
-stab_array:
- .space 4096 * 48
-#endif
-
/*
* This space gets a copy of optional info passed to us by the bootstrap.
* Used to pass parameters into the kernel like root=/dev/sda1, etc.
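For scale: the block removed above reserved 48 pages (192KB) in the kernel image whenever CONFIG_SMP was set, even on SLB machines that never touch a segment table. A rough C equivalent of the deleted .space reservation, for illustration only:

	/* roughly what the deleted directives reserved: one page-aligned
	 * stab page per CPU, up to 48 CPUs, baked into the image */
	static unsigned char stab_array[48 * 4096]
		__attribute__((aligned(4096)));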
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index e80f10c8982..687e8559520 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -1068,6 +1068,8 @@ void __init setup_arch(char **cmdline_p)
irqstack_early_init();
emergency_stack_init();
+ stabs_alloc();
+
/* set up the bootmem stuff with available memory */
do_init_bootmem();
sparse_init();
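The placement of the call matters: stabs_alloc() allocates with lmb_alloc_base(), which only works while the LMB allocator still owns free memory, so it must run before do_init_bootmem() hands that memory over to bootmem. A sketch of the ordering this hunk relies on (the hand-off rule is an assumption drawn from the surrounding boot code, not spelled out in the patch):

	stabs_alloc();      /* uses lmb_alloc_base(); LMB must still own memory */
	do_init_bootmem();  /* bootmem takes over; LMB allocation ends here */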
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 2fcddfcb594..793b562da65 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -65,8 +65,6 @@ struct smp_ops_t *smp_ops;
static volatile unsigned int cpu_callin_map[NR_CPUS];
-extern unsigned char stab_array[];
-
void smp_call_function_interrupt(void);
int smt_enabled_at_boot = 1;
@@ -492,19 +490,6 @@ int __devinit __cpu_up(unsigned int cpu)
paca[cpu].default_decr = tb_ticks_per_jiffy;
- if (!cpu_has_feature(CPU_FTR_SLB)) {
- void *tmp;
-
- /* maximum of 48 CPUs on machines with a segment table */
- if (cpu >= 48)
- BUG();
-
- tmp = &stab_array[PAGE_SIZE * cpu];
- memset(tmp, 0, PAGE_SIZE);
- paca[cpu].stab_addr = (unsigned long)tmp;
- paca[cpu].stab_real = virt_to_abs(tmp);
- }
-
/* Make sure the callin-map entry is 0 (it can be left over
 * from a CPU hotplug)
 */
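With the per-CPU setup moved to boot time, __cpu_up() loses both the 48-CPU BUG() and the memset/paca assignments, and can simply assume stabs_alloc() already filled in the stab fields for every possible CPU. A hypothetical sanity check (not part of the patch) expressing that assumption:

	/* hypothetical: every non-SLB CPU should already have a stab
	 * assigned by stabs_alloc() before it is brought up */
	if (!cpu_has_feature(CPU_FTR_SLB))
		BUG_ON(paca[cpu].stab_addr == 0);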
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index df4bbe14153..1b83f002bf2 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -18,6 +18,8 @@
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
+#include <asm/lmb.h>
+#include <asm/abs_addr.h>
struct stab_entry {
unsigned long esid_data;
@@ -224,6 +226,39 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
extern void slb_initialize(void);
/*
+ * Allocate segment tables for secondary CPUs. These must all go in
+ * the first (bolted) segment, so that do_stab_bolted won't get a
+ * recursive segment miss on the segment table itself.
+ */
+void stabs_alloc(void)
+{
+ int cpu;
+
+ if (cpu_has_feature(CPU_FTR_SLB))
+ return;
+
+ for_each_cpu(cpu) {
+ unsigned long newstab;
+
+ if (cpu == 0)
+ continue; /* stab for CPU 0 is statically allocated */
+
+ newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1<<SID_SHIFT);
+ if (!newstab)
+ panic("Unable to allocate segment table for CPU %d.\n",
+ cpu);
+
+ newstab += KERNELBASE;
+
+ memset((void *)newstab, 0, PAGE_SIZE);
+
+ paca[cpu].stab_addr = newstab;
+ paca[cpu].stab_real = virt_to_abs(newstab);
+ printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx "
+        "virtual, 0x%lx absolute\n",
+        cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
+ }
+}
+
+/*
* Build an entry for the base kernel segment and put it into
* the segment table or SLB. All other segment table or SLB
* entries are faulted in.
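The 1 << SID_SHIFT bound passed to lmb_alloc_base() is what enforces the bolted-segment rule described in the stabs_alloc() comment. A small sketch of the invariant, assuming SID_SHIFT is 28 (256MB segments on ppc64):

	/* sketch: a stab page is safe from recursive stab misses only if
	 * its real address falls in ESID 0, the segment bolted at boot */
	#define SID_SHIFT 28	/* assumed 256MB segment size */
	static int stab_is_bolted(unsigned long stab_real)
	{
		return (stab_real >> SID_SHIFT) == 0;
	}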