author		David Woodhouse <dwmw2@shinybook.infradead.org>	2005-08-09 16:51:35 +0100
committer	David Woodhouse <dwmw2@shinybook.infradead.org>	2005-08-09 16:51:35 +0100
commit		c973b112c76c9d8fd042991128f218a738cc8d0a (patch)
tree		e813b0da5d0a0e19e06de6462d145a29ad683026 /arch/ppc64/mm
parent		c5fbc3966f48279dbebfde10248c977014aa9988 (diff)
parent		00dd1e433967872f3997a45d5adf35056fdf2f56 (diff)
Merge with /shiny/git/linux-2.6/.git
Diffstat (limited to 'arch/ppc64/mm')
-rw-r--r--	arch/ppc64/mm/numa.c	 7
-rw-r--r--	arch/ppc64/mm/stab.c	35
2 files changed, 41 insertions, 1 deletions
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
index cafd91aef28..0b191f2de01 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -647,7 +647,12 @@ void __init do_init_bootmem(void)
new_range:
		mem_start = read_n_cells(addr_cells, &memcell_buf);
		mem_size = read_n_cells(size_cells, &memcell_buf);
-		numa_domain = numa_enabled ? of_node_numa_domain(memory) : 0;
+		if (numa_enabled) {
+			numa_domain = of_node_numa_domain(memory);
+			if (numa_domain >= MAX_NUMNODES)
+				numa_domain = 0;
+		} else
+			numa_domain = 0;

		if (numa_domain != nid)
			continue;
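
The numa.c hunk above stops trusting the domain id reported by firmware: when of_node_numa_domain() returns a value at or beyond MAX_NUMNODES, the range is attributed to node 0 rather than indexing past the kernel's node arrays. A minimal userspace sketch of the same clamping pattern, assuming an illustrative MAX_NUMNODES value and a hypothetical helper name (neither is part of the patch):

#include <stdio.h>

#define MAX_NUMNODES 16	/* illustrative stand-in; the real value is a kernel config constant */

/* Mirrors the check added to do_init_bootmem(): fall back to node 0 when
 * NUMA is disabled or the reported domain is out of range. */
static int sanitize_numa_domain(int numa_enabled, int domain)
{
	if (!numa_enabled)
		return 0;
	if (domain >= MAX_NUMNODES)
		return 0;
	return domain;
}

int main(void)
{
	printf("%d\n", sanitize_numa_domain(1, 3));	/* 3: in-range domain is kept */
	printf("%d\n", sanitize_numa_domain(1, 64));	/* 0: out-of-range domain clamped */
	printf("%d\n", sanitize_numa_domain(0, 3));	/* 0: NUMA disabled */
	return 0;
}
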
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index df4bbe14153..1b83f002bf2 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -18,6 +18,8 @@
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
+#include <asm/lmb.h>
+#include <asm/abs_addr.h>

struct stab_entry {
	unsigned long esid_data;
@@ -224,6 +226,39 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
extern void slb_initialize(void);
/*
+ * Allocate segment tables for secondary CPUs. These must all go in
+ * the first (bolted) segment, so that do_stab_bolted won't get a
+ * recursive segment miss on the segment table itself.
+ */
+void stabs_alloc(void)
+{
+	int cpu;
+
+	if (cpu_has_feature(CPU_FTR_SLB))
+		return;
+
+	for_each_cpu(cpu) {
+		unsigned long newstab;
+
+		if (cpu == 0)
+			continue; /* stab for CPU 0 is statically allocated */
+
+		newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1<<SID_SHIFT);
+		if (! newstab)
+			panic("Unable to allocate segment table for CPU %d.\n",
+			      cpu);
+
+		newstab += KERNELBASE;
+
+		memset((void *)newstab, 0, PAGE_SIZE);
+
+		paca[cpu].stab_addr = newstab;
+		paca[cpu].stab_real = virt_to_abs(newstab);
+		printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx virtual, 0x%lx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
+	}
+}
+
+/*
* Build an entry for the base kernel segment and put it into
* the segment table or SLB. All other segment table or SLB
* entries are faulted in.
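
For context on the stab.c addition: the 1<<SID_SHIFT bound passed to lmb_alloc_base() in stabs_alloc() keeps every secondary CPU's segment table inside the first 256MB segment, which is covered by the bolted kernel segment entry, so do_stab_bolted never takes a recursive miss on the table itself. A small standalone sketch of that bound, assuming SID_SHIFT is the ppc64 256MB segment shift of 28 and using a hypothetical helper name:

#include <stdio.h>

#define SID_SHIFT 28	/* assumed ppc64 value: segment size is 1 << 28 = 256MB */

/* True when a physical address lies within the first (bolted) segment,
 * i.e. below the 1 << SID_SHIFT limit given to lmb_alloc_base(). */
static int in_bolted_segment(unsigned long phys)
{
	return phys < (1UL << SID_SHIFT);
}

int main(void)
{
	printf("%d\n", in_bolted_segment(0x0ffff000UL));	/* 1: below 256MB */
	printf("%d\n", in_bolted_segment(0x10000000UL));	/* 0: first address past the bolted segment */
	return 0;
}
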