author	David S. Miller <davem@hutch.davemloft.net>	2007-06-04 17:01:39 -0700
committer	David S. Miller <davem@sunset.davemloft.net>	2007-06-04 21:50:00 -0700
commit	f78eae2e6f5d1eb05f76a45486286445b916bd92 (patch)
tree	0fa81e104ad9891afcaf18cdcb413c4a0f2ee8da /arch/sparc64/kernel
parent	d887ab3a9b1899f88b8cfba531e726b5fb2ebd14 (diff)
[SPARC64]: Proper multi-core scheduling support.
The scheduling domain hierarchy is:

	all cpus -->
	cpus that share an instruction cache -->
	cpus that share an integer execution unit

Signed-off-by: David S. Miller <davem@davemloft.net>
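Illustration (not part of the patch): a minimal stand-alone C sketch of the grouping rule this change implements, in which CPUs reporting the same core_id (shared instruction cache) end up in one another's core map and CPUs reporting the same proc_id (shared integer execution unit) end up in one another's sibling map. NCPUS, the cpu_info[] topology values, and the bitmask arrays below are invented purely for illustration.

/*
 * Hypothetical user-space sketch mirroring the grouping logic that
 * smp_fill_in_sib_core_maps() performs in the patch below.  The
 * topology values are made up: two cores of four strands each, with
 * strands paired on integer execution units.
 */
#include <stdio.h>

#define NCPUS 8

struct cpu_info {
	int core_id;	/* 0 means unknown: CPU keeps only itself in its core map */
	int proc_id;	/* -1 means unknown: CPU keeps only itself in its sibling map */
};

static struct cpu_info cpu_info[NCPUS] = {
	{ 1, 0 }, { 1, 0 }, { 1, 1 }, { 1, 1 },
	{ 2, 2 }, { 2, 2 }, { 2, 3 }, { 2, 3 },
};

static unsigned int core_map[NCPUS];	/* bit j set: cpu j shares an icache with cpu i */
static unsigned int sibling_map[NCPUS];	/* bit j set: cpu j shares an integer unit with cpu i */

int main(void)
{
	int i, j;

	for (i = 0; i < NCPUS; i++) {
		if (cpu_info[i].core_id == 0) {
			core_map[i] = 1u << i;
			continue;
		}
		for (j = 0; j < NCPUS; j++)
			if (cpu_info[i].core_id == cpu_info[j].core_id)
				core_map[i] |= 1u << j;
	}

	for (i = 0; i < NCPUS; i++) {
		if (cpu_info[i].proc_id == -1) {
			sibling_map[i] = 1u << i;
			continue;
		}
		for (j = 0; j < NCPUS; j++)
			if (cpu_info[i].proc_id == cpu_info[j].proc_id)
				sibling_map[i] |= 1u << j;
	}

	for (i = 0; i < NCPUS; i++)
		printf("cpu%d: core_map=0x%02x sibling_map=0x%02x\n",
		       i, core_map[i], sibling_map[i]);
	return 0;
}

With the sample topology above, cpu0 prints core_map=0x0f and sibling_map=0x03: all four strands of its core share the instruction cache, but only cpu1 shares its integer execution unit, mirroring the hierarchy described in the commit message.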
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--	arch/sparc64/kernel/mdesc.c	49
-rw-r--r--	arch/sparc64/kernel/prom.c	1
-rw-r--r--	arch/sparc64/kernel/smp.c	19
3 files changed, 68 insertions(+), 1 deletion(-)
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index 9246c2cf957..1b5db4bc6b3 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -473,6 +473,53 @@ static void __init set_core_ids(void)
 	}
 }
 
+static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
+{
+	int i;
+
+	for (i = 0; i < mp->num_arcs; i++) {
+		struct mdesc_node *t = mp->arcs[i].arc;
+		const u64 *id;
+
+		if (strcmp(mp->arcs[i].name, "back"))
+			continue;
+
+		if (strcmp(t->name, "cpu"))
+			continue;
+
+		id = md_get_property(t, "id", NULL);
+		if (*id < NR_CPUS)
+			cpu_data(*id).proc_id = proc_id;
+	}
+}
+
+static void __init __set_proc_ids(const char *exec_unit_name)
+{
+	struct mdesc_node *mp;
+	int idx;
+
+	idx = 0;
+	md_for_each_node_by_name(mp, exec_unit_name) {
+		const char *type;
+		int len;
+
+		type = md_get_property(mp, "type", &len);
+		if (!find_in_proplist(type, "int", len) &&
+		    !find_in_proplist(type, "integer", len))
+			continue;
+
+		mark_proc_ids(mp, idx);
+
+		idx++;
+	}
+}
+
+static void __init set_proc_ids(void)
+{
+	__set_proc_ids("exec_unit");
+	__set_proc_ids("exec-unit");
+}
+
 static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def)
 {
 	u64 val;
@@ -574,9 +621,11 @@ static void __init mdesc_fill_in_cpu_data(void)
 #endif
 		c->core_id = 0;
+		c->proc_id = -1;
 	}
 
 	set_core_ids();
+	set_proc_ids();
 
 	smp_fill_in_sib_core_maps();
 }
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index dad4b3ba705..928aba3d0db 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -1800,6 +1800,7 @@ static void __init of_fill_in_cpu_data(void)
 			cpu_data(cpuid).core_id = 0;
 		}
 
+		cpu_data(cpuid).proc_id = -1;
 #ifdef CONFIG_SMP
 		cpu_set(cpuid, cpu_present_map);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c550bba3490..68a45ac9337 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -51,6 +51,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
 	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
@@ -1217,13 +1219,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
 		unsigned int j;
 
 		if (cpu_data(i).core_id == 0) {
-			cpu_set(i, cpu_sibling_map[i]);
+			cpu_set(i, cpu_core_map[i]);
 			continue;
 		}
 
 		for_each_possible_cpu(j) {
 			if (cpu_data(i).core_id ==
 			    cpu_data(j).core_id)
+				cpu_set(j, cpu_core_map[i]);
+		}
+	}
+
+	for_each_possible_cpu(i) {
+		unsigned int j;
+
+		if (cpu_data(i).proc_id == -1) {
+			cpu_set(i, cpu_sibling_map[i]);
+			continue;
+		}
+
+		for_each_possible_cpu(j) {
+			if (cpu_data(i).proc_id ==
+			    cpu_data(j).proc_id)
 				cpu_set(j, cpu_sibling_map[i]);
 		}
 	}