author     David S. Miller <davem@sunset.davemloft.net>   2006-02-04 03:10:53 -0800
committer  David S. Miller <davem@sunset.davemloft.net>   2006-03-20 01:11:40 -0800
commit     a43fe0e789f5445f5224511034f410adf11f153b (patch)
tree       cface7b6e616be616899da8c0762f904263c5985
parent     1633a53c79498455b16d051451f4e3f83ab4e7dd (diff)
[SPARC64]: Add some hypervisor tlb_type checks.
And more consistently check cheetah{,_plus} instead of assuming
anything not spitfire is cheetah{,_plus}.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc64/kernel/smp.c   32
-rw-r--r--  arch/sparc64/mm/init.c       6
2 files changed, 30 insertions, 8 deletions
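The change is mechanical at each cross-call delivery site: instead of treating everything that is not spitfire as cheetah, the code now checks cheetah/cheetah_plus explicitly and routes anything else (the sun4v hypervisor MMU) to a new, for-now empty hypervisor_xcall_deliver(). A condensed sketch of the resulting dispatch is below; the enum is assumed to mirror the sparc64 tlb_type definition of this era (asm/spitfire.h), the helper name xcall_deliver_sketch is hypothetical, and the delivery functions are the ones already declared in smp.c, so this is an illustration rather than a standalone translation unit.

	/*
	 * Sketch only: the tlb_type dispatch this patch applies at each
	 * cross-call site.  Enum values are assumed; argument lists match
	 * the diff below.
	 */
	enum ultra_tlb_layout { spitfire, cheetah, cheetah_plus, hypervisor };
	extern enum ultra_tlb_layout tlb_type;

	static void xcall_deliver_sketch(u64 data0, u64 data1, u64 data2,
					 cpumask_t mask)
	{
		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, data1, data2, mask);
		else if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cheetah_xcall_deliver(data0, data1, data2, mask);
		else
			/* sun4v path; still an empty stub in this patch */
			hypervisor_xcall_deliver(data0, data1, data2, mask);
	}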
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 16b8eca9754..aba0f886b05 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -528,6 +528,11 @@ retry:
}
}
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+{
+ /* XXX implement me */
+}
+
/* Send cross call to all processors mentioned in MASK
* except self.
*/
@@ -541,8 +546,10 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
if (tlb_type == spitfire)
spitfire_xcall_deliver(data0, data1, data2, mask);
- else
+ else if (tlb_type == cheetah || tlb_type == cheetah_plus)
cheetah_xcall_deliver(data0, data1, data2, mask);
+ else
+ hypervisor_xcall_deliver(data0, data1, data2, mask);
/* NOTE: Caller runs local copy on master. */
put_cpu();
@@ -695,11 +702,17 @@ static __inline__ void __local_flush_dcache_page(struct page *page)
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
cpumask_t mask = cpumask_of_cpu(cpu);
- int this_cpu = get_cpu();
+ int this_cpu;
+
+ if (tlb_type == hypervisor)
+ return;
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
#endif
+
+ this_cpu = get_cpu();
+
if (cpu == this_cpu) {
__local_flush_dcache_page(page);
} else if (cpu_online(cpu)) {
@@ -715,7 +728,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
__pa(pg_addr),
(u64) pg_addr,
mask);
- } else {
+ } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
data0 =
((u64)&xcall_flush_dcache_page_cheetah);
@@ -737,7 +750,12 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
void *pg_addr = page_address(page);
cpumask_t mask = cpu_online_map;
u64 data0;
- int this_cpu = get_cpu();
+ int this_cpu;
+
+ if (tlb_type == hypervisor)
+ return;
+
+ this_cpu = get_cpu();
cpu_clear(this_cpu, mask);
@@ -754,7 +772,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
__pa(pg_addr),
(u64) pg_addr,
mask);
- } else {
+ } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
data0 = ((u64)&xcall_flush_dcache_page_cheetah);
cheetah_xcall_deliver(data0,
@@ -780,8 +798,10 @@ void smp_receive_signal(int cpu)
if (tlb_type == spitfire)
spitfire_xcall_deliver(data0, 0, 0, mask);
- else
+ else if (tlb_type == cheetah || tlb_type == cheetah_plus)
cheetah_xcall_deliver(data0, 0, 0, mask);
+ else if (tlb_type == hypervisor)
+ hypervisor_xcall_deliver(data0, 0, 0, mask);
}
}
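On the D-cache side, both flush paths now bail out up front when running on the hypervisor, and get_cpu() is moved below that check. The ordering matters: get_cpu() disables preemption and must be balanced by put_cpu(), so returning early before it keeps the pair intact. A condensed, hypothetical sketch of the reordered entry, with the body abbreviated and names as in the diff above:

	/* Sketch of the reordering in smp_flush_dcache_page_impl(). */
	void smp_flush_dcache_page_impl(struct page *page, int cpu)
	{
		int this_cpu;

		if (tlb_type == hypervisor)
			return;			/* skipped on sun4v for now */

	#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes);
	#endif

		this_cpu = get_cpu();		/* disables preemption ... */
		/* ... local flush or spitfire/cheetah cross call, as in the diff ... */
		put_cpu();			/* ... re-enabled here */
	}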
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 1af63307b24..ab50cd9618f 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -335,7 +335,7 @@ out:
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
- /* Cheetah has coherent I-cache. */
+ /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
if (tlb_type == spitfire) {
unsigned long kaddr;
@@ -372,6 +372,8 @@ void mmu_info(struct seq_file *m)
seq_printf(m, "MMU Type\t: Cheetah+\n");
else if (tlb_type == spitfire)
seq_printf(m, "MMU Type\t: Spitfire\n");
+ else if (tlb_type == hypervisor)
+ seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
else
seq_printf(m, "MMU Type\t: ???\n");
@@ -581,7 +583,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
if (++n >= 512)
break;
}
- } else {
+ } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
start = __pa(start);
end = __pa(end);
for (va = start; va < end; va += 32)
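arch/sparc64/mm/init.c gets the same treatment: the displacement-flush branch of __flush_dcache_range() is now guarded by an explicit cheetah/cheetah_plus check, so on a hypervisor (sun4v) MMU the function simply falls through and does nothing, consistent with the updated flush_icache_range() comment that both Cheetah and hypervisor platform cpus have coherent I-cache. A rough sketch of the resulting control flow, with the loop bodies abbreviated:

	/* Sketch of __flush_dcache_range() after this patch. */
	void __flush_dcache_range(unsigned long start, unsigned long end)
	{
		unsigned long va;

		if (tlb_type == spitfire) {
			/* ... original spitfire tag-walk flush, capped at 512 lines ... */
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			start = __pa(start);
			end = __pa(end);
			for (va = start; va < end; va += 32)
				/* ... cheetah displacement flush of each 32-byte line ... */;
		}
		/* tlb_type == hypervisor: falls through, no-op for now */
	}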