path: root/arch/sparc64
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/defconfig             |  12
-rw-r--r--  arch/sparc64/kernel/cpu.c          |  10
-rw-r--r--  arch/sparc64/kernel/ds.c           |   3
-rw-r--r--  arch/sparc64/kernel/entry.S        |  30
-rw-r--r--  arch/sparc64/kernel/entry.h        | 196
-rw-r--r--  arch/sparc64/kernel/head.S         |   8
-rw-r--r--  arch/sparc64/kernel/iommu.c        |   8
-rw-r--r--  arch/sparc64/kernel/irq.c          |  21
-rw-r--r--  arch/sparc64/kernel/process.c      |   3
-rw-r--r--  arch/sparc64/kernel/ptrace.c       |  20
-rw-r--r--  arch/sparc64/kernel/setup.c        |   5
-rw-r--r--  arch/sparc64/kernel/signal.c       |   3
-rw-r--r--  arch/sparc64/kernel/smp.c          |  20
-rw-r--r--  arch/sparc64/kernel/stacktrace.c   |   4
-rw-r--r--  arch/sparc64/kernel/sys_sparc.c    |  14
-rw-r--r--  arch/sparc64/kernel/sys_sparc32.c  |   3
-rw-r--r--  arch/sparc64/kernel/sys_sunos32.c  |   2
-rw-r--r--  arch/sparc64/kernel/systbls.h      |  53
-rw-r--r--  arch/sparc64/kernel/time.c         |  66
-rw-r--r--  arch/sparc64/kernel/trampoline.S   | 188
-rw-r--r--  arch/sparc64/kernel/traps.c        |  49
-rw-r--r--  arch/sparc64/mm/init.c             |  43
-rw-r--r--  arch/sparc64/mm/tlb.c              |   3
23 files changed, 477 insertions, 287 deletions
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 250958d1e3c..9d4bd222949 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25-rc1
-# Sun Feb 17 22:44:12 2008
+# Linux kernel version: 2.6.25-rc3
+# Wed Mar 26 04:33:35 2008
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
@@ -55,9 +55,11 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=18
# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_RELAY=y
CONFIG_NAMESPACES=y
@@ -482,6 +484,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_STEX is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
@@ -810,6 +813,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_I5K_AMB is not set
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index dd5d28e3d79..0097c08dc60 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -15,6 +15,8 @@
#include <asm/spitfire.h>
#include <asm/oplib.h>
+#include "entry.h"
+
DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
struct cpu_iu_info {
@@ -65,8 +67,6 @@ static struct cpu_iu_info linux_sparc_chips[] = {
char *sparc_cpu_type;
char *sparc_fpu_type;
-unsigned int fsr_storage;
-
static void __init sun4v_cpu_probe(void)
{
switch (sun4v_chip_type) {
@@ -94,8 +94,10 @@ void __init cpu_probe(void)
unsigned long ver, fpu_vers, manuf, impl, fprs;
int i;
- if (tlb_type == hypervisor)
- return sun4v_cpu_probe();
+ if (tlb_type == hypervisor) {
+ sun4v_cpu_probe();
+ return;
+ }
fprs = fprs_read();
fprs_write(FPRS_FEF);
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
index bd76482077b..edb74f5a118 100644
--- a/arch/sparc64/kernel/ds.c
+++ b/arch/sparc64/kernel/ds.c
@@ -972,8 +972,7 @@ static void process_ds_work(void)
LIST_HEAD(todo);
spin_lock_irqsave(&ds_lock, flags);
- list_splice(&ds_work_list, &todo);
- INIT_LIST_HEAD(&ds_work_list);
+ list_splice_init(&ds_work_list, &todo);
spin_unlock_irqrestore(&ds_lock, flags);
list_for_each_entry_safe(qp, tmp, &todo, list) {
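
The ds.c hunk above folds an open-coded splice-plus-reinitialize pair into list_splice_init(), which moves the queued entries onto the local list and leaves the source list empty in one call. A rough sketch of the equivalence, reusing the names from the hunk (not a quote of the list.h implementation):

        /* before: two steps were needed so ds_work_list ends up empty */
        list_splice(&ds_work_list, &todo);      /* move the entries onto 'todo' */
        INIT_LIST_HEAD(&ds_work_list);          /* reset the now-empty source list head */

        /* after: one helper that splices and then reinitializes the source */
        list_splice_init(&ds_work_list, &todo);
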
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 6be4d2d2904..49eca4b1cf2 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1705,6 +1705,36 @@ __flushw_user:
2: retl
nop
+ /* Flush %fp and %i7 to the stack for all register
+ * windows active inside of the cpu. This allows
+ * show_stack_trace() to avoid using an expensive
+ * 'flushw'.
+ */
+ .globl stack_trace_flush
+ .type stack_trace_flush,#function
+stack_trace_flush:
+ rdpr %pstate, %o0
+ wrpr %o0, PSTATE_IE, %pstate
+
+ rdpr %cwp, %g1
+ rdpr %canrestore, %g2
+ sub %g1, 1, %g3
+
+1: brz,pn %g2, 2f
+ sub %g2, 1, %g2
+ wrpr %g3, %cwp
+ stx %fp, [%sp + STACK_BIAS + RW_V9_I6]
+ stx %i7, [%sp + STACK_BIAS + RW_V9_I7]
+ ba,pt %xcc, 1b
+ sub %g3, 1, %g3
+
+2: wrpr %g1, %cwp
+ wrpr %o0, %pstate
+
+ retl
+ nop
+ .size stack_trace_flush,.-stack_trace_flush
+
#ifdef CONFIG_SMP
.globl hard_smp_processor_id
hard_smp_processor_id:
diff --git a/arch/sparc64/kernel/entry.h b/arch/sparc64/kernel/entry.h
new file mode 100644
index 00000000000..4a91e9c6d31
--- /dev/null
+++ b/arch/sparc64/kernel/entry.h
@@ -0,0 +1,196 @@
+#ifndef _ENTRY_H
+#define _ENTRY_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+extern char *sparc_cpu_type;
+extern char *sparc_fpu_type;
+
+extern void __init per_cpu_patch(void);
+extern void __init sun4v_patch(void);
+extern void __init boot_cpu_id_too_large(int cpu);
+extern unsigned int dcache_parity_tl1_occurred;
+extern unsigned int icache_parity_tl1_occurred;
+
+extern asmlinkage void update_perfctrs(void);
+extern asmlinkage void sparc_breakpoint(struct pt_regs *regs);
+extern void timer_interrupt(int irq, struct pt_regs *regs);
+
+extern void do_notify_resume(struct pt_regs *regs,
+ unsigned long orig_i0,
+ int restart_syscall,
+ unsigned long thread_info_flags);
+
+extern asmlinkage void syscall_trace(struct pt_regs *regs,
+ int syscall_exit_p);
+
+extern void bad_trap_tl1(struct pt_regs *regs, long lvl);
+
+extern void do_fpe_common(struct pt_regs *regs);
+extern void do_fpieee(struct pt_regs *regs);
+extern void do_fpother(struct pt_regs *regs);
+extern void do_tof(struct pt_regs *regs);
+extern void do_div0(struct pt_regs *regs);
+extern void do_illegal_instruction(struct pt_regs *regs);
+extern void mem_address_unaligned(struct pt_regs *regs,
+ unsigned long sfar,
+ unsigned long sfsr);
+extern void sun4v_do_mna(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long type_ctx);
+extern void do_privop(struct pt_regs *regs);
+extern void do_privact(struct pt_regs *regs);
+extern void do_cee(struct pt_regs *regs);
+extern void do_cee_tl1(struct pt_regs *regs);
+extern void do_dae_tl1(struct pt_regs *regs);
+extern void do_iae_tl1(struct pt_regs *regs);
+extern void do_div0_tl1(struct pt_regs *regs);
+extern void do_fpdis_tl1(struct pt_regs *regs);
+extern void do_fpieee_tl1(struct pt_regs *regs);
+extern void do_fpother_tl1(struct pt_regs *regs);
+extern void do_ill_tl1(struct pt_regs *regs);
+extern void do_irq_tl1(struct pt_regs *regs);
+extern void do_lddfmna_tl1(struct pt_regs *regs);
+extern void do_stdfmna_tl1(struct pt_regs *regs);
+extern void do_paw(struct pt_regs *regs);
+extern void do_paw_tl1(struct pt_regs *regs);
+extern void do_vaw(struct pt_regs *regs);
+extern void do_vaw_tl1(struct pt_regs *regs);
+extern void do_tof_tl1(struct pt_regs *regs);
+extern void do_getpsr(struct pt_regs *regs);
+
+extern void spitfire_insn_access_exception(struct pt_regs *regs,
+ unsigned long sfsr,
+ unsigned long sfar);
+extern void spitfire_insn_access_exception_tl1(struct pt_regs *regs,
+ unsigned long sfsr,
+ unsigned long sfar);
+extern void spitfire_data_access_exception(struct pt_regs *regs,
+ unsigned long sfsr,
+ unsigned long sfar);
+extern void spitfire_data_access_exception_tl1(struct pt_regs *regs,
+ unsigned long sfsr,
+ unsigned long sfar);
+extern void spitfire_access_error(struct pt_regs *regs,
+ unsigned long status_encoded,
+ unsigned long afar);
+
+extern void cheetah_fecc_handler(struct pt_regs *regs,
+ unsigned long afsr,
+ unsigned long afar);
+extern void cheetah_cee_handler(struct pt_regs *regs,
+ unsigned long afsr,
+ unsigned long afar);
+extern void cheetah_deferred_handler(struct pt_regs *regs,
+ unsigned long afsr,
+ unsigned long afar);
+extern void cheetah_plus_parity_error(int type, struct pt_regs *regs);
+
+extern void sun4v_insn_access_exception(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long type_ctx);
+extern void sun4v_insn_access_exception_tl1(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long type_ctx);
+extern void sun4v_data_access_exception(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long type_ctx);
+extern void sun4v_data_access_exception_tl1(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long type_ctx);
+extern void sun4v_resum_error(struct pt_regs *regs,
+ unsigned long offset);
+extern void sun4v_resum_overflow(struct pt_regs *regs);
+extern void sun4v_nonresum_error(struct pt_regs *regs,
+ unsigned long offset);
+extern void sun4v_nonresum_overflow(struct pt_regs *regs);
+
+extern unsigned long sun4v_err_itlb_vaddr;
+extern unsigned long sun4v_err_itlb_ctx;
+extern unsigned long sun4v_err_itlb_pte;
+extern unsigned long sun4v_err_itlb_error;
+
+extern void sun4v_itlb_error_report(struct pt_regs *regs, int tl);
+
+extern unsigned long sun4v_err_dtlb_vaddr;
+extern unsigned long sun4v_err_dtlb_ctx;
+extern unsigned long sun4v_err_dtlb_pte;
+extern unsigned long sun4v_err_dtlb_error;
+
+extern void sun4v_dtlb_error_report(struct pt_regs *regs, int tl);
+extern void hypervisor_tlbop_error(unsigned long err,
+ unsigned long op);
+extern void hypervisor_tlbop_error_xcall(unsigned long err,
+ unsigned long op);
+
+/* WARNING: The error trap handlers in assembly know the precise
+ * layout of the following structure.
+ *
+ * C-level handlers in traps.c use this information to log the
+ * error and then determine how to recover (if possible).
+ */
+struct cheetah_err_info {
+/*0x00*/u64 afsr;
+/*0x08*/u64 afar;
+
+ /* D-cache state */
+/*0x10*/u64 dcache_data[4]; /* The actual data */
+/*0x30*/u64 dcache_index; /* D-cache index */
+/*0x38*/u64 dcache_tag; /* D-cache tag/valid */
+/*0x40*/u64 dcache_utag; /* D-cache microtag */
+/*0x48*/u64 dcache_stag; /* D-cache snooptag */
+
+ /* I-cache state */
+/*0x50*/u64 icache_data[8]; /* The actual insns + predecode */
+/*0x90*/u64 icache_index; /* I-cache index */
+/*0x98*/u64 icache_tag; /* I-cache phys tag */
+/*0xa0*/u64 icache_utag; /* I-cache microtag */
+/*0xa8*/u64 icache_stag; /* I-cache snooptag */
+/*0xb0*/u64 icache_upper; /* I-cache upper-tag */
+/*0xb8*/u64 icache_lower; /* I-cache lower-tag */
+
+ /* E-cache state */
+/*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */
+/*0xe0*/u64 ecache_index; /* E-cache index */
+/*0xe8*/u64 ecache_tag; /* E-cache tag/state */
+
+/*0xf0*/u64 __pad[32 - 30];
+};
+#define CHAFSR_INVALID ((u64)-1L)
+
+/* This is allocated at boot time based upon the largest hardware
+ * cpu ID in the system. We allocate two entries per cpu, one for
+ * TL==0 logging and one for TL >= 1 logging.
+ */
+extern struct cheetah_err_info *cheetah_error_log;
+
+/* UPA nodes send interrupt packet to UltraSparc with first data reg
+ * value low 5 (7 on Starfire) bits holding the IRQ identifier being
+ * delivered. We must translate this into a non-vector IRQ so we can
+ * set the softint on this cpu.
+ *
+ * To make processing these packets efficient and race free we use
+ * an array of irq buckets below. The interrupt vector handler in
+ * entry.S feeds incoming packets into per-cpu pil-indexed lists.
+ *
+ * If you make changes to ino_bucket, please update hand coded assembler
+ * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
+ */
+struct ino_bucket {
+/*0x00*/unsigned long __irq_chain_pa;
+
+ /* Virtual interrupt number assigned to this INO. */
+/*0x08*/unsigned int __virt_irq;
+/*0x0c*/unsigned int __pad;
+};
+
+extern struct ino_bucket *ivector_table;
+extern unsigned long ivector_table_pa;
+
+extern void handler_irq(int irq, struct pt_regs *regs);
+extern void init_irqwork_curcpu(void);
+extern void __cpuinit sun4v_register_mondo_queues(int this_cpu);
+
+#endif /* _ENTRY_H */
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 44b105c04dd..34f8ff57c56 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -288,8 +288,12 @@ sun4v_chip_type:
/* Leave arg2 as-is, prom_mmu_ihandle_cache */
mov -1, %l3
stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default)
- sethi %hi(8 * 1024 * 1024), %l3
- stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: size (8MB)
+ /* 4MB align the kernel image size. */
+ set (_end - KERNBASE), %l3
+ set ((4 * 1024 * 1024) - 1), %l4
+ add %l3, %l4, %l3
+ andn %l3, %l4, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: roundup(ksize, 4MB)
sethi %hi(KERNBASE), %l3
stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE)
stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty
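
The head.S change above rounds the kernel image size up to the next 4MB boundary with the usual add-then-mask idiom (the add %l4 / andn %l4 pair) before passing it to OBP, instead of hard-coding 8MB. In C the same computation looks roughly like this sketch (_end is the linker-provided end-of-image symbol):

        unsigned long ksize = (unsigned long)_end - KERNBASE;   /* set (_end - KERNBASE), %l3 */
        unsigned long mask  = (4UL * 1024 * 1024) - 1;          /* set ((4 * 1024 * 1024) - 1), %l4 */
        unsigned long arg4  = (ksize + mask) & ~mask;           /* add + andn: roundup(ksize, 4MB) */
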
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index fbaab3497bf..b781d3d54fb 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -626,7 +626,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
iommu_map_failed:
for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
- unsigned long vaddr, npages, entry, i;
+ unsigned long vaddr, npages, entry, j;
iopte_t *base;
vaddr = s->dma_address & IO_PAGE_MASK;
@@ -637,8 +637,8 @@ iommu_map_failed:
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
- for (i = 0; i < npages; i++)
- iopte_make_dummy(iommu, base + i);
+ for (j = 0; j < npages; j++)
+ iopte_make_dummy(iommu, base + j);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -803,7 +803,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
spin_unlock_irqrestore(&iommu->lock, flags);
}
-const struct dma_ops sun4u_dma_ops = {
+static const struct dma_ops sun4u_dma_ops = {
.alloc_coherent = dma_4u_alloc_coherent,
.free_coherent = dma_4u_free_coherent,
.map_single = dma_4u_map_single,
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 5ec06c8c7fe..eb88bd6e674 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -44,27 +44,10 @@
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>
-/* UPA nodes send interrupt packet to UltraSparc with first data reg
- * value low 5 (7 on Starfire) bits holding the IRQ identifier being
- * delivered. We must translate this into a non-vector IRQ so we can
- * set the softint on this cpu.
- *
- * To make processing these packets efficient and race free we use
- * an array of irq buckets below. The interrupt vector handler in
- * entry.S feeds incoming packets into per-cpu pil-indexed lists.
- *
- * If you make changes to ino_bucket, please update hand coded assembler
- * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
- */
-struct ino_bucket {
-/*0x00*/unsigned long __irq_chain_pa;
-
- /* Virtual interrupt number assigned to this INO. */
-/*0x08*/unsigned int __virt_irq;
-/*0x0c*/unsigned int __pad;
-};
+#include "entry.h"
#define NUM_IVECS (IMAP_INR + 1)
+
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index e116e38b160..acf8c5250aa 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -731,9 +731,6 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
current_thread_info()->xfsr[0] = 0;
current_thread_info()->fpsaved[0] = 0;
regs->tstate &= ~TSTATE_PEF;
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
}
out:
return error;
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 9a1ba1fe859..aaae865e793 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -35,6 +35,9 @@
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/cpudata.h>
+#include <asm/cacheflush.h>
+
+#include "entry.h"
/* #define ALLOW_INIT_TRACING */
@@ -67,6 +70,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
if (tlb_type == hypervisor)
return;
+ preempt_disable();
+
#ifdef DCACHE_ALIASING_POSSIBLE
/* If bit 13 of the kernel address we used to access the
* user page is the same as the virtual address that page
@@ -105,6 +110,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
for (; start < end; start += icache_line_size)
flushi(start);
}
+
+ preempt_enable();
}
enum sparc_regset {
@@ -382,6 +389,7 @@ static const struct user_regset_view user_sparc64_view = {
.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
};
+#ifdef CONFIG_COMPAT
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -676,14 +684,18 @@ static const struct user_regset_view user_sparc32_view = {
.name = "sparc", .e_machine = EM_SPARC,
.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
};
+#endif /* CONFIG_COMPAT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
+#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_32BIT))
return &user_sparc32_view;
+#endif
return &user_sparc64_view;
}
+#ifdef CONFIG_COMPAT
struct compat_fps {
unsigned int regs[32];
unsigned int fsr;
@@ -798,6 +810,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
return ret;
}
+#endif /* CONFIG_COMPAT */
struct fps {
unsigned int regs[64];
@@ -807,11 +820,14 @@ struct fps {
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
const struct user_regset_view *view = task_user_regset_view(child);
- struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
- struct fps __user *fps = (struct fps __user *) addr;
+ struct pt_regs __user *pregs;
+ struct fps __user *fps;
int ret;
+ pregs = (struct pt_regs __user *) (unsigned long) addr;
+ fps = (struct fps __user *) (unsigned long) addr;
+
switch (request) {
case PTRACE_PEEKUSR:
ret = (addr != 0) ? -EIO : 0;
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index d036dbe7286..6acb4c51cfe 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -51,6 +51,8 @@
#include <net/ipconfig.h>
#endif
+#include "entry.h"
+
/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
* operations in asm/ns87303.h
*/
@@ -335,9 +337,6 @@ void __init setup_arch(char **cmdline_p)
/* BUFFER is PAGE_SIZE bytes long. */
-extern char *sparc_cpu_type;
-extern char *sparc_fpu_type;
-
extern void smp_info(struct seq_file *);
extern void smp_bogo(struct seq_file *);
extern void mmu_info(struct seq_file *);
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index fb13775b368..94a9d64208e 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -32,6 +32,9 @@
#include <asm/siginfo.h>
#include <asm/visasm.h>
+#include "entry.h"
+#include "systbls.h"
+
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/* {set, get}context() needed for 64-bit SparcLinux userland. */
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index cc454731d87..59f020d69d4 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1,6 +1,6 @@
/* smp.c: Sparc64 SMP support.
*
- * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
@@ -30,6 +30,7 @@
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
+#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
@@ -284,14 +285,17 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
extern unsigned long sparc64_ttable_tl0;
extern unsigned long kern_locked_tte_data;
- extern int bigkernel;
struct hvtramp_descr *hdesc;
unsigned long trampoline_ra;
struct trap_per_cpu *tb;
u64 tte_vaddr, tte_data;
unsigned long hv_err;
+ int i;
- hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
+ hdesc = kzalloc(sizeof(*hdesc) +
+ (sizeof(struct hvtramp_mapping) *
+ num_kernel_image_mappings - 1),
+ GFP_KERNEL);
if (!hdesc) {
printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
"hvtramp_descr.\n");
@@ -299,7 +303,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
}
hdesc->cpu = cpu;
- hdesc->num_mappings = (bigkernel ? 2 : 1);
+ hdesc->num_mappings = num_kernel_image_mappings;
tb = &trap_block[cpu];
tb->hdesc = hdesc;
@@ -312,13 +316,11 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
tte_vaddr = (unsigned long) KERNBASE;
tte_data = kern_locked_tte_data;
- hdesc->maps[0].vaddr = tte_vaddr;
- hdesc->maps[0].tte = tte_data;
- if (bigkernel) {
+ for (i = 0; i < hdesc->num_mappings; i++) {
+ hdesc->maps[i].vaddr = tte_vaddr;
+ hdesc->maps[i].tte = tte_data;
tte_vaddr += 0x400000;
tte_data += 0x400000;
- hdesc->maps[1].vaddr = tte_vaddr;
- hdesc->maps[1].tte = tte_data;
}
trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
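
Since hvtramp_descr now carries one hvtramp_mapping per locked kernel TLB entry instead of a fixed one or two, the kzalloc() above sizes the trailing maps[] array at run time. The sizing is intended to be roughly the following, assuming (as the hunk implies) that maps[] is declared as a one-element array so sizeof(*hdesc) already accounts for the first slot:

        size_t sz = sizeof(*hdesc) +
                    (num_kernel_image_mappings - 1) * sizeof(struct hvtramp_mapping);
        hdesc = kzalloc(sz, GFP_KERNEL);   /* zeroed descriptor, one slot per 4MB mapping */
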
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
index 47f92a59be1..84d39e873e8 100644
--- a/arch/sparc64/kernel/stacktrace.c
+++ b/arch/sparc64/kernel/stacktrace.c
@@ -2,13 +2,15 @@
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
void save_stack_trace(struct stack_trace *trace)
{
unsigned long ksp, fp, thread_base;
struct thread_info *tp = task_thread_info(current);
- flushw_all();
+ stack_trace_flush();
+
__asm__ __volatile__(
"mov %%fp, %0"
: "=r" (ksp)
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 134d801579f..f952745d0f3 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -1,5 +1,4 @@
-/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
- * linux/arch/sparc64/kernel/sys_sparc.c
+/* linux/arch/sparc64/kernel/sys_sparc.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/sparc
@@ -30,6 +29,9 @@
#include <asm/perfctr.h>
#include <asm/unistd.h>
+#include "entry.h"
+#include "systbls.h"
+
/* #define DEBUG_UNIMP_SYSCALL */
asmlinkage unsigned long sys_getpagesize(void)
@@ -445,7 +447,8 @@ asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
goto out;
case SEMTIMEDOP:
err = sys_semtimedop(first, ptr, (unsigned)second,
- (const struct timespec __user *) fifth);
+ (const struct timespec __user *)
+ (unsigned long) fifth);
goto out;
case SEMGET:
err = sys_semget(first, (int)second, (int)third);
@@ -788,7 +791,7 @@ asmlinkage long sys_utrap_install(utrap_entry_t type,
} else {
if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
current_thread_info()->utraps[0] > 1) {
- long *p = current_thread_info()->utraps;
+ unsigned long *p = current_thread_info()->utraps;
current_thread_info()->utraps =
kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
@@ -816,7 +819,8 @@ asmlinkage long sys_utrap_install(utrap_entry_t type,
return 0;
}
-long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
+asmlinkage long sparc_memory_ordering(unsigned long model,
+ struct pt_regs *regs)
{
if (model >= 3)
return -EINVAL;
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index deaba2bd053..2455fa49887 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -678,9 +678,6 @@ asmlinkage long sparc32_execve(struct pt_regs *regs)
current_thread_info()->xfsr[0] = 0;
current_thread_info()->fpsaved[0] = 0;
regs->tstate &= ~TSTATE_PEF;
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
}
out:
return error;
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
index cfc22d3fe54..e91194fe39d 100644
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -618,7 +618,7 @@ sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
socket = SOCKET_I(inode);
local.sin_family = AF_INET;
- local.sin_addr.s_addr = INADDR_ANY;
+ local.sin_addr.s_addr = htonl(INADDR_ANY);
/* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
try_port = 1024;
diff --git a/arch/sparc64/kernel/systbls.h b/arch/sparc64/kernel/systbls.h
new file mode 100644
index 00000000000..8a0d20a35d0
--- /dev/null
+++ b/arch/sparc64/kernel/systbls.h
@@ -0,0 +1,53 @@
+#ifndef _SYSTBLS_H
+#define _SYSTBLS_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include <asm/utrap.h>
+#include <asm/signal.h>
+
+extern asmlinkage unsigned long sys_getpagesize(void);
+extern asmlinkage unsigned long sparc_brk(unsigned long brk);
+extern asmlinkage long sparc_pipe(struct pt_regs *regs);
+extern asmlinkage long sys_ipc(unsigned int call, int first,
+ unsigned long second,
+ unsigned long third,
+ void __user *ptr, long fifth);
+extern asmlinkage long sparc64_newuname(struct new_utsname __user *name);
+extern asmlinkage long sparc64_personality(unsigned long personality);
+extern asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long off);
+extern asmlinkage long sys64_munmap(unsigned long addr, size_t len);
+extern asmlinkage unsigned long sys64_mremap(unsigned long addr,
+ unsigned long old_len,
+ unsigned long new_len,
+ unsigned long flags,
+ unsigned long new_addr);
+extern asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs);
+extern asmlinkage long sys_getdomainname(char __user *name, int len);
+extern asmlinkage long solaris_syscall(struct pt_regs *regs);
+extern asmlinkage long sunos_syscall(struct pt_regs *regs);
+extern asmlinkage long sys_utrap_install(utrap_entry_t type,
+ utrap_handler_t new_p,
+ utrap_handler_t new_d,
+ utrap_handler_t __user *old_p,
+ utrap_handler_t __user *old_d);
+extern asmlinkage long sparc_memory_ordering(unsigned long model,
+ struct pt_regs *regs);
+extern asmlinkage long sys_rt_sigaction(int sig,
+ const struct sigaction __user *act,
+ struct sigaction __user *oact,
+ void __user *restorer,
+ size_t sigsetsize);
+extern asmlinkage long sys_perfctr(int opcode, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2);
+
+extern asmlinkage void sparc64_set_context(struct pt_regs *regs);
+extern asmlinkage void sparc64_get_context(struct pt_regs *regs);
+extern asmlinkage long sys_sigpause(unsigned int set);
+extern asmlinkage long sys_sigsuspend(old_sigset_t set);
+extern void do_rt_sigreturn(struct pt_regs *regs);
+
+#endif /* _SYSTBLS_H */
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index d204f1ab1d4..e5d238970c7 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -1,7 +1,6 @@
-/* $Id: time.c,v 1.42 2002/01/23 14:33:55 davem Exp $
- * time.c: UltraSparc timer and TOD clock support.
+/* time.c: UltraSparc timer and TOD clock support.
*
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
*
* Based largely on code which is:
@@ -48,6 +47,8 @@
#include <asm/uaccess.h>
#include <asm/irq_regs.h>
+#include "entry.h"
+
DEFINE_SPINLOCK(mostek_lock);
DEFINE_SPINLOCK(rtc_lock);
void __iomem *mstk48t02_regs = NULL;
@@ -508,6 +509,37 @@ static int __init has_low_battery(void)
return (data1 == data2); /* Was the write blocked? */
}
+static void __init mostek_set_system_time(void __iomem *mregs)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ u8 tmp;
+
+ spin_lock_irq(&mostek_lock);
+
+ /* Traditional Mostek chip. */
+ tmp = mostek_read(mregs + MOSTEK_CREG);
+ tmp |= MSTK_CREG_READ;
+ mostek_write(mregs + MOSTEK_CREG, tmp);
+
+ sec = MSTK_REG_SEC(mregs);
+ min = MSTK_REG_MIN(mregs);
+ hour = MSTK_REG_HOUR(mregs);
+ day = MSTK_REG_DOM(mregs);
+ mon = MSTK_REG_MONTH(mregs);
+ year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
+
+ xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+
+ tmp = mostek_read(mregs + MOSTEK_CREG);
+ tmp &= ~MSTK_CREG_READ;
+ mostek_write(mregs + MOSTEK_CREG, tmp);
+
+ spin_unlock_irq(&mostek_lock);
+}
+
/* Probe for the real time clock chip. */
static void __init set_system_time(void)
{
@@ -520,7 +552,6 @@ static void __init set_system_time(void)
unsigned long dregs = 0UL;
void __iomem *bregs = 0UL;
#endif
- u8 tmp;
if (!mregs && !dregs && !bregs) {
prom_printf("Something wrong, clock regs not mapped yet.\n");
@@ -528,20 +559,11 @@ static void __init set_system_time(void)
}
if (mregs) {
- spin_lock_irq(&mostek_lock);
-
- /* Traditional Mostek chip. */
- tmp = mostek_read(mregs + MOSTEK_CREG);
- tmp |= MSTK_CREG_READ;
- mostek_write(mregs + MOSTEK_CREG, tmp);
+ mostek_set_system_time(mregs);
+ return;
+ }
- sec = MSTK_REG_SEC(mregs);
- min = MSTK_REG_MIN(mregs);
- hour = MSTK_REG_HOUR(mregs);
- day = MSTK_REG_DOM(mregs);
- mon = MSTK_REG_MONTH(mregs);
- year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
- } else if (bregs) {
+ if (bregs) {
unsigned char val = readb(bregs + 0x0e);
unsigned int century;
@@ -596,14 +618,6 @@ static void __init set_system_time(void)
xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
-
- if (mregs) {
- tmp = mostek_read(mregs + MOSTEK_CREG);
- tmp &= ~MSTK_CREG_READ;
- mostek_write(mregs + MOSTEK_CREG, tmp);
-
- spin_unlock_irq(&mostek_lock);
- }
}
/* davem suggests we keep this within the 4M locked kernel image */
@@ -1027,7 +1041,7 @@ void __init time_init(void)
setup_clockevent_multiplier(clock);
sparc64_clockevent.max_delta_ns =
- clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent);
+ clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
sparc64_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &sparc64_clockevent);
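
The new mostek_set_system_time() helper keeps the chip's read protocol intact: setting MSTK_CREG_READ in the control register latches the time-of-day registers so the six fields are read as one consistent snapshot, and clearing the bit afterwards lets the clock resume updating them. The shape of the sequence, condensed from the hunk (the read-modify-write of the control register shown inline):

        tmp = mostek_read(mregs + MOSTEK_CREG);
        mostek_write(mregs + MOSTEK_CREG, tmp | MSTK_CREG_READ);    /* latch the TOD registers */
        /* ... read sec/min/hour/day/month/year ... */
        tmp = mostek_read(mregs + MOSTEK_CREG);
        mostek_write(mregs + MOSTEK_CREG, tmp & ~MSTK_CREG_READ);   /* let the clock run again */
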
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 4ae2e525d68..56ff5521134 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -105,7 +105,7 @@ startup_continue:
wr %g2, 0, %tick_cmpr
/* Call OBP by hand to lock KERNBASE into i/d tlbs.
- * We lock 2 consequetive entries if we are 'bigkernel'.
+ * We lock 'num_kernel_image_mappings' consequetive entries.
*/
sethi %hi(prom_entry_lock), %g2
1: ldstub [%g2 + %lo(prom_entry_lock)], %g1
@@ -119,6 +119,29 @@ startup_continue:
add %l2, -(192 + 128), %sp
flushw
+ /* Setup the loop variables:
+ * %l3: VADDR base
+ * %l4: TTE base
+ * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
+ * %l6: Number of TTE entries to map
+ * %l7: Highest TTE entry number, we count down
+ */
+ sethi %hi(KERNBASE), %l3
+ sethi %hi(kern_locked_tte_data), %l4
+ ldx [%l4 + %lo(kern_locked_tte_data)], %l4
+ clr %l5
+ sethi %hi(num_kernel_image_mappings), %l6
+ lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
+ add %l6, 1, %l6
+
+ mov 15, %l7
+ BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
+
+ mov 63, %l7
+2:
+
+3:
+ /* Lock into I-MMU */
sethi %hi(call_method), %g2
or %g2, %lo(call_method), %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
@@ -132,63 +155,26 @@ startup_continue:
sethi %hi(prom_mmu_ihandle_cache), %g2
lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
- sethi %hi(KERNBASE), %g2
- stx %g2, [%sp + 2047 + 128 + 0x28]
- sethi %hi(kern_locked_tte_data), %g2
- ldx [%g2 + %lo(kern_locked_tte_data)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x30]
-
- mov 15, %g2
- BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
- mov 63, %g2
-1:
- stx %g2, [%sp + 2047 + 128 + 0x38]
- sethi %hi(p1275buf), %g2
- or %g2, %lo(p1275buf), %g2
- ldx [%g2 + 0x08], %o1
- call %o1
- add %sp, (2047 + 128), %o0
+ /* Each TTE maps 4MB, convert index to offset. */
+ sllx %l5, 22, %g1
- sethi %hi(bigkernel), %g2
- lduw [%g2 + %lo(bigkernel)], %g2
- brz,pt %g2, do_dtlb
- nop
+ add %l3, %g1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x28] ! VADDR
+ add %l4, %g1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x30] ! TTE
- sethi %hi(call_method), %g2
- or %g2, %lo(call_method), %g2
- stx %g2, [%sp + 2047 + 128 + 0x00]
- mov 5, %g2
- stx %g2, [%sp + 2047 + 128 + 0x08]
- mov 1, %g2
- stx %g2, [%sp + 2047 + 128 + 0x10]
- sethi %hi(itlb_load), %g2
- or %g2, %lo(itlb_load), %g2
- stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(prom_mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x20]
- sethi %hi(KERNBASE + 0x400000), %g2
- stx %g2, [%sp + 2047 + 128 + 0x28]
- sethi %hi(kern_locked_tte_data), %g2
- ldx [%g2 + %lo(kern_locked_tte_data)], %g2
- sethi %hi(0x400000), %g1
- add %g2, %g1, %g2
- stx %g2, [%sp + 2047 + 128 + 0x30]
-
- mov 14, %g2
- BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
- mov 62, %g2
-1:
+ /* TTE index is highest minus loop index. */
+ sub %l7, %l5, %g2
stx %g2, [%sp + 2047 + 128 + 0x38]
+
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
-do_dtlb:
+ /* Lock into D-MMU */
sethi %hi(call_method), %g2
or %g2, %lo(call_method), %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
@@ -202,65 +188,30 @@ do_dtlb:
sethi %hi(prom_mmu_ihandle_cache), %g2
lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
- sethi %hi(KERNBASE), %g2
- stx %g2, [%sp + 2047 + 128 + 0x28]
- sethi %hi(kern_locked_tte_data), %g2
- ldx [%g2 + %lo(kern_locked_tte_data)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x30]
- mov 15, %g2
- BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
+ /* Each TTE maps 4MB, convert index to offset. */
+ sllx %l5, 22, %g1
- mov 63, %g2
-1:
+ add %l3, %g1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x28] ! VADDR
+ add %l4, %g1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x30] ! TTE
+ /* TTE index is highest minus loop index. */
+ sub %l7, %l5, %g2
stx %g2, [%sp + 2047 + 128 + 0x38]
+
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
- sethi %hi(bigkernel), %g2
- lduw [%g2 + %lo(bigkernel)], %g2
- brz,pt %g2, do_unlock
+ add %l5, 1, %l5
+ cmp %l5, %l6
+ bne,pt %xcc, 3b
nop
- sethi %hi(call_method), %g2
- or %g2, %lo(call_method), %g2
- stx %g2, [%sp + 2047 + 128 + 0x00]
- mov 5, %g2
- stx %g2, [%sp + 2047 + 128 + 0x08]
- mov 1, %g2
- stx %g2, [%sp + 2047 + 128 + 0x10]
- sethi %hi(dtlb_load), %g2
- or %g2, %lo(dtlb_load), %g2
- stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(prom_mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x20]
- sethi %hi(KERNBASE + 0x400000), %g2
- stx %g2, [%sp + 2047 + 128 + 0x28]
- sethi %hi(kern_locked_tte_data), %g2
- ldx [%g2 + %lo(kern_locked_tte_data)], %g2
- sethi %hi(0x400000), %g1
- add %g2, %g1, %g2
- stx %g2, [%sp + 2047 + 128 + 0x30]
-
- mov 14, %g2
- BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
- mov 62, %g2
-1:
-
- stx %g2, [%sp + 2047 + 128 + 0x38]
- sethi %hi(p1275buf), %g2
- or %g2, %lo(p1275buf), %g2
- ldx [%g2 + 0x08], %o1
- call %o1
- add %sp, (2047 + 128), %o0
-
-do_unlock:
sethi %hi(prom_entry_lock), %g2
stb %g0, [%g2 + %lo(prom_entry_lock)]
membar #StoreStore | #StoreLoad
@@ -269,47 +220,36 @@ do_unlock:
nop
niagara_lock_tlb:
+ sethi %hi(KERNBASE), %l3
+ sethi %hi(kern_locked_tte_data), %l4
+ ldx [%l4 + %lo(kern_locked_tte_data)], %l4
+ clr %l5
+ sethi %hi(num_kernel_image_mappings), %l6
+ lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
+ add %l6, 1, %l6
+
+1:
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
- sethi %hi(KERNBASE), %o0
+ sllx %l5, 22, %g2
+ add %l3, %g2, %o0
clr %o1
- sethi %hi(kern_locked_tte_data), %o2
- ldx [%o2 + %lo(kern_locked_tte_data)], %o2
+ add %l4, %g2, %o2
mov HV_MMU_IMMU, %o3
ta HV_FAST_TRAP
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
- sethi %hi(KERNBASE), %o0
+ sllx %l5, 22, %g2
+ add %l3, %g2, %o0
clr %o1
- sethi %hi(kern_locked_tte_data), %o2
- ldx [%o2 + %lo(kern_locked_tte_data)], %o2
+ add %l4, %g2, %o2
mov HV_MMU_DMMU, %o3
ta HV_FAST_TRAP
- sethi %hi(bigkernel), %g2
- lduw [%g2 + %lo(bigkernel)], %g2
- brz,pt %g2, after_lock_tlb
+ add %l5, 1, %l5
+ cmp %l5, %l6
+ bne,pt %xcc, 1b
nop
- mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
- sethi %hi(KERNBASE + 0x400000), %o0
- clr %o1
- sethi %hi(kern_locked_tte_data), %o2
- ldx [%o2 + %lo(kern_locked_tte_data)], %o2
- sethi %hi(0x400000), %o3
- add %o2, %o3, %o2
- mov HV_MMU_IMMU, %o3
- ta HV_FAST_TRAP
-
- mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
- sethi %hi(KERNBASE + 0x400000), %o0
- clr %o1
- sethi %hi(kern_locked_tte_data), %o2
- ldx [%o2 + %lo(kern_locked_tte_data)], %o2
- sethi %hi(0x400000), %o3
- add %o2, %o3, %o2
- mov HV_MMU_DMMU, %o3
- ta HV_FAST_TRAP
-
after_lock_tlb:
wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
wr %g0, 0, %fprs
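
In both the OBP loop and the hypervisor (niagara_lock_tlb) loop above, the shift by 22 turns the mapping index into a byte offset, since each locked TTE covers 1 << 22 bytes (4MB). The per-iteration address math corresponds to roughly the following C (illustrative names only; on the OBP path the TLB slot number counts down from the highest locked entry, 15 or 63 depending on the chip):

        unsigned long off = (unsigned long)i << 22;          /* sllx %l5, 22 */
        unsigned long va  = KERNBASE + off;                  /* arg: virtual address of chunk i */
        unsigned long tte = kern_locked_tte_data + off;      /* arg: matching TTE data */
        int tlb_slot      = highest_locked_entry - i;        /* OBP only: sub %l7, %l5 */
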
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 007f5317c0d..96da847023f 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -42,6 +42,7 @@
#endif
#include <asm/prom.h>
+#include "entry.h"
/* When an irrecoverable trap occurs at tl > 0, the trap entry
* code logs the trap state registers at every level in the trap
@@ -77,11 +78,6 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
}
}
-void do_call_debug(struct pt_regs *regs)
-{
- notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
-}
-
void bad_trap(struct pt_regs *regs, long lvl)
{
char buffer[32];
@@ -550,41 +546,6 @@ static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;
-/* WARNING: The error trap handlers in assembly know the precise
- * layout of the following structure.
- *
- * C-level handlers below use this information to log the error
- * and then determine how to recover (if possible).
- */
-struct cheetah_err_info {
-/*0x00*/u64 afsr;
-/*0x08*/u64 afar;
-
- /* D-cache state */
-/*0x10*/u64 dcache_data[4]; /* The actual data */
-/*0x30*/u64 dcache_index; /* D-cache index */
-/*0x38*/u64 dcache_tag; /* D-cache tag/valid */
-/*0x40*/u64 dcache_utag; /* D-cache microtag */
-/*0x48*/u64 dcache_stag; /* D-cache snooptag */
-
- /* I-cache state */
-/*0x50*/u64 icache_data[8]; /* The actual insns + predecode */
-/*0x90*/u64 icache_index; /* I-cache index */
-/*0x98*/u64 icache_tag; /* I-cache phys tag */
-/*0xa0*/u64 icache_utag; /* I-cache microtag */
-/*0xa8*/u64 icache_stag; /* I-cache snooptag */
-/*0xb0*/u64 icache_upper; /* I-cache upper-tag */
-/*0xb8*/u64 icache_lower; /* I-cache lower-tag */
-
- /* E-cache state */
-/*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */
-/*0xe0*/u64 ecache_index; /* E-cache index */
-/*0xe8*/u64 ecache_tag; /* E-cache tag/state */
-
-/*0xf0*/u64 __pad[32 - 30];
-};
-#define CHAFSR_INVALID ((u64)-1L)
-
/* This table is ordered in priority of errors and matches the
* AFAR overwrite policy as well.
*/
@@ -758,10 +719,6 @@ static struct afsr_error_table __jalapeno_error_table[] = {
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;
-/* This is allocated at boot time based upon the largest hardware
- * cpu ID in the system. We allocate two entries per cpu, one for
- * TL==0 logging and one for TL >= 1 logging.
- */
struct cheetah_err_info *cheetah_error_log;
static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
@@ -2102,7 +2059,7 @@ void do_div0(struct pt_regs *regs)
force_sig_info(SIGFPE, &info, current);
}
-void instruction_dump (unsigned int *pc)
+static void instruction_dump(unsigned int *pc)
{
int i;
@@ -2115,7 +2072,7 @@ void instruction_dump (unsigned int *pc)
printk("\n");
}
-static void user_instruction_dump (unsigned int __user *pc)
+static void user_instruction_dump(unsigned int __user *pc)
{
int i;
unsigned int buf[9];
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b5c30416fda..f37078d9640 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -46,6 +46,7 @@
#include <asm/prom.h>
#include <asm/sstate.h>
#include <asm/mdesc.h>
+#include <asm/cpudata.h>
#define MAX_PHYS_ADDRESS (1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
@@ -166,7 +167,7 @@ unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;
-int bigkernel = 0;
+int num_kernel_image_mappings;
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
@@ -572,7 +573,7 @@ static unsigned long kern_large_tte(unsigned long paddr);
static void __init remap_kernel(void)
{
unsigned long phys_page, tte_vaddr, tte_data;
- int tlb_ent = sparc64_highest_locked_tlbent();
+ int i, tlb_ent = sparc64_highest_locked_tlbent();
tte_vaddr = (unsigned long) KERNBASE;
phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
@@ -582,27 +583,20 @@ static void __init remap_kernel(void)
/* Now lock us into the TLBs via Hypervisor or OBP. */
if (tlb_type == hypervisor) {
- hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
- hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
- if (bigkernel) {
- tte_vaddr += 0x400000;
- tte_data += 0x400000;
+ for (i = 0; i < num_kernel_image_mappings; i++) {
hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+ tte_vaddr += 0x400000;
+ tte_data += 0x400000;
}
} else {
- prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
- prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
- if (bigkernel) {
- tlb_ent -= 1;
- prom_dtlb_load(tlb_ent,
- tte_data + 0x400000,
- tte_vaddr + 0x400000);
- prom_itlb_load(tlb_ent,
- tte_data + 0x400000,
- tte_vaddr + 0x400000);
+ for (i = 0; i < num_kernel_image_mappings; i++) {
+ prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
+ prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
+ tte_vaddr += 0x400000;
+ tte_data += 0x400000;
}
- sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+ sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
}
if (tlb_type == cheetah_plus) {
sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
@@ -1280,10 +1274,6 @@ void __cpuinit sun4v_ktsb_register(void)
/* paging_init() sets up the page tables */
-extern void cheetah_ecache_flush_init(void);
-extern void sun4v_patch_tlb_handlers(void);
-
-extern void cpu_probe(void);
extern void central_probe(void);
static unsigned long last_valid_pfn;
@@ -1352,12 +1342,9 @@ void __init paging_init(void)
shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
real_end = (unsigned long)_end;
- if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
- bigkernel = 1;
- if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
- prom_printf("paging_init: Kernel > 8MB, too large.\n");
- prom_halt();
- }
+ num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+ printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+ num_kernel_image_mappings);
/* Set kernel pgd to upper alias so physical page computations
* work.
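
Each locked TLB entry installed by remap_kernel() covers one 4MB page, so num_kernel_image_mappings is simply the kernel image size rounded up to 4MB units. Expanding DIV_ROUND_UP by hand (illustrative numbers only):

        /* DIV_ROUND_UP(n, d) == (n + d - 1) / d */
        unsigned long image_bytes = real_end - (unsigned long)KERNBASE;
        int nmaps = (image_bytes + (1UL << 22) - 1) >> 22;   /* 4MB units */
        /* e.g. a 6.5MB image needs 2 locked entries, a 9MB image needs 3 */
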
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
index 3f10fc921b0..a0f000b293d 100644
--- a/arch/sparc64/mm/tlb.c
+++ b/arch/sparc64/mm/tlb.c
@@ -23,10 +23,11 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
void flush_tlb_pending(void)
{
- struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+ struct mmu_gather *mp;
preempt_disable();
+ mp = &__get_cpu_var(mmu_gathers);
if (mp->tlb_nr) {
flush_tsb_user(mp);
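
The tlb.c change closes a small window in flush_tlb_pending(): with the old ordering, the task could be migrated to another CPU after &__get_cpu_var(mmu_gathers) was evaluated but before preempt_disable() ran, leaving mp pointing at the previous CPU's mmu_gather. The safe ordering the hunk establishes is:

        preempt_disable();                      /* pin the task to this CPU first...        */
        mp = &__get_cpu_var(mmu_gathers);       /* ...then resolve this CPU's gather safely */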