Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/Kconfig                |   2
-rw-r--r--  arch/parisc/hpux/fs.c              |   2
-rw-r--r--  arch/parisc/kernel/binfmt_elf32.c  |  24
-rw-r--r--  arch/parisc/kernel/cache.c         |  48
-rw-r--r--  arch/parisc/kernel/entry.S         |  21
-rw-r--r--  arch/parisc/kernel/hardware.c      |   3
-rw-r--r--  arch/parisc/kernel/irq.c           | 151
-rw-r--r--  arch/parisc/kernel/processor.c     |   5
-rw-r--r--  arch/parisc/kernel/signal.c        |   5
-rw-r--r--  arch/parisc/kernel/smp.c           |   7
-rw-r--r--  arch/parisc/kernel/sys_parisc.c    |  45
-rw-r--r--  arch/parisc/kernel/syscall_table.S |   4
-rw-r--r--  arch/parisc/kernel/time.c          | 208
-rw-r--r--  arch/parisc/kernel/traps.c         |  10
-rw-r--r--  arch/parisc/mm/init.c              |  23
-rw-r--r--  arch/parisc/mm/ioremap.c           |   2
16 files changed, 360 insertions(+), 200 deletions(-)
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 6dd0ea8f88e..d2101237442 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -127,7 +127,7 @@ config PA11
config PREFETCH
def_bool y
- depends on PA8X00
+ depends on PA8X00 || PA7200
config 64BIT
bool "64-bit kernel"
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 6e79dbf3f6b..2d58b92b57e 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -96,7 +96,7 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
put_user(namlen, &dirent->d_namlen);
copy_to_user(dirent->d_name, name, namlen);
put_user(0, dirent->d_name + namlen);
- ((char *) dirent) += reclen;
+ dirent = (void __user *)dirent + reclen;
buf->current_dir = dirent;
buf->count -= reclen;
return 0;
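
The hunk above replaces a cast-as-lvalue increment: "((char *) dirent) += reclen;" relies on an old GCC extension (a cast does not yield an lvalue in ISO C) that newer compilers reject. A standalone sketch of the before/after, with a stand-in struct since the real hpux dirent layout is not shown here:

    #include <stdio.h>

    struct dirent_stub { long d_ino; };     /* stand-in for the hpux dirent */

    int main(void)
    {
            struct dirent_stub buf[2];
            struct dirent_stub *dirent = &buf[0];
            unsigned long reclen = sizeof(*dirent);

            /* ((char *) dirent) += reclen;  <- rejected: cast is not an lvalue */

            /* The patch steps through void * instead (arithmetic on void *
             * is itself a GCC extension the kernel build relies on); fully
             * portable code would round-trip through char *: */
            dirent = (struct dirent_stub *)((char *)dirent + reclen);

            printf("advanced from %p to %p\n", (void *)&buf[0], (void *)dirent);
            return 0;
    }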
diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c
index d1833f164bb..1e64e7b8811 100644
--- a/arch/parisc/kernel/binfmt_elf32.c
+++ b/arch/parisc/kernel/binfmt_elf32.c
@@ -87,7 +87,7 @@ struct elf_prpsinfo32
*/
#define SET_PERSONALITY(ex, ibcs2) \
- current->personality = PER_LINUX32; \
+ set_thread_flag(TIF_32BIT); \
current->thread.map_base = DEFAULT_MAP_BASE32; \
current->thread.task_size = DEFAULT_TASK_SIZE32 \
@@ -102,25 +102,3 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
}
#include "../../../fs/binfmt_elf.c"
-
-/* Set up a separate execution domain for ELF32 binaries running
- * on an ELF64 kernel */
-
-static struct exec_domain parisc32_exec_domain = {
- .name = "Linux/ELF32",
- .pers_low = PER_LINUX32,
- .pers_high = PER_LINUX32,
-};
-
-static int __init parisc32_exec_init(void)
-{
- /* steal the identity signal mappings from the default domain */
- parisc32_exec_domain.signal_map = default_exec_domain.signal_map;
- parisc32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
-
- register_exec_domain(&parisc32_exec_domain);
-
- return 0;
-}
-
-__initcall(parisc32_exec_init);
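
With SET_PERSONALITY now raising TIF_32BIT instead of assigning PER_LINUX32, and the ELF32 exec domain deleted, 32-bitness travels as a thread flag rather than in current->personality. A minimal sketch of the new convention, assuming test_thread_flag() from linux/thread_info.h and TIF_32BIT from the parisc asm/thread_info.h; the is_32bit_task() wrapper is illustrative only, as the patch open-codes the test at each site (see the signal.c hunks below):

    #include <linux/thread_info.h>

    /* Illustrative helper, not part of the patch. */
    static inline int is_32bit_task(void)
    {
    #ifdef CONFIG_64BIT
            return test_thread_flag(TIF_32BIT);
    #else
            return 1;       /* a 32-bit kernel runs only 32-bit tasks */
    #endif
    }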
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index bc7c4a4e26a..0be51e92a2f 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -35,15 +35,12 @@ int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);
-#if defined(CONFIG_SMP)
/* On some machines (e.g. ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We put a spinlock around all TLB flushes to
* ensure this.
*/
DEFINE_SPINLOCK(pa_tlb_lock);
-EXPORT_SYMBOL(pa_tlb_lock);
-#endif
struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
@@ -91,7 +88,8 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
flush_kernel_dcache_page(page);
clear_bit(PG_dcache_dirty, &page->flags);
- }
+ } else if (parisc_requires_coherency())
+ flush_kernel_dcache_page(page);
}
void
@@ -370,3 +368,45 @@ void parisc_setup_cache_timing(void)
printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}
+
+extern void purge_kernel_dcache_page(unsigned long);
+extern void clear_user_page_asm(void *page, unsigned long vaddr);
+
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+{
+ purge_kernel_dcache_page((unsigned long)page);
+ purge_tlb_start();
+ pdtlb_kernel(page);
+ purge_tlb_end();
+ clear_user_page_asm(page, vaddr);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+void flush_kernel_dcache_page_addr(void *addr)
+{
+ flush_kernel_dcache_page_asm(addr);
+ purge_tlb_start();
+ pdtlb_kernel(addr);
+ purge_tlb_end();
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+ struct page *pg)
+{
+ /* no coherency needed (all in kmap/kunmap) */
+ copy_user_page_asm(vto, vfrom);
+ if (!parisc_requires_coherency())
+ flush_kernel_dcache_page_asm(vto);
+}
+EXPORT_SYMBOL(copy_user_page);
+
+#ifdef CONFIG_PA8X00
+
+void kunmap_parisc(void *addr)
+{
+ if (parisc_requires_coherency())
+ flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_parisc);
+#endif
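
clear_user_page() and flush_kernel_dcache_page_addr() above bracket every pdtlb_kernel() purge with purge_tlb_start()/purge_tlb_end(), and the first hunk drops the #if defined(CONFIG_SMP) guard so pa_tlb_lock is always available to back them. A sketch of the presumed asm/tlbflush.h helpers of this era:

    /* Sketch, assuming the spinlock-based helpers of this era: every
     * PxTLB purge is serialized through pa_tlb_lock, since machines
     * such as those with a Merced bus permit only one PxTLB broadcast
     * at a time. */
    extern spinlock_t pa_tlb_lock;

    #define purge_tlb_start()       spin_lock(&pa_tlb_lock)
    #define purge_tlb_end()         spin_unlock(&pa_tlb_lock)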
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 192357a3b9f..340b5e8d67b 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -30,6 +30,7 @@
#include <asm/psw.h>
+#include <asm/cache.h> /* for L1_CACHE_SHIFT */
#include <asm/assembly.h> /* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
@@ -478,11 +479,7 @@
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
copy \pmd,%r9
-#ifdef CONFIG_64BIT
- shld %r9,PxD_VALUE_SHIFT,\pmd
-#else
- shlw %r9,PxD_VALUE_SHIFT,\pmd
-#endif
+ SHLREG %r9,PxD_VALUE_SHIFT,\pmd
EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
@@ -970,11 +967,7 @@ intr_return:
/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
** irq_stat[] is defined using ____cacheline_aligned.
*/
-#ifdef CONFIG_64BIT
- shld %r1, 6, %r20
-#else
- shlw %r1, 5, %r20
-#endif
+ SHLREG %r1,L1_CACHE_SHIFT,%r20
add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */
@@ -1076,7 +1069,7 @@ intr_do_preempt:
BL preempt_schedule_irq, %r2
nop
- b intr_restore /* ssm PSW_SM_I done by intr_restore */
+ b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */
.import do_signal,code
@@ -2115,11 +2108,7 @@ syscall_check_bh:
ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-#ifdef CONFIG_64BIT
- shld %r26, 6, %r20
-#else
- shlw %r26, 5, %r20
-#endif
+ SHLREG %r26,L1_CACHE_SHIFT,%r20
add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */
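
All three hunks above collapse the same #ifdef CONFIG_64BIT shift pair into a single SHLREG line, with the hard-coded 5/6 shift amounts replaced by L1_CACHE_SHIFT from the newly included asm/cache.h (5 for 32-byte lines on 32-bit, 6 for 64-byte lines on 64-bit, matching the old constants). A sketch of the presumed SHLREG definition in asm/assembly.h:

    /* Presumed definition (sketch): pick the width-appropriate
     * shift-left instruction so call sites need no #ifdef. */
    #ifdef CONFIG_64BIT
    #define SHLREG  shld
    #else
    #define SHLREG  shlw
    #endif

With that, SHLREG %r1,L1_CACHE_SHIFT,%r20 assembles to the old shld %r1, 6, %r20 on 64-bit kernels and shlw %r1, 5, %r20 on 32-bit ones.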
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 3058bffd8a2..18ba4cb9159 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -231,6 +231,7 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
{HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"},
{HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"},
{HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"},
+ {HPHW_NPROC,0x5EB,0x4,0x91,"Perf/Leone 875 W2+"},
{HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"},
{HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"},
{HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"},
@@ -584,8 +585,10 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
{HPHW_CONSOLE, 0x01A, 0x0001F, 0x00, "Jason/Anole 64 Null Console"},
{HPHW_CONSOLE, 0x01B, 0x0001F, 0x00, "Jason/Anole 100 Null Console"},
{HPHW_FABRIC, 0x004, 0x000AA, 0x80, "Halfdome DNA Central Agent"},
+ {HPHW_FABRIC, 0x005, 0x000AA, 0x80, "Keystone DNA Central Agent"},
{HPHW_FABRIC, 0x007, 0x000AA, 0x80, "Caribe DNA Central Agent"},
{HPHW_FABRIC, 0x004, 0x000AB, 0x00, "Halfdome TOGO Fabric Crossbar"},
+ {HPHW_FABRIC, 0x005, 0x000AB, 0x00, "Keystone TOGO Fabric Crossbar"},
{HPHW_FABRIC, 0x004, 0x000AC, 0x00, "Halfdome Sakura Fabric Router"},
{HPHW_FIO, 0x025, 0x0002E, 0x80, "Armyknife Optional X.25"},
{HPHW_FIO, 0x004, 0x0004F, 0x0, "8-Port X.25 EISA-ACC (AMSO)"},
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 5b8803cc3d6..9bdd0197ceb 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -45,6 +45,17 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
*/
static volatile unsigned long cpu_eiem = 0;
+/*
+** ack bitmap ... bits are normally set (1), but cleared (0)
+** between the ->ack() and ->end() of an interrupt to prevent
+** re-interruption while that interrupt is being processed.
+*/
+static volatile unsigned long global_ack_eiem = ~0UL;
+/*
+** Local bitmap, same as above but for per-cpu interrupts
+*/
+static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
+
static void cpu_disable_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
@@ -62,13 +73,6 @@ static void cpu_enable_irq(unsigned int irq)
cpu_eiem |= eirr_bit;
- /* FIXME: while our interrupts aren't nested, we cannot reset
- * the eiem mask if we're already in an interrupt. Once we
- * implement nested interrupts, this can go away
- */
- if (!in_interrupt())
- set_eiem(cpu_eiem);
-
/* This is just a simple NOP IPI. But what it does is cause
* all the other CPUs to do a set_eiem(cpu_eiem) at the end
* of the interrupt handler */
@@ -84,13 +88,45 @@ static unsigned int cpu_startup_irq(unsigned int irq)
void no_ack_irq(unsigned int irq) { }
void no_end_irq(unsigned int irq) { }
+void cpu_ack_irq(unsigned int irq)
+{
+ unsigned long mask = EIEM_MASK(irq);
+ int cpu = smp_processor_id();
+
+ /* Clear in EIEM so we can no longer process */
+ if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
+ per_cpu(local_ack_eiem, cpu) &= ~mask;
+ else
+ global_ack_eiem &= ~mask;
+
+ /* disable the interrupt */
+ set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+ /* and now ack it */
+ mtctl(mask, 23);
+}
+
+void cpu_end_irq(unsigned int irq)
+{
+ unsigned long mask = EIEM_MASK(irq);
+ int cpu = smp_processor_id();
+
+ /* set it in the eiems -- the interrupt is no longer being processed */
+ if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
+ per_cpu(local_ack_eiem, cpu) |= mask;
+ else
+ global_ack_eiem |= mask;
+
+ /* enable the interrupt */
+ set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+}
+
#ifdef CONFIG_SMP
int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
{
int cpu_dest;
/* timer and ipi have to always be received on all CPUs */
- if (irq == TIMER_IRQ || irq == IPI_IRQ) {
+ if (CHECK_IRQ_PER_CPU(irq_desc[irq].status)) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
irq_desc[irq].affinity = CPU_MASK_ALL;
@@ -119,8 +155,8 @@ static struct hw_interrupt_type cpu_interrupt_type = {
.shutdown = cpu_disable_irq,
.enable = cpu_enable_irq,
.disable = cpu_disable_irq,
- .ack = no_ack_irq,
- .end = no_end_irq,
+ .ack = cpu_ack_irq,
+ .end = cpu_end_irq,
#ifdef CONFIG_SMP
.set_affinity = cpu_set_affinity_irq,
#endif
@@ -209,7 +245,7 @@ int show_interrupts(struct seq_file *p, void *v)
** Then use that to get the Transaction address and data.
*/
-int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *type, void *data)
+int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
if (irq_desc[irq].action)
return -EBUSY;
@@ -298,82 +334,69 @@ unsigned int txn_alloc_data(unsigned int virt_irq)
return virt_irq - CPU_IRQ_BASE;
}
+static inline int eirr_to_irq(unsigned long eirr)
+{
+#ifdef CONFIG_64BIT
+ int bit = fls64(eirr);
+#else
+ int bit = fls(eirr);
+#endif
+ return (BITS_PER_LONG - bit) + TIMER_IRQ;
+}
+
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
unsigned long eirr_val;
-
- irq_enter();
-
- /*
- * Don't allow TIMER or IPI nested interrupts.
- * Allowing any single interrupt to nest can lead to that CPU
- * handling interrupts with all enabled interrupts unmasked.
- */
- set_eiem(0UL);
-
- /* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
- * 2) We loop here on EIRR contents in order to avoid
- * nested interrupts or having to take another interrupt
- * when we could have just handled it right away.
- */
- for (;;) {
- unsigned long bit = (1UL << (BITS_PER_LONG - 1));
- unsigned int irq;
- eirr_val = mfctl(23) & cpu_eiem;
- if (!eirr_val)
- break;
-
- mtctl(eirr_val, 23); /* reset bits we are going to process */
-
- /* Work our way from MSb to LSb...same order we alloc EIRs */
- for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+ int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
- cpumask_t dest = irq_desc[irq].affinity;
+ cpumask_t dest;
#endif
- if (!(bit & eirr_val))
- continue;
- /* clear bit in mask - can exit loop sooner */
- eirr_val &= ~bit;
+ local_irq_disable();
+ irq_enter();
-#ifdef CONFIG_SMP
- /* FIXME: because generic set affinity mucks
- * with the affinity before sending it to us
- * we can get the situation where the affinity is
- * wrong for our CPU type interrupts */
- if (irq != TIMER_IRQ && irq != IPI_IRQ &&
- !cpu_isset(smp_processor_id(), dest)) {
- int cpu = first_cpu(dest);
-
- printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
- irq, smp_processor_id(), cpu);
- gsc_writel(irq + CPU_IRQ_BASE,
- cpu_data[cpu].hpa);
- continue;
- }
-#endif
+ eirr_val = mfctl(23) & cpu_eiem & global_ack_eiem &
+ per_cpu(local_ack_eiem, cpu);
+ if (!eirr_val)
+ goto set_out;
+ irq = eirr_to_irq(eirr_val);
- __do_IRQ(irq, regs);
- }
+#ifdef CONFIG_SMP
+ dest = irq_desc[irq].affinity;
+ if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
+ !cpu_isset(smp_processor_id(), dest)) {
+ int cpu = first_cpu(dest);
+
+ printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
+ irq, smp_processor_id(), cpu);
+ gsc_writel(irq + CPU_IRQ_BASE,
+ cpu_data[cpu].hpa);
+ goto set_out;
}
+#endif
+ __do_IRQ(irq, regs);
- set_eiem(cpu_eiem); /* restore original mask */
+ out:
irq_exit();
-}
+ return;
+ set_out:
+ set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+ goto out;
+}
static struct irqaction timer_action = {
.handler = timer_interrupt,
.name = "timer",
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU,
};
#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
.handler = ipi_interrupt,
.name = "IPI",
- .flags = IRQF_DISABLED,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
};
#endif
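
The rewrite centers on one expression: the EIEM actually programmed is cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu), so an interrupt stays masked from ->ack() until ->end() without touching cpu_eiem itself. Its companion eirr_to_irq() maps the most-significant set EIRR bit to the lowest IRQ number, the same MSB-first order the deleted loop walked. A standalone sketch of that mapping (the TIMER_IRQ value is assumed for illustration; the real one comes from asm/irq.h):

    #include <stdio.h>

    #define BITS_PER_LONG   (8 * (int)sizeof(unsigned long))
    #define TIMER_IRQ       0       /* assumed base value, illustration only */

    /* Minimal stand-in for the kernel's fls()/fls64(): 1-based index of
     * the highest set bit, 0 when no bit is set. */
    static int fls_long(unsigned long x)
    {
            int bit = 0;

            while (x) {
                    bit++;
                    x >>= 1;
            }
            return bit;
    }

    static int eirr_to_irq(unsigned long eirr)
    {
            return (BITS_PER_LONG - fls_long(eirr)) + TIMER_IRQ;
    }

    int main(void)
    {
            /* MSB => the timer (lowest IRQ), next bit => TIMER_IRQ + 1, ... */
            printf("%d\n", eirr_to_irq(1UL << (BITS_PER_LONG - 1)));  /* 0 */
            printf("%d\n", eirr_to_irq(1UL << (BITS_PER_LONG - 2)));  /* 1 */
            return 0;
    }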
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 99d7fca9310..fb81e5687e7 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -143,8 +143,9 @@ static int __init processor_probe(struct parisc_device *dev)
p = &cpu_data[cpuid];
boot_cpu_data.cpu_count++;
- /* initialize counters */
- memset(p, 0, sizeof(struct cpuinfo_parisc));
+ /* initialize counters - CPU 0 gets it_value set in time_init() */
+ if (cpuid)
+ memset(p, 0, sizeof(struct cpuinfo_parisc));
p->loops_per_jiffy = loops_per_jiffy;
p->dev = dev; /* Save IODC data in case we need it */
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index bb83880c5ee..ee6653edeb7 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -26,7 +26,6 @@
#include <linux/stddef.h>
#include <linux/compat.h>
#include <linux/elf.h>
-#include <linux/personality.h>
#include <asm/ucontext.h>
#include <asm/rt_sigframe.h>
#include <asm/uaccess.h>
@@ -433,13 +432,13 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (in_syscall) {
regs->gr[31] = haddr;
#ifdef __LP64__
- if (personality(current->personality) == PER_LINUX)
+ if (!test_thread_flag(TIF_32BIT))
sigframe_size |= 1;
#endif
} else {
unsigned long psw = USER_PSW;
#ifdef __LP64__
- if (personality(current->personality) == PER_LINUX)
+ if (!test_thread_flag(TIF_32BIT))
psw |= PSW_W;
#endif
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 98e40959a56..faad338f310 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -262,6 +262,9 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
this_cpu, which);
return IRQ_NONE;
} /* Switch */
+ /* let in any pending interrupts */
+ local_irq_enable();
+ local_irq_disable();
} /* while (ops) */
}
return IRQ_HANDLED;
@@ -430,8 +433,9 @@ smp_do_timer(struct pt_regs *regs)
static void __init
smp_cpu_init(int cpunum)
{
- extern int init_per_cpu(int); /* arch/parisc/kernel/setup.c */
+ extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */
extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
+ extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
/* Set modes and Enable floating point coprocessor */
(void) init_per_cpu(cpunum);
@@ -457,6 +461,7 @@ smp_cpu_init(int cpunum)
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQ's are enabled or pending */
+ start_cpu_itimer();
}
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 8b5df98e2b3..1db5588ceac 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -31,6 +31,8 @@
#include <linux/shm.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
+#include <linux/utsname.h>
+#include <linux/personality.h>
int sys_pipe(int __user *fildes)
{
@@ -248,3 +250,46 @@ asmlinkage int sys_free_hugepages(unsigned long addr)
{
return -EINVAL;
}
+
+long parisc_personality(unsigned long personality)
+{
+ long err;
+
+ if (personality(current->personality) == PER_LINUX32
+ && personality == PER_LINUX)
+ personality = PER_LINUX32;
+
+ err = sys_personality(personality);
+ if (err == PER_LINUX32)
+ err = PER_LINUX;
+
+ return err;
+}
+
+static inline int override_machine(char __user *mach) {
+#ifdef CONFIG_COMPAT
+ if (personality(current->personality) == PER_LINUX32) {
+ if (__put_user(0, mach + 6) ||
+ __put_user(0, mach + 7))
+ return -EFAULT;
+ }
+
+ return 0;
+#else /*!CONFIG_COMPAT*/
+ return 0;
+#endif /*CONFIG_COMPAT*/
+}
+
+long parisc_newuname(struct new_utsname __user *utsname)
+{
+ int err = 0;
+
+ down_read(&uts_sem);
+ if (copy_to_user(utsname, &system_utsname, sizeof(*utsname)))
+ err = -EFAULT;
+ up_read(&uts_sem);
+
+ if (!err) err = override_machine(utsname->machine);
+
+ return (long)err;
+}
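
parisc_personality() keeps a PER_LINUX32 task from silently switching itself to PER_LINUX, and reports PER_LINUX back so 32-bit userland sees the value it expects; parisc_newuname() likewise truncates the machine string at byte 6, turning "parisc64" into "parisc" for compat tasks. From user space the round trip looks like this sketch (behaviour inferred from the hunk above):

    #include <stdio.h>
    #include <sys/personality.h>

    int main(void)
    {
            /* In a PER_LINUX32 task on a 64-bit parisc kernel, this keeps
             * PER_LINUX32 in force yet reports PER_LINUX (0) back. */
            int prev = personality(PER_LINUX);

            printf("previous personality: %#x\n", prev);
            return 0;
    }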
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index e27b432f90a..701d66a596e 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -132,7 +132,7 @@
ENTRY_SAME(socketpair)
ENTRY_SAME(setpgid)
ENTRY_SAME(send)
- ENTRY_SAME(newuname)
+ ENTRY_OURS(newuname)
ENTRY_SAME(umask) /* 60 */
ENTRY_SAME(chroot)
ENTRY_SAME(ustat)
@@ -221,7 +221,7 @@
ENTRY_SAME(fchdir)
ENTRY_SAME(bdflush)
ENTRY_SAME(sysfs) /* 135 */
- ENTRY_SAME(personality)
+ ENTRY_OURS(personality)
ENTRY_SAME(ni_syscall) /* for afs_syscall */
ENTRY_SAME(setfsuid)
ENTRY_SAME(setfsgid)
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index ab641d67f55..b3496b592a2 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -32,8 +32,7 @@
#include <linux/timex.h>
-static long clocktick __read_mostly; /* timer cycles per tick */
-static long halftick __read_mostly;
+static unsigned long clocktick __read_mostly; /* timer cycles per tick */
#ifdef CONFIG_SMP
extern void smp_do_timer(struct pt_regs *regs);
@@ -41,46 +40,106 @@ extern void smp_do_timer(struct pt_regs *regs);
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
- long now;
- long next_tick;
- int nticks;
- int cpu = smp_processor_id();
+ unsigned long now;
+ unsigned long next_tick;
+ unsigned long cycles_elapsed;
+ unsigned long cycles_remainder;
+ unsigned int cpu = smp_processor_id();
+
+ /* gcc can optimize for "read-only" case with a local clocktick */
+ unsigned long cpt = clocktick;
profile_tick(CPU_PROFILING, regs);
- now = mfctl(16);
- /* initialize next_tick to time at last clocktick */
+ /* Initialize next_tick to the expected tick time. */
next_tick = cpu_data[cpu].it_value;
- /* since time passes between the interrupt and the mfctl()
- * above, it is never true that last_tick + clocktick == now. If we
- * never miss a clocktick, we could set next_tick = last_tick + clocktick
- * but maybe we'll miss ticks, hence the loop.
- *
- * Variables are *signed*.
+ /* Get current interval timer.
+ * CR16 reads as 64 bits in CPU wide mode.
+ * CR16 reads as 32 bits in CPU narrow mode.
*/
+ now = mfctl(16);
+
+ cycles_elapsed = now - next_tick;
- nticks = 0;
- while((next_tick - now) < halftick) {
- next_tick += clocktick;
- nticks++;
+ if ((cycles_elapsed >> 5) < cpt) {
+ /* use "cheap" math (add/subtract) instead
+ * of the more expensive div/mul method
+ */
+ cycles_remainder = cycles_elapsed;
+ while (cycles_remainder > cpt) {
+ cycles_remainder -= cpt;
+ }
+ } else {
+ cycles_remainder = cycles_elapsed % cpt;
}
- mtctl(next_tick, 16);
+
+ /* Can we differentiate between "early CR16" (aka Scenario 1) and
+ * "long delay" (aka Scenario 3)? I don't think so.
+ *
+ * We expected timer_interrupt to be delivered at least a few hundred
+ * cycles after the IT fires. But it's arbitrary how much time passes
+ * before we call it "late". I've picked one second.
+ */
+/* approximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
+#if HZ == 1000
+ if (cycles_elapsed > (cpt << 10) )
+#elif HZ == 250
+ if (cycles_elapsed > (cpt << 8) )
+#elif HZ == 100
+ if (cycles_elapsed > (cpt << 7) )
+#else
+#warning unexpected HZ value
+ if (cycles_elapsed > (HZ * cpt) )
+#endif
+ {
+ /* Scenario 3: very long delay? bad in any case */
+ printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
+ " cycles %lX rem %lX "
+ " next/now %lX/%lX\n",
+ cpu,
+ cycles_elapsed, cycles_remainder,
+ next_tick, now );
+ }
+
+ /* convert from "division remainder" to "remainder of clock tick" */
+ cycles_remainder = cpt - cycles_remainder;
+
+ /* Determine when (in CR16 cycles) next IT interrupt will fire.
+ * We want IT to fire modulo clocktick even if we miss/skip some.
+ * But those interrupts don't in fact get delivered that regularly.
+ */
+ next_tick = now + cycles_remainder;
+
cpu_data[cpu].it_value = next_tick;
- while (nticks--) {
+ /* Skip one clocktick on purpose if we are likely to miss next_tick.
+ * We want to avoid the new next_tick being less than CR16.
+ * If that happened, itimer wouldn't fire until CR16 wrapped.
+ * We'll catch the tick we missed on the tick after that.
+ */
+ if (!(cycles_remainder >> 13))
+ next_tick += cpt;
+
+ /* Program the IT when to deliver the next interrupt. */
+ /* Only bottom 32-bits of next_tick are written to cr16. */
+ mtctl(next_tick, 16);
+
+
+ /* Done mucking with unreliable delivery of interrupts.
+ * Go do system house keeping.
+ */
#ifdef CONFIG_SMP
- smp_do_timer(regs);
+ smp_do_timer(regs);
#else
- update_process_times(user_mode(regs));
+ update_process_times(user_mode(regs));
#endif
- if (cpu == 0) {
- write_seqlock(&xtime_lock);
- do_timer(1);
- write_sequnlock(&xtime_lock);
- }
+ if (cpu == 0) {
+ write_seqlock(&xtime_lock);
+ do_timer(regs);
+ write_sequnlock(&xtime_lock);
}
-
+
/* check soft power switch status */
if (cpu == 0 && !atomic_read(&power_tasklet.count))
tasklet_schedule(&power_tasklet);
@@ -106,14 +165,12 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
-/*** converted from ia64 ***/
/*
* Return the number of micro-seconds that elapsed since the last
* update to wall time (aka xtime). The xtime_lock
* must be at least read-locked when calling this routine.
*/
-static inline unsigned long
-gettimeoffset (void)
+static inline unsigned long gettimeoffset (void)
{
#ifndef CONFIG_SMP
/*
@@ -121,21 +178,44 @@ gettimeoffset (void)
* Once parisc-linux learns the cr16 difference between processors,
* this could be made to work.
*/
- long last_tick;
- long elapsed_cycles;
-
- /* it_value is the intended time of the next tick */
- last_tick = cpu_data[smp_processor_id()].it_value;
-
- /* Subtract one tick and account for possible difference between
- * when we expected the tick and when it actually arrived.
- * (aka wall vs real)
- */
- last_tick -= clocktick * (jiffies - wall_jiffies + 1);
- elapsed_cycles = mfctl(16) - last_tick;
+ unsigned long now;
+ unsigned long prev_tick;
+ unsigned long next_tick;
+ unsigned long elapsed_cycles;
+ unsigned long usec;
+ unsigned long cpuid = smp_processor_id();
+ unsigned long cpt = clocktick;
+
+ next_tick = cpu_data[cpuid].it_value;
+ now = mfctl(16); /* Read the hardware interval timer. */
+
+ prev_tick = next_tick - cpt;
+
+ /* Assume Scenario 1: "now" is later than prev_tick. */
+ elapsed_cycles = now - prev_tick;
+
+/* approximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
+#if HZ == 1000
+ if (elapsed_cycles > (cpt << 10) )
+#elif HZ == 250
+ if (elapsed_cycles > (cpt << 8) )
+#elif HZ == 100
+ if (elapsed_cycles > (cpt << 7) )
+#else
+#warning unexpected HZ value
+ if (elapsed_cycles > (HZ * cpt) )
+#endif
+ {
+ /* Scenario 3: clock ticks are missing. */
+ printk (KERN_CRIT "gettimeoffset(CPU %ld): missing %ld ticks!"
+ " cycles %lX prev/now/next %lX/%lX/%lX clock %lX\n",
+ cpuid, elapsed_cycles / cpt,
+ elapsed_cycles, prev_tick, now, next_tick, cpt);
+ }
- /* the precision of this math could be improved */
- return elapsed_cycles / (PAGE0->mem_10msec / 10000);
+ /* FIXME: Can we improve the precision? Not with PAGE0. */
+ usec = (elapsed_cycles * 10000) / PAGE0->mem_10msec;
+ return usec;
#else
return 0;
#endif
@@ -146,6 +226,7 @@ do_gettimeofday (struct timeval *tv)
{
unsigned long flags, seq, usec, sec;
+ /* Hold xtime_lock and adjust timeval. */
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
usec = gettimeoffset();
@@ -153,25 +234,13 @@ do_gettimeofday (struct timeval *tv)
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
- if (unlikely(usec > LONG_MAX)) {
- /* This can happen if the gettimeoffset adjustment is
- * negative and xtime.tv_nsec is smaller than the
- * adjustment */
- printk(KERN_ERR "do_gettimeofday() spurious xtime.tv_nsec of %ld\n", usec);
- usec += USEC_PER_SEC;
- --sec;
- /* This should never happen, it means the negative
- * time adjustment was more than a second, so there's
- * something seriously wrong */
- BUG_ON(usec > LONG_MAX);
- }
-
-
+ /* Move adjusted usec's into sec's. */
while (usec >= USEC_PER_SEC) {
usec -= USEC_PER_SEC;
++sec;
}
+ /* Return adjusted result. */
tv->tv_sec = sec;
tv->tv_usec = usec;
}
@@ -223,22 +292,23 @@ unsigned long long sched_clock(void)
}
+void __init start_cpu_itimer(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long next_tick = mfctl(16) + clocktick;
+
+ mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
+
+ cpu_data[cpu].it_value = next_tick;
+}
+
void __init time_init(void)
{
- unsigned long next_tick;
static struct pdc_tod tod_data;
clocktick = (100 * PAGE0->mem_10msec) / HZ;
- halftick = clocktick / 2;
- /* Setup clock interrupt timing */
-
- next_tick = mfctl(16);
- next_tick += clocktick;
- cpu_data[smp_processor_id()].it_value = next_tick;
-
- /* kick off Itimer (CR16) */
- mtctl(next_tick, 16);
+ start_cpu_itimer(); /* get CPU 0 started */
if(pdc_tod_read(&tod_data) == 0) {
write_seqlock_irq(&xtime_lock);
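
Two idioms in the new timer code merit a worked check. The lateness test approximates "elapsed/clocktick > HZ" with a shift: cpt << 10 is 1024*cpt for HZ == 1000, cpt << 8 is 256*cpt for HZ == 250, and cpt << 7 is 128*cpt for HZ == 100, so each threshold errs slightly above one second rather than below it. And when fewer than roughly 32 ticks have elapsed, the remainder comes from repeated subtraction rather than a modulo, which is cheaper on a CPU without a hardware divide. A standalone sketch of that path (cycle counts are example values):

    #include <stdio.h>

    int main(void)
    {
            unsigned long cpt = 250000;             /* cycles per tick, example */
            unsigned long cycles_elapsed = 260123;  /* just over one tick */
            unsigned long cycles_remainder;

            if ((cycles_elapsed >> 5) < cpt) {
                    /* fewer than ~32 ticks late: subtraction beats div/mod */
                    cycles_remainder = cycles_elapsed;
                    while (cycles_remainder > cpt)
                            cycles_remainder -= cpt;
            } else {
                    cycles_remainder = cycles_elapsed % cpt;
            }

            /* 260123 - 250000 = 10123, matching 260123 % 250000 */
            printf("remainder: %lu\n", cycles_remainder);
            return 0;
    }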
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 77b28cb8aca..65cd6ca32fe 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
+#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
@@ -245,6 +246,15 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
current->comm, current->pid, str, err);
show_regs(regs);
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops) {
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ ssleep(5);
+ panic("Fatal exception");
+ }
+
/* Wot's wrong wif bein' racy? */
if (current->thread.flags & PARISC_KERNEL_DEATH) {
printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 25ad28d63e8..0667f2b4f97 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -31,10 +31,7 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-extern char _text; /* start of kernel code, defined by linker */
extern int data_start;
-extern char _end; /* end of BSS, defined by linker */
-extern char __init_begin, __init_end;
#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
@@ -319,8 +316,8 @@ static void __init setup_bootmem(void)
reserve_bootmem_node(NODE_DATA(0), 0UL,
(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
- reserve_bootmem_node(NODE_DATA(0),__pa((unsigned long)&_text),
- (unsigned long)(&_end - &_text));
+ reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
+ (unsigned long)(_end - _text));
reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
@@ -355,8 +352,8 @@ static void __init setup_bootmem(void)
#endif
data_resource.start = virt_to_phys(&data_start);
- data_resource.end = virt_to_phys(&_end)-1;
- code_resource.start = virt_to_phys(&_text);
+ data_resource.end = virt_to_phys(_end) - 1;
+ code_resource.start = virt_to_phys(_text);
code_resource.end = virt_to_phys(&data_start)-1;
/* We don't know which region the kernel will be in, so try
@@ -385,12 +382,12 @@ void free_initmem(void)
*/
local_irq_disable();
- memset(&__init_begin, 0x00,
- (unsigned long)&__init_end - (unsigned long)&__init_begin);
+ memset(__init_begin, 0x00,
+ (unsigned long)__init_end - (unsigned long)__init_begin);
flush_data_cache();
asm volatile("sync" : : );
- flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
+ flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
asm volatile("sync" : : );
local_irq_enable();
@@ -398,8 +395,8 @@ void free_initmem(void)
/* align __init_begin and __init_end to page size,
ignoring linker script where we might have tried to save RAM */
- init_begin = PAGE_ALIGN((unsigned long)(&__init_begin));
- init_end = PAGE_ALIGN((unsigned long)(&__init_end));
+ init_begin = PAGE_ALIGN((unsigned long)(__init_begin));
+ init_end = PAGE_ALIGN((unsigned long)(__init_end));
for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
@@ -578,7 +575,7 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
extern const unsigned long fault_vector_20;
extern void * const linux_gateway_page;
- ro_start = __pa((unsigned long)&_text);
+ ro_start = __pa((unsigned long)_text);
ro_end = __pa((unsigned long)&data_start);
fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 27384567a1d..47a1d2ac941 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -188,7 +188,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
}
EXPORT_SYMBOL(__ioremap);
-void iounmap(void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
{
if (addr > high_memory)
return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));