Diffstat (limited to 'arch/sparc64')
-rw-r--r--   arch/sparc64/Kconfig                  |  43
-rw-r--r--   arch/sparc64/kernel/dtlb_backend.S    |   2
-rw-r--r--   arch/sparc64/kernel/entry.S           |  63
-rw-r--r--   arch/sparc64/kernel/irq.c             | 588
-rw-r--r--   arch/sparc64/kernel/kprobes.c         |   5
-rw-r--r--   arch/sparc64/kernel/pci_psycho.c      |   3
-rw-r--r--   arch/sparc64/kernel/pci_sabre.c       |  46
-rw-r--r--   arch/sparc64/kernel/pci_schizo.c      |  78
-rw-r--r--   arch/sparc64/kernel/power.c           |   3
-rw-r--r--   arch/sparc64/kernel/ptrace.c          |  46
-rw-r--r--   arch/sparc64/kernel/smp.c             |  10
-rw-r--r--   arch/sparc64/kernel/sparc64_ksyms.c   |   2
-rw-r--r--   arch/sparc64/kernel/sys32.S           |   2
-rw-r--r--   arch/sparc64/kernel/systbls.S         |   8
-rw-r--r--   arch/sparc64/kernel/time.c            |  26
-rw-r--r--   arch/sparc64/kernel/vmlinux.lds.S     |   2
-rw-r--r--   arch/sparc64/mm/fault.c               |  16
-rw-r--r--   arch/sparc64/mm/ultra.S               |   5
-rw-r--r--   arch/sparc64/solaris/entry64.S        |   7
19 files changed, 422 insertions, 533 deletions
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index e2b050eb3b9..9afd28e2c4d 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -43,6 +43,25 @@ config SPARC64_PAGE_SIZE_4MB endchoice +config SECCOMP + bool "Enable seccomp to safely compute untrusted bytecode" + depends on PROC_FS + default y + help + This kernel feature is useful for number crunching applications + that may need to compute untrusted bytecode during their + execution. By using pipes or other transports made available to + the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in + their own address space using seccomp. Once seccomp is + enabled via /proc/<pid>/seccomp, it cannot be disabled + and the task is only allowed to execute a few safe syscalls + defined by each seccomp mode. + + If unsure, say Y. Only embedded should say N here. + +source kernel/Kconfig.hz + source "init/Kconfig" config SYSVIPC_COMPAT @@ -444,6 +463,24 @@ config PRINTER If you have more than 8 printers, you need to increase the LP_NO macro in lp.c and the PARPORT_MAX macro in parport.h. +config PPDEV + tristate "Support for user-space parallel port device drivers" + depends on PARPORT + ---help--- + Saying Y to this adds support for /dev/parport device nodes. This + is needed for programs that want portable access to the parallel + port, for instance deviceid (which displays Plug-and-Play device + IDs). + + This is the parallel port equivalent of SCSI generic support (sg). + It is safe to say N to this -- it is not needed for normal printing + or parallel port CD-ROM/disk support. + + To compile this driver as a module, choose M here: the + module will be called ppdev. + + If unsure, say N. + config ENVCTRL tristate "SUNW, envctrl support" depends on PCI @@ -488,6 +525,8 @@ source "mm/Kconfig" endmenu +source "net/Kconfig" + source "drivers/base/Kconfig" source "drivers/video/Kconfig" @@ -514,7 +553,7 @@ endif source "drivers/ieee1394/Kconfig" -source "net/Kconfig" +source "drivers/net/Kconfig" source "drivers/isdn/Kconfig" @@ -610,6 +649,8 @@ source "drivers/input/Kconfig" source "drivers/i2c/Kconfig" +source "drivers/hwmon/Kconfig" + source "fs/Kconfig" source "drivers/media/Kconfig" diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S index b73a3c85877..538522848ad 100644 --- a/arch/sparc64/kernel/dtlb_backend.S +++ b/arch/sparc64/kernel/dtlb_backend.S @@ -16,7 +16,7 @@ #elif PAGE_SHIFT == 19 #define SZ_BITS _PAGE_SZ512K #elif PAGE_SHIFT == 22 -#define SZ_BITS _PAGE_SZ4M +#define SZ_BITS _PAGE_SZ4MB #endif #define VALID_SZ_BITS (_PAGE_VALID | SZ_BITS) diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index eee516a71c1..d781f10adc5 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -22,8 +22,6 @@ #include <asm/estate.h> #include <asm/auxio.h> -/* #define SYSCALL_TRACING 1 */ - #define curptr g6 #define NR_SYSCALLS 284 /* Each OS is different... 
*/ @@ -553,13 +551,11 @@ do_ivec: sllx %g3, 5, %g3 or %g2, %lo(ivector_table), %g2 add %g2, %g3, %g3 - ldx [%g3 + 0x08], %g2 /* irq_info */ ldub [%g3 + 0x04], %g4 /* pil */ - brz,pn %g2, do_ivec_spurious - mov 1, %g2 - + mov 1, %g2 sllx %g2, %g4, %g2 sllx %g4, 2, %g4 + lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */ @@ -567,9 +563,9 @@ do_ivec: retry do_ivec_xcall: mov 0x50, %g1 - ldxa [%g1 + %g0] ASI_INTR_R, %g1 srl %g3, 0, %g3 + mov 0x60, %g7 ldxa [%g7 + %g0] ASI_INTR_R, %g7 stxa %g0, [%g0] ASI_INTR_RECEIVE @@ -581,19 +577,6 @@ do_ivec_xcall: 1: jmpl %g3, %g0 nop -do_ivec_spurious: - stw %g3, [%g6 + 0x00] /* irq_work(cpu, 0) = bucket */ - rdpr %pstate, %g5 - - wrpr %g5, PSTATE_IG | PSTATE_AG, %pstate - sethi %hi(109f), %g7 - ba,pt %xcc, etrap -109: or %g7, %lo(109b), %g7 - call catch_disabled_ivec - add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - clr %l6 - .globl save_alternate_globals save_alternate_globals: /* %o0 = save_area */ rdpr %pstate, %o5 @@ -1569,11 +1552,12 @@ sys_ptrace: add %sp, PTREGS_OFF, %o0 nop .align 32 1: ldx [%curptr + TI_FLAGS], %l5 - andcc %l5, _TIF_SYSCALL_TRACE, %g0 + andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 be,pt %icc, rtrap clr %l6 + add %sp, PTREGS_OFF, %o0 call syscall_trace - nop + mov 1, %o1 ba,pt %xcc, rtrap clr %l6 @@ -1657,18 +1641,20 @@ linux_sparc_ni_syscall: or %l7, %lo(sys_ni_syscall), %l7 linux_syscall_trace32: + add %sp, PTREGS_OFF, %o0 call syscall_trace - nop + clr %o1 srl %i0, 0, %o0 - mov %i4, %o4 + srl %i4, 0, %o4 srl %i1, 0, %o1 srl %i2, 0, %o2 b,pt %xcc, 2f srl %i3, 0, %o3 linux_syscall_trace: + add %sp, PTREGS_OFF, %o0 call syscall_trace - nop + clr %o1 mov %i0, %o0 mov %i1, %o1 mov %i2, %o2 @@ -1686,11 +1672,6 @@ linux_sparc_syscall32: bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI srl %i0, 0, %o0 ! IEU0 sll %g1, 2, %l4 ! IEU0 Group -#ifdef SYSCALL_TRACING - call syscall_trace_entry - add %sp, PTREGS_OFF, %o0 - srl %i0, 0, %o0 -#endif srl %i4, 0, %o4 ! IEU1 lduw [%l7 + %l4], %l7 ! Load srl %i1, 0, %o1 ! IEU0 Group @@ -1698,7 +1679,7 @@ linux_sparc_syscall32: srl %i5, 0, %o5 ! IEU1 srl %i2, 0, %o2 ! IEU0 Group - andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU0 Group + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 bne,pn %icc, linux_syscall_trace32 ! CTI mov %i0, %l5 ! IEU1 call %l7 ! CTI Group brk forced @@ -1714,11 +1695,6 @@ linux_sparc_syscall: bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI mov %i0, %o0 ! IEU0 sll %g1, 2, %l4 ! IEU0 Group -#ifdef SYSCALL_TRACING - call syscall_trace_entry - add %sp, PTREGS_OFF, %o0 - mov %i0, %o0 -#endif mov %i1, %o1 ! IEU1 lduw [%l7 + %l4], %l7 ! Load 4: mov %i2, %o2 ! IEU0 Group @@ -1726,7 +1702,7 @@ linux_sparc_syscall: mov %i3, %o3 ! IEU1 mov %i4, %o4 ! IEU0 Group - andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU1 Group+1 bubble + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 bne,pn %icc, linux_syscall_trace ! CTI Group mov %i0, %l5 ! IEU0 2: call %l7 ! CTI Group brk forced @@ -1735,12 +1711,6 @@ linux_sparc_syscall: 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ret_sys_call: -#ifdef SYSCALL_TRACING - mov %o0, %o1 - call syscall_trace_exit - add %sp, PTREGS_OFF, %o0 - mov %o1, %o0 -#endif ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! 
pc = npc sra %o0, 0, %o0 @@ -1760,7 +1730,7 @@ ret_sys_call: 1: cmp %o0, -ERESTART_RESTARTBLOCK bgeu,pn %xcc, 1f - andcc %l0, _TIF_SYSCALL_TRACE, %l6 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 80: /* System call success, clear Carry condition code. */ andn %g3, %g2, %g3 @@ -1775,7 +1745,7 @@ ret_sys_call: /* System call failure, set Carry condition code. * Also, get abs(errno) to return to the process. */ - andcc %l0, _TIF_SYSCALL_TRACE, %l6 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 sub %g0, %o0, %o0 or %g3, %g2, %g3 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] @@ -1788,8 +1758,9 @@ ret_sys_call: b,pt %xcc, rtrap stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] linux_syscall_trace2: + add %sp, PTREGS_OFF, %o0 call syscall_trace - nop + mov 1, %o1 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] ba,pt %xcc, rtrap stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 42471257730..daa2fb93052 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -71,31 +71,7 @@ struct irq_work_struct { struct irq_work_struct __irq_work[NR_CPUS]; #define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)]) -#ifdef CONFIG_PCI -/* This is a table of physical addresses used to deal with IBF_DMA_SYNC. - * It is used for PCI only to synchronize DMA transfers with IRQ delivery - * for devices behind busses other than APB on Sabre systems. - * - * Currently these physical addresses are just config space accesses - * to the command register for that device. - */ -unsigned long pci_dma_wsync; -unsigned long dma_sync_reg_table[256]; -unsigned char dma_sync_reg_table_entry = 0; -#endif - -/* This is based upon code in the 32-bit Sparc kernel written mostly by - * David Redman (djhr@tadpole.co.uk). - */ -#define MAX_STATIC_ALLOC 4 -static struct irqaction static_irqaction[MAX_STATIC_ALLOC]; -static int static_irq_count; - -/* This is exported so that fast IRQ handlers can get at it... -DaveM */ -struct irqaction *irq_action[NR_IRQS+1] = { - NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL -}; +static struct irqaction *irq_action[NR_IRQS+1]; /* This only synchronizes entities which modify IRQ handler * state and some selected user-level spots that want to @@ -241,17 +217,22 @@ void disable_irq(unsigned int irq) * the CPU %tick register and not by some normal vectored interrupt * source. To handle this special case, we use this dummy INO bucket. */ +static struct irq_desc pil0_dummy_desc; static struct ino_bucket pil0_dummy_bucket = { - 0, /* irq_chain */ - 0, /* pil */ - 0, /* pending */ - 0, /* flags */ - 0, /* __unused */ - NULL, /* irq_info */ - 0UL, /* iclr */ - 0UL, /* imap */ + .irq_info = &pil0_dummy_desc, }; +static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup, + unsigned long iclr, unsigned long imap, + struct ino_bucket *bucket) +{ + prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> " + "(%d:%d:%016lx:%016lx), halting...\n", + ino, bucket->pil, bucket->iclr, bucket->imap, + pil, inofixup, iclr, imap); + prom_halt(); +} + unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap) { struct ino_bucket *bucket; @@ -280,28 +261,35 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long prom_halt(); } - /* Ok, looks good, set it up. Don't touch the irq_chain or - * the pending flag. 
- */ bucket = &ivector_table[ino]; - if ((bucket->flags & IBF_ACTIVE) || - (bucket->irq_info != NULL)) { - /* This is a gross fatal error if it happens here. */ - prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n"); - prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n", - ino, pil, inofixup, iclr, imap); - prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n", - bucket->pil, bucket->iclr, bucket->imap); - prom_printf("IRQ: Cannot continue, halting...\n"); + if (bucket->flags & IBF_ACTIVE) + build_irq_error("IRQ: Trying to build active INO bucket.\n", + ino, pil, inofixup, iclr, imap, bucket); + + if (bucket->irq_info) { + if (bucket->imap != imap || bucket->iclr != iclr) + build_irq_error("IRQ: Trying to reinit INO bucket.\n", + ino, pil, inofixup, iclr, imap, bucket); + + goto out; + } + + bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); + if (!bucket->irq_info) { + prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); prom_halt(); } + memset(bucket->irq_info, 0, sizeof(struct irq_desc)); + + /* Ok, looks good, set it up. Don't touch the irq_chain or + * the pending flag. + */ bucket->imap = imap; bucket->iclr = iclr; bucket->pil = pil; bucket->flags = 0; - bucket->irq_info = NULL; - +out: return __irq(bucket); } @@ -319,26 +307,65 @@ static void atomic_bucket_insert(struct ino_bucket *bucket) __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); } +static int check_irq_sharing(int pil, unsigned long irqflags) +{ + struct irqaction *action, *tmp; + + action = *(irq_action + pil); + if (action) { + if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) { + for (tmp = action; tmp->next; tmp = tmp->next) + ; + } else { + return -EBUSY; + } + } + return 0; +} + +static void append_irq_action(int pil, struct irqaction *action) +{ + struct irqaction **pp = irq_action + pil; + + while (*pp) + pp = &((*pp)->next); + *pp = action; +} + +static struct irqaction *get_action_slot(struct ino_bucket *bucket) +{ + struct irq_desc *desc = bucket->irq_info; + int max_irq, i; + + max_irq = 1; + if (bucket->flags & IBF_PCI) + max_irq = MAX_IRQ_DESC_ACTION; + for (i = 0; i < max_irq; i++) { + struct irqaction *p = &desc->action[i]; + u32 mask = (1 << i); + + if (desc->action_active_mask & mask) + continue; + + desc->action_active_mask |= mask; + return p; + } + return NULL; +} + int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char *name, void *dev_id) { - struct irqaction *action, *tmp = NULL; + struct irqaction *action; struct ino_bucket *bucket = __bucket(irq); unsigned long flags; int pending = 0; - if ((bucket != &pil0_dummy_bucket) && - (bucket < &ivector_table[0] || - bucket >= &ivector_table[NUM_IVECS])) { - unsigned int *caller; - - __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); - printk(KERN_CRIT "request_irq: Old style IRQ registry attempt " - "from %p, irq %08x.\n", caller, irq); + if (unlikely(!handler)) return -EINVAL; - } - if (!handler) - return -EINVAL; + + if (unlikely(!bucket->irq_info)) + return -ENODEV; if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) { /* @@ -356,93 +383,20 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_ spin_lock_irqsave(&irq_action_lock, flags); - action = *(bucket->pil + irq_action); - if (action) { - if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) - for (tmp = action; tmp->next; tmp = tmp->next) - ; - else { - spin_unlock_irqrestore(&irq_action_lock, flags); - return -EBUSY; - } - 
action = NULL; /* Or else! */ + if (check_irq_sharing(bucket->pil, irqflags)) { + spin_unlock_irqrestore(&irq_action_lock, flags); + return -EBUSY; } - /* If this is flagged as statically allocated then we use our - * private struct which is never freed. - */ - if (irqflags & SA_STATIC_ALLOC) { - if (static_irq_count < MAX_STATIC_ALLOC) - action = &static_irqaction[static_irq_count++]; - else - printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed " - "using kmalloc\n", irq, name); - } - if (action == NULL) - action = (struct irqaction *)kmalloc(sizeof(struct irqaction), - GFP_ATOMIC); - + action = get_action_slot(bucket); if (!action) { spin_unlock_irqrestore(&irq_action_lock, flags); return -ENOMEM; } - if (bucket == &pil0_dummy_bucket) { - bucket->irq_info = action; - bucket->flags |= IBF_ACTIVE; - } else { - if ((bucket->flags & IBF_ACTIVE) != 0) { - void *orig = bucket->irq_info; - void **vector = NULL; - - if ((bucket->flags & IBF_PCI) == 0) { - printk("IRQ: Trying to share non-PCI bucket.\n"); - goto free_and_ebusy; - } - if ((bucket->flags & IBF_MULTI) == 0) { - vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC); - if (vector == NULL) - goto free_and_enomem; - - /* We might have slept. */ - if ((bucket->flags & IBF_MULTI) != 0) { - int ent; - - kfree(vector); - vector = (void **)bucket->irq_info; - for(ent = 0; ent < 4; ent++) { - if (vector[ent] == NULL) { - vector[ent] = action; - break; - } - } - if (ent == 4) - goto free_and_ebusy; - } else { - vector[0] = orig; - vector[1] = action; - vector[2] = NULL; - vector[3] = NULL; - bucket->irq_info = vector; - bucket->flags |= IBF_MULTI; - } - } else { - int ent; - - vector = (void **)orig; - for (ent = 0; ent < 4; ent++) { - if (vector[ent] == NULL) { - vector[ent] = action; - break; - } - } - if (ent == 4) - goto free_and_ebusy; - } - } else { - bucket->irq_info = action; - bucket->flags |= IBF_ACTIVE; - } + bucket->flags |= IBF_ACTIVE; + pending = 0; + if (bucket != &pil0_dummy_bucket) { pending = bucket->pending; if (pending) bucket->pending = 0; @@ -456,10 +410,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_ put_ino_in_irqaction(action, irq); put_smpaff_in_irqaction(action, CPU_MASK_NONE); - if (tmp) - tmp->next = action; - else - *(bucket->pil + irq_action) = action; + append_irq_action(bucket->pil, action); enable_irq(irq); @@ -468,147 +419,103 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_ atomic_bucket_insert(bucket); set_softint(1 << bucket->pil); } + spin_unlock_irqrestore(&irq_action_lock, flags); - if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC))) + + if (bucket != &pil0_dummy_bucket) register_irq_proc(__irq_ino(irq)); #ifdef CONFIG_SMP distribute_irqs(); #endif return 0; - -free_and_ebusy: - kfree(action); - spin_unlock_irqrestore(&irq_action_lock, flags); - return -EBUSY; - -free_and_enomem: - kfree(action); - spin_unlock_irqrestore(&irq_action_lock, flags); - return -ENOMEM; } EXPORT_SYMBOL(request_irq); -void free_irq(unsigned int irq, void *dev_id) +static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id) { - struct irqaction *action; - struct irqaction *tmp = NULL; - unsigned long flags; - struct ino_bucket *bucket = __bucket(irq), *bp; + struct ino_bucket *bucket = __bucket(irq); + struct irqaction *action, **pp; - if ((bucket != &pil0_dummy_bucket) && - (bucket < &ivector_table[0] || - bucket >= &ivector_table[NUM_IVECS])) { - unsigned int *caller; + pp = irq_action + bucket->pil; + action = *pp; + if 
(unlikely(!action)) + return NULL; - __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); - printk(KERN_CRIT "free_irq: Old style IRQ removal attempt " - "from %p, irq %08x.\n", caller, irq); - return; - } - - spin_lock_irqsave(&irq_action_lock, flags); - - action = *(bucket->pil + irq_action); - if (!action->handler) { + if (unlikely(!action->handler)) { printk("Freeing free IRQ %d\n", bucket->pil); - return; - } - if (dev_id) { - for ( ; action; action = action->next) { - if (action->dev_id == dev_id) - break; - tmp = action; - } - if (!action) { - printk("Trying to free free shared IRQ %d\n", bucket->pil); - spin_unlock_irqrestore(&irq_action_lock, flags); - return; - } - } else if (action->flags & SA_SHIRQ) { - printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil); - spin_unlock_irqrestore(&irq_action_lock, flags); - return; + return NULL; } - if (action->flags & SA_STATIC_ALLOC) { - printk("Attempt to free statically allocated IRQ %d (%s)\n", - bucket->pil, action->name); - spin_unlock_irqrestore(&irq_action_lock, flags); - return; + while (action && action->dev_id != dev_id) { + pp = &action->next; + action = *pp; } - if (action && tmp) - tmp->next = action->next; - else - *(bucket->pil + irq_action) = action->next; + if (likely(action)) + *pp = action->next; + + return action; +} + +void free_irq(unsigned int irq, void *dev_id) +{ + struct irqaction *action; + struct ino_bucket *bucket; + unsigned long flags; + + spin_lock_irqsave(&irq_action_lock, flags); + + action = unlink_irq_action(irq, dev_id); spin_unlock_irqrestore(&irq_action_lock, flags); + if (unlikely(!action)) + return; + synchronize_irq(irq); spin_lock_irqsave(&irq_action_lock, flags); + bucket = __bucket(irq); if (bucket != &pil0_dummy_bucket) { + struct irq_desc *desc = bucket->irq_info; unsigned long imap = bucket->imap; - void **vector, *orig; - int ent; - - orig = bucket->irq_info; - vector = (void **)orig; - - if ((bucket->flags & IBF_MULTI) != 0) { - int other = 0; - void *orphan = NULL; - for (ent = 0; ent < 4; ent++) { - if (vector[ent] == action) - vector[ent] = NULL; - else if (vector[ent] != NULL) { - orphan = vector[ent]; - other++; - } - } + int ent, i; - /* Only free when no other shared irq - * uses this bucket. - */ - if (other) { - if (other == 1) { - /* Convert back to non-shared bucket. */ - bucket->irq_info = orphan; - bucket->flags &= ~(IBF_MULTI); - kfree(vector); - } - goto out; + for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { + struct irqaction *p = &desc->action[i]; + + if (p == action) { + desc->action_active_mask &= ~(1 << i); + break; } - } else { - bucket->irq_info = NULL; } - /* This unique interrupt source is now inactive. */ - bucket->flags &= ~IBF_ACTIVE; + if (!desc->action_active_mask) { + /* This unique interrupt source is now inactive. */ + bucket->flags &= ~IBF_ACTIVE; - /* See if any other buckets share this bucket's IMAP - * and are still active. - */ - for (ent = 0; ent < NUM_IVECS; ent++) { - bp = &ivector_table[ent]; - if (bp != bucket && - bp->imap == imap && - (bp->flags & IBF_ACTIVE) != 0) - break; - } + /* See if any other buckets share this bucket's IMAP + * and are still active. + */ + for (ent = 0; ent < NUM_IVECS; ent++) { + struct ino_bucket *bp = &ivector_table[ent]; + if (bp != bucket && + bp->imap == imap && + (bp->flags & IBF_ACTIVE) != 0) + break; + } - /* Only disable when no other sub-irq levels of - * the same IMAP are active. 
- */ - if (ent == NUM_IVECS) - disable_irq(irq); + /* Only disable when no other sub-irq levels of + * the same IMAP are active. + */ + if (ent == NUM_IVECS) + disable_irq(irq); + } } -out: - kfree(action); spin_unlock_irqrestore(&irq_action_lock, flags); } @@ -647,99 +554,55 @@ void synchronize_irq(unsigned int irq) } #endif /* CONFIG_SMP */ -void catch_disabled_ivec(struct pt_regs *regs) +static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs) { - int cpu = smp_processor_id(); - struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0)); + struct irq_desc *desc = bp->irq_info; + unsigned char flags = bp->flags; + u32 action_mask, i; + int random; - /* We can actually see this on Ultra/PCI PCI cards, which are bridges - * to other devices. Here a single IMAP enabled potentially multiple - * unique interrupt sources (which each do have a unique ICLR register. - * - * So what we do is just register that the IVEC arrived, when registered - * for real the request_irq() code will check the bit and signal - * a local CPU interrupt for it. - */ -#if 0 - printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n", - bucket - &ivector_table[0], regs->tpc); -#endif - *irq_work(cpu, 0) = 0; - bucket->pending = 1; -} - -/* Tune this... */ -#define FORWARD_VOLUME 12 - -#ifdef CONFIG_SMP - -static inline void redirect_intr(int cpu, struct ino_bucket *bp) -{ - /* Ok, here is what is going on: - * 1) Retargeting IRQs on Starfire is very - * expensive so just forget about it on them. - * 2) Moving around very high priority interrupts - * is a losing game. - * 3) If the current cpu is idle, interrupts are - * useful work, so keep them here. But do not - * pass to our neighbour if he is not very idle. - * 4) If sysadmin explicitly asks for directed intrs, - * Just Do It. - */ - struct irqaction *ap = bp->irq_info; - cpumask_t cpu_mask; - unsigned int buddy, ticks; + bp->flags |= IBF_INPROGRESS; - cpu_mask = get_smpaff_in_irqaction(ap); - cpus_and(cpu_mask, cpu_mask, cpu_online_map); - if (cpus_empty(cpu_mask)) - cpu_mask = cpu_online_map; - - if (this_is_starfire != 0 || - bp->pil >= 10 || current->pid == 0) + if (unlikely(!(flags & IBF_ACTIVE))) { + bp->pending = 1; goto out; - - /* 'cpu' is the MID (ie. UPAID), calculate the MID - * of our buddy. - */ - buddy = cpu + 1; - if (buddy >= NR_CPUS) - buddy = 0; - - ticks = 0; - while (!cpu_isset(buddy, cpu_mask)) { - if (++buddy >= NR_CPUS) - buddy = 0; - if (++ticks > NR_CPUS) { - put_smpaff_in_irqaction(ap, CPU_MASK_NONE); - goto out; - } } - if (buddy == cpu) - goto out; + if (desc->pre_handler) + desc->pre_handler(bp, + desc->pre_handler_arg1, + desc->pre_handler_arg2); - /* Voo-doo programming. */ - if (cpu_data(buddy).idle_volume < FORWARD_VOLUME) - goto out; + action_mask = desc->action_active_mask; + random = 0; + for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { + struct irqaction *p = &desc->action[i]; + u32 mask = (1 << i); - /* This just so happens to be correct on Cheetah - * at the moment. - */ - buddy <<= 26; + if (!(action_mask & mask)) + continue; - /* Push it to our buddy. 
*/ - upa_writel(buddy | IMAP_VALID, bp->imap); + action_mask &= ~mask; + if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED) + random |= p->flags; + + if (!action_mask) + break; + } + if (bp->pil != 0) { + upa_writel(ICLR_IDLE, bp->iclr); + /* Test and add entropy */ + if (random & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + } out: - return; + bp->flags &= ~IBF_INPROGRESS; } -#endif - void handler_irq(int irq, struct pt_regs *regs) { - struct ino_bucket *bp, *nbp; + struct ino_bucket *bp; int cpu = smp_processor_id(); #ifndef CONFIG_SMP @@ -757,8 +620,6 @@ void handler_irq(int irq, struct pt_regs *regs) clear_softint(clr_mask); } #else - int should_forward = 0; - clear_softint(1 << irq); #endif @@ -773,63 +634,12 @@ void handler_irq(int irq, struct pt_regs *regs) #else bp = __bucket(xchg32(irq_work(cpu, irq), 0)); #endif - for ( ; bp != NULL; bp = nbp) { - unsigned char flags = bp->flags; - unsigned char random = 0; + while (bp) { + struct ino_bucket *nbp = __bucket(bp->irq_chain); - nbp = __bucket(bp->irq_chain); bp->irq_chain = 0; - - bp->flags |= IBF_INPROGRESS; - - if ((flags & IBF_ACTIVE) != 0) { -#ifdef CONFIG_PCI - if ((flags & IBF_DMA_SYNC) != 0) { - upa_readl(dma_sync_reg_table[bp->synctab_ent]); - upa_readq(pci_dma_wsync); - } -#endif - if ((flags & IBF_MULTI) == 0) { - struct irqaction *ap = bp->irq_info; - int ret; - - ret = ap->handler(__irq(bp), ap->dev_id, regs); - if (ret == IRQ_HANDLED) - random |= ap->flags; - } else { - void **vector = (void **)bp->irq_info; - int ent; - for (ent = 0; ent < 4; ent++) { - struct irqaction *ap = vector[ent]; - if (ap != NULL) { - int ret; - - ret = ap->handler(__irq(bp), - ap->dev_id, - regs); - if (ret == IRQ_HANDLED) - random |= ap->flags; - } - } - } - /* Only the dummy bucket lacks IMAP/ICLR. */ - if (bp->pil != 0) { -#ifdef CONFIG_SMP - if (should_forward) { - redirect_intr(cpu, bp); - should_forward = 0; - } -#endif - upa_writel(ICLR_IDLE, bp->iclr); - - /* Test and add entropy */ - if (random & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); - } - } else - bp->pending = 1; - - bp->flags &= ~IBF_INPROGRESS; + process_bucket(irq, bp, regs); + bp = nbp; } irq_exit(); } @@ -959,7 +769,10 @@ static void distribute_irqs(void) */ for (level = 1; level < NR_IRQS; level++) { struct irqaction *p = irq_action[level]; - if (level == 12) continue; + + if (level == 12) + continue; + while(p) { cpu = retarget_one_irq(p, cpu); p = p->next; @@ -1104,7 +917,8 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data) { struct ino_bucket *bp = ivector_table + (long)data; - struct irqaction *ap = bp->irq_info; + struct irq_desc *desc = bp->irq_info; + struct irqaction *ap = desc->action; cpumask_t mask; int len; @@ -1122,11 +936,13 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off, static inline void set_intr_affinity(int irq, cpumask_t hw_aff) { struct ino_bucket *bp = ivector_table + irq; + struct irq_desc *desc = bp->irq_info; + struct irqaction *ap = desc->action; /* Users specify affinity in terms of hw cpu ids. * As soon as we do this, handler_irq() might see and take action. */ - put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff); + put_smpaff_in_irqaction(ap, hw_aff); /* Migration is simply done by the next cpu to service this * interrupt. 
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c index bdac631cf01..bbf11f85dab 100644 --- a/arch/sparc64/kernel/kprobes.c +++ b/arch/sparc64/kernel/kprobes.c @@ -433,3 +433,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } +/* architecture specific initialization */ +int arch_init_kprobes(void) +{ + return 0; +} diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index 534320ef0db..91ab466d6c6 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -1303,8 +1303,7 @@ static void psycho_controller_hwinit(struct pci_controller_info *p) { u64 tmp; - /* PROM sets the IRQ retry value too low, increase it. */ - psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 0xff); + psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 5); /* Enable arbiter for all PCI slots. */ tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL); diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index 53d333b4a4e..52bf3431a42 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -595,6 +595,23 @@ static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino) return ret; } +/* When a device lives behind a bridge deeper in the PCI bus topology + * than APB, a special sequence must run to make sure all pending DMA + * transfers at the time of IRQ delivery are visible in the coherency + * domain by the cpu. This sequence is to perform a read on the far + * side of the non-APB bridge, then perform a read of Sabre's DMA + * write-sync register. + */ +static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2) +{ + struct pci_dev *pdev = _arg1; + unsigned long sync_reg = (unsigned long) _arg2; + u16 _unused; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused); + sabre_read(sync_reg); +} + static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm, struct pci_dev *pdev, unsigned int ino) @@ -639,24 +656,14 @@ static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm, if (pdev) { struct pcidev_cookie *pcp = pdev->sysdata; - /* When a device lives behind a bridge deeper in the - * PCI bus topology than APB, a special sequence must - * run to make sure all pending DMA transfers at the - * time of IRQ delivery are visible in the coherency - * domain by the cpu. This sequence is to perform - * a read on the far side of the non-APB bridge, then - * perform a read of Sabre's DMA write-sync register. - * - * Currently, the PCI_CONFIG register for the device - * is used for this read from the far side of the bridge. 
- */ if (pdev->bus->number != pcp->pbm->pci_first_busno) { - bucket->flags |= IBF_DMA_SYNC; - bucket->synctab_ent = dma_sync_reg_table_entry++; - dma_sync_reg_table[bucket->synctab_ent] = - (unsigned long) sabre_pci_config_mkaddr( - pcp->pbm, - pdev->bus->number, pdev->devfn, PCI_COMMAND); + struct pci_controller_info *p = pcp->pbm->parent; + struct irq_desc *d = bucket->irq_info; + + d->pre_handler = sabre_wsync_handler; + d->pre_handler_arg1 = pdev; + d->pre_handler_arg2 = (void *) + p->pbm_A.controller_regs + SABRE_WRSYNC; } } return __irq(bucket); @@ -1626,10 +1633,9 @@ void __init sabre_init(int pnode, char *model_name) */ p->pbm_A.controller_regs = pr_regs[0].phys_addr; p->pbm_B.controller_regs = pr_regs[0].phys_addr; - pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC; - printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n", - p->pbm_A.controller_regs, pci_dma_wsync); + printk("PCI: Found SABRE, main regs at %016lx\n", + p->pbm_A.controller_regs); /* Clear interrupts */ diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index 5753175b94e..6a182bb6628 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -15,6 +15,7 @@ #include <asm/iommu.h> #include <asm/irq.h> #include <asm/upa.h> +#include <asm/pstate.h> #include "pci_impl.h" #include "iommu_common.h" @@ -326,6 +327,44 @@ static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino) return ret; } +static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2) +{ + unsigned long sync_reg = (unsigned long) _arg2; + u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO); + u64 val; + int limit; + + schizo_write(sync_reg, mask); + + limit = 100000; + val = 0; + while (--limit) { + val = schizo_read(sync_reg); + if (!(val & mask)) + break; + } + if (limit <= 0) { + printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n", + val, mask); + } + + if (_arg1) { + static unsigned char cacheline[64] + __attribute__ ((aligned (64))); + + __asm__ __volatile__("rd %%fprs, %0\n\t" + "or %0, %4, %1\n\t" + "wr %1, 0x0, %%fprs\n\t" + "stda %%f0, [%5] %6\n\t" + "wr %0, 0x0, %%fprs\n\t" + "membar #Sync" + : "=&r" (mask), "=&r" (val) + : "0" (mask), "1" (val), + "i" (FPRS_FEF), "r" (&cacheline[0]), + "i" (ASI_BLK_COMMIT_P)); + } +} + static unsigned int schizo_irq_build(struct pci_pbm_info *pbm, struct pci_dev *pdev, unsigned int ino) @@ -369,6 +408,15 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm, bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap)); bucket->flags |= IBF_PCI; + if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { + struct irq_desc *p = bucket->irq_info; + + p->pre_handler = tomatillo_wsync_handler; + p->pre_handler_arg1 = ((pbm->chip_version <= 4) ? + (void *) 1 : (void *) 0); + p->pre_handler_arg2 = (void *) pbm->sync_reg; + } + return __irq(bucket); } @@ -885,6 +933,7 @@ static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs) #define SCHIZO_PCI_CTRL (0x2000UL) #define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */ +#define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */ #define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */ #define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */ #define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */ @@ -1887,37 +1936,27 @@ static void __init schizo_pbm_hw_init(struct pci_pbm_info *pbm) { u64 tmp; - /* Set IRQ retry to infinity. 
*/ - schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY, - SCHIZO_IRQ_RETRY_INF); + schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY, 5); - /* Enable arbiter for all PCI slots. Also, disable PCI interval - * timer so that DTO (Discard TimeOuts) are not reported because - * some Schizo revisions report them erroneously. - */ tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL); - if (pbm->chip_type == PBM_CHIP_TYPE_SCHIZO_PLUS && - pbm->chip_version == 0x5 && - pbm->chip_revision == 0x1) - tmp |= 0x0f; - else - tmp |= 0xff; - tmp &= ~SCHIZO_PCICTRL_PTO; + /* Enable arbiter for all PCI slots. */ + tmp |= 0xff; + if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && pbm->chip_version >= 0x2) tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT; - else - tmp |= 0x1UL << SCHIZO_PCICTRL_PTO_SHIFT; if (!prom_getbool(pbm->prom_node, "no-bus-parking")) tmp |= SCHIZO_PCICTRL_PARK; + else + tmp &= ~SCHIZO_PCICTRL_PARK; if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && pbm->chip_version <= 0x1) - tmp |= (1UL << 61); + tmp |= SCHIZO_PCICTRL_DTO_INT; else - tmp &= ~(1UL << 61); + tmp &= ~SCHIZO_PCICTRL_DTO_INT; if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) tmp |= (SCHIZO_PCICTRL_MRM_PREF | @@ -2015,6 +2054,9 @@ static void __init schizo_pbm_init(struct pci_controller_info *p, pbm->pbm_regs = pr_regs[0].phys_addr; pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL; + if (chip_type == PBM_CHIP_TYPE_TOMATILLO) + pbm->sync_reg = pr_regs[3].phys_addr + 0x1a18UL; + sprintf(pbm->name, (chip_type == PBM_CHIP_TYPE_TOMATILLO ? "TOMATILLO%d PBM%c" : diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c index 52f14e399b1..533104c7907 100644 --- a/arch/sparc64/kernel/power.c +++ b/arch/sparc64/kernel/power.c @@ -4,6 +4,8 @@ * Copyright (C) 1999 David S. Miller (davem@redhat.com) */ +#define __KERNEL_SYSCALLS__ + #include <linux/config.h> #include <linux/kernel.h> #include <linux/module.h> @@ -17,7 +19,6 @@ #include <asm/ebus.h> #include <asm/auxio.h> -#define __KERNEL_SYSCALLS__ #include <linux/unistd.h> /* diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c index 80a76e2ad73..23ad839d113 100644 --- a/arch/sparc64/kernel/ptrace.c +++ b/arch/sparc64/kernel/ptrace.c @@ -19,6 +19,8 @@ #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/security.h> +#include <linux/seccomp.h> +#include <linux/audit.h> #include <linux/signal.h> #include <asm/asi.h> @@ -628,15 +630,27 @@ out: unlock_kernel(); } -asmlinkage void syscall_trace(void) +asmlinkage void syscall_trace(struct pt_regs *regs, int syscall_exit_p) { -#ifdef DEBUG_PTRACE - printk("%s [%d]: syscall_trace\n", current->comm, current->pid); -#endif - if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return; + /* do the secure computing check first */ + secure_computing(regs->u_regs[UREG_G1]); + + if (unlikely(current->audit_context) && syscall_exit_p) { + unsigned long tstate = regs->tstate; + int result = AUDITSC_SUCCESS; + + if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) + result = AUDITSC_FAILURE; + + audit_syscall_exit(current, result, regs->u_regs[UREG_I0]); + } + if (!(current->ptrace & PT_PTRACED)) - return; + goto out; + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + goto out; + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); @@ -645,12 +659,20 @@ asmlinkage void syscall_trace(void) * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. 
-brl */ -#ifdef DEBUG_PTRACE - printk("%s [%d]: syscall_trace exit= %x\n", current->comm, - current->pid, current->exit_code); -#endif if (current->exit_code) { - send_sig (current->exit_code, current, 1); + send_sig(current->exit_code, current, 1); current->exit_code = 0; } + +out: + if (unlikely(current->audit_context) && !syscall_exit_p) + audit_syscall_entry(current, + (test_thread_flag(TIF_32BIT) ? + AUDIT_ARCH_SPARC : + AUDIT_ARCH_SPARC64), + regs->u_regs[UREG_G1], + regs->u_regs[UREG_I0], + regs->u_regs[UREG_I1], + regs->u_regs[UREG_I2], + regs->u_regs[UREG_I3]); } diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index e5b9c7a2778..7e8e2919e18 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -45,8 +45,8 @@ extern void calibrate_delay(void); /* Please don't make this stuff initdata!!! --DaveM */ static unsigned char boot_cpu_id; -cpumask_t cpu_online_map = CPU_MASK_NONE; -cpumask_t phys_cpu_present_map = CPU_MASK_NONE; +cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; +cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; static cpumask_t smp_commenced_mask; static cpumask_t cpu_callout_map; @@ -155,7 +155,7 @@ void cpu_panic(void) panic("SMP bolixed\n"); } -static unsigned long current_tick_offset; +static unsigned long current_tick_offset __read_mostly; /* This tick register synchronization scheme is taken entirely from * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. @@ -1193,8 +1193,8 @@ void smp_send_stop(void) { } -unsigned long __per_cpu_base; -unsigned long __per_cpu_shift; +unsigned long __per_cpu_base __read_mostly; +unsigned long __per_cpu_shift __read_mostly; EXPORT_SYMBOL(__per_cpu_base); EXPORT_SYMBOL(__per_cpu_shift); diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 56cd96f4a5c..9202d925a9c 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -79,7 +79,7 @@ extern void linux_sparc_syscall(void); extern void rtrap(void); extern void show_regs(struct pt_regs *); extern void solaris_syscall(void); -extern void syscall_trace(void); +extern void syscall_trace(struct pt_regs *, int); extern u32 sunos_sys_table[], sys_call_table32[]; extern void tl0_solaris(void); extern void sys_sigsuspend(void); diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S index 5a95e98c531..5f9e4fae612 100644 --- a/arch/sparc64/kernel/sys32.S +++ b/arch/sparc64/kernel/sys32.S @@ -135,6 +135,8 @@ SIGN2(sys32_shutdown, sys_shutdown, %o0, %o1) SIGN3(sys32_socketpair, sys_socketpair, %o0, %o1, %o2) SIGN1(sys32_getpeername, sys_getpeername, %o0) SIGN1(sys32_getsockname, sys_getsockname, %o0) +SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1) +SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2) .globl sys32_mmap2 sys32_mmap2: diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S index a5e36a4c892..bceb91a8a2b 100644 --- a/arch/sparc64/kernel/systbls.S +++ b/arch/sparc64/kernel/systbls.S @@ -59,11 +59,11 @@ sys_call_table32: /*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall .word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname /*190*/ .word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl - .word sys32_epoll_wait, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask + .word sys32_epoll_wait, sys32_ioprio_set, sys_getppid, sys32_sigaction, sys_sgetmask 
/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64 /*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo - .word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex + .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, sys32_adjtimex /*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16 /*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64 @@ -125,11 +125,11 @@ sys_call_table: /*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sparc64_newuname /*190*/ .word sys_init_module, sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl - .word sys_epoll_wait, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask + .word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask /*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall .word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64 /*210*/ .word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo - .word sys_ipc, sys_nis_syscall, sys_clone, sys_nis_syscall, sys_adjtimex + .word sys_ipc, sys_nis_syscall, sys_clone, sys_ioprio_get, sys_adjtimex /*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid /*230*/ .word sys_select, sys_nis_syscall, sys_nis_syscall, sys_stime, sys_statfs64 diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 71b4e380769..362b9c26871 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -73,7 +73,7 @@ static __initdata struct sparc64_tick_ops dummy_tick_ops = { .get_tick = dummy_get_tick, }; -struct sparc64_tick_ops *tick_ops = &dummy_tick_ops; +struct sparc64_tick_ops *tick_ops __read_mostly = &dummy_tick_ops; #define TICK_PRIV_BIT (1UL << 63) @@ -195,7 +195,7 @@ static unsigned long tick_add_tick(unsigned long adj, unsigned long offset) return new_tick; } -static struct sparc64_tick_ops tick_operations = { +static struct sparc64_tick_ops tick_operations __read_mostly = { .init_tick = tick_init_tick, .get_tick = tick_get_tick, .get_compare = tick_get_compare, @@ -276,7 +276,7 @@ static unsigned long stick_add_compare(unsigned long adj) return new_compare; } -static struct sparc64_tick_ops stick_operations = { +static struct sparc64_tick_ops stick_operations __read_mostly = { .init_tick = stick_init_tick, .get_tick = stick_get_tick, .get_compare = stick_get_compare, @@ -422,7 +422,7 @@ static unsigned long hbtick_add_compare(unsigned long adj) return val; } -static struct sparc64_tick_ops hbtick_operations = { +static struct sparc64_tick_ops hbtick_operations __read_mostly = { .init_tick = hbtick_init_tick, .get_tick = hbtick_get_tick, .get_compare = hbtick_get_compare, @@ -437,10 +437,9 @@ static struct sparc64_tick_ops hbtick_operations = { * NOTE: On SUN5 systems the ticker interrupt comes in using 2 * interrupts, one at level14 and one with softint bit 0. 
*/ -unsigned long timer_tick_offset; -unsigned long timer_tick_compare; +unsigned long timer_tick_offset __read_mostly; -static unsigned long timer_ticks_per_nsec_quotient; +static unsigned long timer_ticks_per_nsec_quotient __read_mostly; #define TICK_SIZE (tick_nsec / 1000) @@ -464,7 +463,7 @@ static inline void timer_check_rtc(void) static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs) { - unsigned long ticks, pstate; + unsigned long ticks, compare, pstate; write_seqlock(&xtime_lock); @@ -483,14 +482,14 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs) : "=r" (pstate) : "i" (PSTATE_IE)); - timer_tick_compare = tick_ops->add_compare(timer_tick_offset); + compare = tick_ops->add_compare(timer_tick_offset); ticks = tick_ops->get_tick(); /* Restore PSTATE_IE. */ __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : /* no outputs */ : "r" (pstate)); - } while (time_after_eq(ticks, timer_tick_compare)); + } while (time_after_eq(ticks, compare)); timer_check_rtc(); @@ -506,11 +505,6 @@ void timer_tick_interrupt(struct pt_regs *regs) do_timer(regs); - /* - * Only keep timer_tick_offset uptodate, but don't set TICK_CMPR. - */ - timer_tick_compare = tick_ops->get_compare() + timer_tick_offset; - timer_check_rtc(); write_sequnlock(&xtime_lock); @@ -973,7 +967,7 @@ static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_reg int err; /* Register IRQ handler. */ - err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC, + err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, 0, "timer", NULL); if (err) { diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 382fd6798bb..950423da8a6 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -32,6 +32,8 @@ SECTIONS .data1 : { *(.data1) } . = ALIGN(64); .data.cacheline_aligned : { *(.data.cacheline_aligned) } + . = ALIGN(64); + .data.read_mostly : { *(.data.read_mostly) } _edata = .; PROVIDE (edata = .); .fixup : { *(.fixup) } diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index 3ffee7b51ae..52e9375288a 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -34,22 +34,6 @@ extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; /* - * To debug kernel during syscall entry. - */ -void syscall_trace_entry(struct pt_regs *regs) -{ - printk("scall entry: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]); -} - -/* - * To debug kernel during syscall exit. - */ -void syscall_trace_exit(struct pt_regs *regs) -{ - printk("scall exit: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]); -} - -/* * To debug kernel to catch accesses to certain virtual/physical addresses. * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints. * flags = VM_READ watches memread accesses, flags = VM_WRITE watches memwrite accesses. 
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 7a2431d3abc..36377089379 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -72,6 +72,7 @@ __flush_tlb_pending: flush %g6 retl wrpr %g7, 0x0, %pstate + nop .align 32 .globl __flush_tlb_kernel_range @@ -249,7 +250,7 @@ __cheetah_flush_tlb_mm: /* 15 insns */ retl wrpr %g7, 0x0, %pstate -__cheetah_flush_tlb_pending: /* 22 insns */ +__cheetah_flush_tlb_pending: /* 23 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 @@ -317,7 +318,7 @@ cheetah_patch_cachetlbops: sethi %hi(__cheetah_flush_tlb_pending), %o1 or %o1, %lo(__cheetah_flush_tlb_pending), %o1 call cheetah_patch_one - mov 22, %o2 + mov 23, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 diff --git a/arch/sparc64/solaris/entry64.S b/arch/sparc64/solaris/entry64.S index 0cc9dad75c5..4b6ae583c0a 100644 --- a/arch/sparc64/solaris/entry64.S +++ b/arch/sparc64/solaris/entry64.S @@ -24,8 +24,9 @@ .text solaris_syscall_trace: + add %sp, PTREGS_OFF, %o0 call syscall_trace - nop + mov 0, %o1 srl %i0, 0, %o0 mov %i4, %o4 srl %i1, 0, %o1 @@ -159,8 +160,10 @@ ret_from_solaris: stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] !npc = npc+4 solaris_syscall_trace2: + add %sp, PTREGS_OFF, %o0 call syscall_trace - add %l1, 0x4, %l2 /* npc = npc+4 */ + mov 1, %o1 + add %l1, 0x4, %l2 /* npc = npc+4 */ andcc %l1, 1, %g0 bne,pn %icc, 2b nop |
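
The CONFIG_SECCOMP help text added to Kconfig above only mentions the /proc interface in passing. As a minimal illustration (not part of this patch, and assuming /proc/<pid>/seccomp accepts a plain "1" as it does on the other architectures of this era), a task would confine itself to strict seccomp roughly like this:

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	int fd = open("/proc/self/seccomp", O_WRONLY);

	if (fd < 0)
		return 1;

	/* Writing "1" switches this task to strict seccomp.  From the
	 * moment the write returns, only read(), write(), exit() and
	 * sigreturn() are permitted; any other syscall kills the task.
	 */
	if (write(fd, "1", 1) != 1)
		return 1;

	write(STDOUT_FILENO, "still alive inside seccomp\n", 27);

	/* Use the plain exit syscall; exit_group() is not in the allowed set. */
	syscall(SYS_exit, 0);
	return 0;	/* not reached */
}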
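
Most of the irq.c churn above replaces the old IBF_MULTI vector-of-four scheme with a per-INO irq_desc that owns a fixed array of action slots plus an optional pre_handler hook, which pci_sabre.c and pci_schizo.c use for their DMA write-sync sequences. Pieced together from the fields this diff touches, the descriptor looks roughly like the sketch below; the authoritative definition lives in the asm-sparc64 headers and may differ in detail, and MAX_IRQ_DESC_ACTION = 4 is an assumption carried over from the old four-entry vectors.

#include <linux/types.h>
#include <linux/interrupt.h>	/* struct irqaction */

struct ino_bucket;	/* per-INO interrupt bucket, defined elsewhere */

#define MAX_IRQ_DESC_ACTION	4	/* assumed: matches the old 4-entry vector */

struct irq_desc {
	/* Optional hook run before the action handlers; Sabre and Tomatillo
	 * install their DMA write-sync routines here (see sabre_wsync_handler
	 * and tomatillo_wsync_handler in this patch).
	 */
	void			(*pre_handler)(struct ino_bucket *, void *, void *);
	void			*pre_handler_arg1;
	void			*pre_handler_arg2;

	/* Bit i set means action[i] is in use; get_action_slot() claims a
	 * free slot under irq_action_lock, free_irq() clears the bit again.
	 */
	u32			action_active_mask;
	struct irqaction	action[MAX_IRQ_DESC_ACTION];
};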