Diffstat (limited to 'arch/sparc64')
26 files changed, 649 insertions, 1030 deletions
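The bulk of this diff replaces boot-time instruction patching (the cheetah_plus_patch_etrap/rtrap/fpdis/winfixup routines, which rewrote sethi immediates in the trap paths) with three ordinary variables -- sparc64_kern_pri_context, sparc64_kern_pri_nuc_bits and sparc64_kern_sec_context -- that remap_kernel() fills in once and the assembly then loads with ldx. A minimal user-space sketch of that data-driven scheme follows; the CTX_* values below are placeholders, not the real Cheetah+ context-register encodings, and the structure only mirrors the idea, not the kernel code itself.

    /* Simplified model: compute the kernel context register values once
     * at boot and store them in variables, instead of patching the
     * immediates into the trap-path instructions.
     */
    #include <stdio.h>

    #define CTX_CHEETAH_PLUS_NUC   0x2UL   /* placeholder, not the real encoding */
    #define CTX_CHEETAH_PLUS_CTX0  0x1UL   /* placeholder, not the real encoding */

    static unsigned long sparc64_kern_pri_context;
    static unsigned long sparc64_kern_pri_nuc_bits;
    static unsigned long sparc64_kern_sec_context;

    /* Mirrors the initialization this patch adds to remap_kernel() in
     * mm/init.c: only cheetah_plus selects the special page-size
     * encodings; other chips leave the variables at zero.
     */
    static void init_kernel_contexts(int is_cheetah_plus)
    {
            if (is_cheetah_plus) {
                    sparc64_kern_pri_context  = CTX_CHEETAH_PLUS_CTX0 |
                                                CTX_CHEETAH_PLUS_NUC;
                    sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
                    sparc64_kern_sec_context  = CTX_CHEETAH_PLUS_CTX0;
            }
    }

    int main(void)
    {
            init_kernel_contexts(1);
            /* etrap.S / rtrap.S / winfixup.S now simply do
             *   sethi %hi(sparc64_kern_pri_context), %g2
             *   ldx   [%g2 + %lo(sparc64_kern_pri_context)], %g3
             * instead of executing patched sethi sequences.
             */
            printf("pri ctx %#lx, nuc bits %#lx, sec ctx %#lx\n",
                   sparc64_kern_pri_context,
                   sparc64_kern_pri_nuc_bits,
                   sparc64_kern_sec_context);
            return 0;
    }

The payoff is that spitfire, cheetah and cheetah_plus all run the identical trap code: on the older chips the variables simply stay zero, matching the old unpatched sethi %hi(0) sequences, so no per-CPU patching pass is needed at boot.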
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c index 48756958116..77ef5df4e5a 100644 --- a/arch/sparc64/kernel/cpu.c +++ b/arch/sparc64/kernel/cpu.c @@ -39,6 +39,8 @@ struct cpu_fp_info linux_sparc_fpu[] = { { 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"}, { 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"}, { 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"}, + { 0x3e, 0x19, 0, "UltraSparc IV+ integrated FPU"}, + { 0x3e, 0x22, 0, "UltraSparc IIIi+ integrated FPU"}, }; #define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info)) @@ -53,6 +55,8 @@ struct cpu_iu_info linux_sparc_chips[] = { { 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"}, { 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"}, { 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"}, + { 0x3e, 0x19, "TI UltraSparc IV+ (Panther)"}, + { 0x3e, 0x22, "TI UltraSparc IIIi+ (Serrano)"}, }; #define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info)) diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index 2879b107292..11a848402fb 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -33,7 +33,7 @@ /* This is trivial with the new code... */ .globl do_fpdis do_fpdis: - sethi %hi(TSTATE_PEF), %g4 ! IEU0 + sethi %hi(TSTATE_PEF), %g4 rdpr %tstate, %g5 andcc %g5, %g4, %g0 be,pt %xcc, 1f @@ -50,18 +50,18 @@ do_fpdis: add %g0, %g0, %g0 ba,a,pt %xcc, rtrap_clr_l6 -1: ldub [%g6 + TI_FPSAVED], %g5 ! Load Group - wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles - andcc %g5, FPRS_FEF, %g0 ! IEU1 Group - be,a,pt %icc, 1f ! CTI - clr %g7 ! IEU0 - ldx [%g6 + TI_GSR], %g7 ! Load Group -1: andcc %g5, FPRS_DL, %g0 ! IEU1 - bne,pn %icc, 2f ! CTI - fzero %f0 ! FPA - andcc %g5, FPRS_DU, %g0 ! IEU1 Group - bne,pn %icc, 1f ! CTI - fzero %f2 ! FPA +1: ldub [%g6 + TI_FPSAVED], %g5 + wr %g0, FPRS_FEF, %fprs + andcc %g5, FPRS_FEF, %g0 + be,a,pt %icc, 1f + clr %g7 + ldx [%g6 + TI_GSR], %g7 +1: andcc %g5, FPRS_DL, %g0 + bne,pn %icc, 2f + fzero %f0 + andcc %g5, FPRS_DU, %g0 + bne,pn %icc, 1f + fzero %f2 faddd %f0, %f2, %f4 fmuld %f0, %f2, %f6 faddd %f0, %f2, %f8 @@ -97,15 +97,17 @@ do_fpdis: faddd %f0, %f2, %f4 fmuld %f0, %f2, %f6 ldxa [%g3] ASI_DMMU, %g5 -cplus_fptrap_insn_1: - sethi %hi(0), %g2 + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 stxa %g2, [%g3] ASI_DMMU membar #Sync add %g6, TI_FPREGS + 0xc0, %g2 faddd %f0, %f2, %f8 fmuld %f0, %f2, %f10 - ldda [%g1] ASI_BLK_S, %f32 ! grrr, where is ASI_BLK_NUCLEUS 8-( + membar #Sync + ldda [%g1] ASI_BLK_S, %f32 ldda [%g2] ASI_BLK_S, %f48 + membar #Sync faddd %f0, %f2, %f12 fmuld %f0, %f2, %f14 faddd %f0, %f2, %f16 @@ -116,7 +118,6 @@ cplus_fptrap_insn_1: fmuld %f0, %f2, %f26 faddd %f0, %f2, %f28 fmuld %f0, %f2, %f30 - membar #Sync b,pt %xcc, fpdis_exit nop 2: andcc %g5, FPRS_DU, %g0 @@ -126,15 +127,17 @@ cplus_fptrap_insn_1: fzero %f34 ldxa [%g3] ASI_DMMU, %g5 add %g6, TI_FPREGS, %g1 -cplus_fptrap_insn_2: - sethi %hi(0), %g2 + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 stxa %g2, [%g3] ASI_DMMU membar #Sync add %g6, TI_FPREGS + 0x40, %g2 faddd %f32, %f34, %f36 fmuld %f32, %f34, %f38 - ldda [%g1] ASI_BLK_S, %f0 ! 
grrr, where is ASI_BLK_NUCLEUS 8-( + membar #Sync + ldda [%g1] ASI_BLK_S, %f0 ldda [%g2] ASI_BLK_S, %f16 + membar #Sync faddd %f32, %f34, %f40 fmuld %f32, %f34, %f42 faddd %f32, %f34, %f44 @@ -147,18 +150,18 @@ cplus_fptrap_insn_2: fmuld %f32, %f34, %f58 faddd %f32, %f34, %f60 fmuld %f32, %f34, %f62 - membar #Sync ba,pt %xcc, fpdis_exit nop 3: mov SECONDARY_CONTEXT, %g3 add %g6, TI_FPREGS, %g1 ldxa [%g3] ASI_DMMU, %g5 -cplus_fptrap_insn_3: - sethi %hi(0), %g2 + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 stxa %g2, [%g3] ASI_DMMU membar #Sync mov 0x40, %g2 - ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-( + membar #Sync + ldda [%g1] ASI_BLK_S, %f0 ldda [%g1 + %g2] ASI_BLK_S, %f16 add %g1, 0x80, %g1 ldda [%g1] ASI_BLK_S, %f32 @@ -319,8 +322,8 @@ do_fptrap_after_fsr: stx %g3, [%g6 + TI_GSR] mov SECONDARY_CONTEXT, %g3 ldxa [%g3] ASI_DMMU, %g5 -cplus_fptrap_insn_4: - sethi %hi(0), %g2 + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 stxa %g2, [%g3] ASI_DMMU membar #Sync add %g6, TI_FPREGS, %g2 @@ -341,33 +344,6 @@ cplus_fptrap_insn_4: ba,pt %xcc, etrap wr %g0, 0, %fprs -cplus_fptrap_1: - sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 - - .globl cheetah_plus_patch_fpdis -cheetah_plus_patch_fpdis: - /* We configure the dTLB512_0 for 4MB pages and the - * dTLB512_1 for 8K pages when in context zero. - */ - sethi %hi(cplus_fptrap_1), %o0 - lduw [%o0 + %lo(cplus_fptrap_1)], %o1 - - set cplus_fptrap_insn_1, %o2 - stw %o1, [%o2] - flush %o2 - set cplus_fptrap_insn_2, %o2 - stw %o1, [%o2] - flush %o2 - set cplus_fptrap_insn_3, %o2 - stw %o1, [%o2] - flush %o2 - set cplus_fptrap_insn_4, %o2 - stw %o1, [%o2] - flush %o2 - - retl - nop - /* The registers for cross calls will be: * * DATA 0: [low 32-bits] Address of function to call, jmp to this diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index 50d2af1d98a..0d8eba21111 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -68,12 +68,8 @@ etrap_irq: wrpr %g3, 0, %otherwin wrpr %g2, 0, %wstate -cplus_etrap_insn_1: - sethi %hi(0), %g3 - sllx %g3, 32, %g3 -cplus_etrap_insn_2: - sethi %hi(0), %g2 - or %g3, %g2, %g3 + sethi %hi(sparc64_kern_pri_context), %g2 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 stxa %g3, [%l4] ASI_DMMU flush %l6 wr %g0, ASI_AIUS, %asi @@ -215,12 +211,8 @@ scetrap: rdpr %pil, %g2 mov PRIMARY_CONTEXT, %l4 wrpr %g3, 0, %otherwin wrpr %g2, 0, %wstate -cplus_etrap_insn_3: - sethi %hi(0), %g3 - sllx %g3, 32, %g3 -cplus_etrap_insn_4: - sethi %hi(0), %g2 - or %g3, %g2, %g3 + sethi %hi(sparc64_kern_pri_context), %g2 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 stxa %g3, [%l4] ASI_DMMU flush %l6 @@ -264,38 +256,3 @@ cplus_etrap_insn_4: #undef TASK_REGOFF #undef ETRAP_PSTATE1 - -cplus_einsn_1: - sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3 -cplus_einsn_2: - sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 - - .globl cheetah_plus_patch_etrap -cheetah_plus_patch_etrap: - /* We configure the dTLB512_0 for 4MB pages and the - * dTLB512_1 for 8K pages when in context zero. 
- */ - sethi %hi(cplus_einsn_1), %o0 - sethi %hi(cplus_etrap_insn_1), %o2 - lduw [%o0 + %lo(cplus_einsn_1)], %o1 - or %o2, %lo(cplus_etrap_insn_1), %o2 - stw %o1, [%o2] - flush %o2 - sethi %hi(cplus_etrap_insn_3), %o2 - or %o2, %lo(cplus_etrap_insn_3), %o2 - stw %o1, [%o2] - flush %o2 - - sethi %hi(cplus_einsn_2), %o0 - sethi %hi(cplus_etrap_insn_2), %o2 - lduw [%o0 + %lo(cplus_einsn_2)], %o1 - or %o2, %lo(cplus_etrap_insn_2), %o2 - stw %o1, [%o2] - flush %o2 - sethi %hi(cplus_etrap_insn_4), %o2 - or %o2, %lo(cplus_etrap_insn_4), %o2 - stw %o1, [%o2] - flush %o2 - - retl - nop diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index ecc748fb9ad..4c942f71184 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -191,8 +191,9 @@ prom_boot_mapping_phys_low: stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5 stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate" stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache - srlx %l0, 22, %l3 - sllx %l3, 22, %l3 + /* PAGE align */ + srlx %l0, 13, %l3 + sllx %l3, 13, %l3 stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC stx %g0, [%sp + 2047 + 128 + 0x30] ! res1 stx %g0, [%sp + 2047 + 128 + 0x38] ! res2 @@ -211,6 +212,9 @@ prom_boot_mapping_phys_low: ldx [%sp + 2047 + 128 + 0x48], %l2 ! physaddr high stx %l2, [%l4 + 0x0] ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low + /* 4MB align */ + srlx %l3, 22, %l3 + sllx %l3, 22, %l3 stx %l3, [%l4 + 0x8] /* Leave service as-is, "call-method" */ @@ -325,23 +329,7 @@ cheetah_tlb_fixup: 1: sethi %hi(tlb_type), %g1 stw %g2, [%g1 + %lo(tlb_type)] - BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) - ba,pt %xcc, 2f - nop - -1: /* Patch context register writes to support nucleus page - * size correctly. - */ - call cheetah_plus_patch_etrap - nop - call cheetah_plus_patch_rtrap - nop - call cheetah_plus_patch_fpdis - nop - call cheetah_plus_patch_winfixup - nop - -2: /* Patch copy/page operations to cheetah optimized versions. */ + /* Patch copy/page operations to cheetah optimized versions. */ call cheetah_patch_copyops nop call cheetah_patch_copy_page @@ -398,32 +386,79 @@ tlb_fixup_done: nop /* Not reached... */ -/* IMPORTANT NOTE: Whenever making changes here, check - * trampoline.S as well. -jj */ - .globl setup_tba -setup_tba: /* i0 = is_starfire */ - save %sp, -160, %sp + /* This is meant to allow the sharing of this code between + * boot processor invocation (via setup_tba() below) and + * secondary processor startup (via trampoline.S). The + * former does use this code, the latter does not yet due + * to some complexities. That should be fixed up at some + * point. + */ + .globl setup_trap_table +setup_trap_table: + save %sp, -192, %sp + + /* Force interrupts to be disabled. Transferring over to + * the Linux trap table is a very delicate operation. + * Until we are actually on the Linux trap table, we cannot + * get the PAGE_OFFSET linear mappings translated. We need + * that mapping to be setup in order to initialize the firmware + * page tables. + * + * So there is this window of time, from the return from + * prom_set_trap_table() until inherit_prom_mappings_post() + * (in arch/sparc64/mm/init.c) completes, during which no + * firmware address space accesses can be made. + */ + rdpr %pstate, %o1 + andn %o1, PSTATE_IE, %o1 + wrpr %o1, 0x0, %pstate + wrpr %g0, 15, %pil - rdpr %tba, %g7 - sethi %hi(prom_tba), %o1 - or %o1, %lo(prom_tba), %o1 - stx %g7, [%o1] + /* Ok, now make the final valid firmware call to jump over + * to the Linux trap table. 
+ */ + call prom_set_trap_table + sethi %hi(sparc64_ttable_tl0), %o0 + + /* Start using proper page size encodings in ctx register. */ + sethi %hi(sparc64_kern_pri_context), %g3 + ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 + mov PRIMARY_CONTEXT, %g1 + stxa %g2, [%g1] ASI_DMMU + membar #Sync + + /* The Linux trap handlers expect various trap global registers + * to be setup with some fixed values. So here we set these + * up very carefully. These globals are: + * + * Alternate Globals (PSTATE_AG): + * + * %g6 --> current_thread_info() + * + * MMU Globals (PSTATE_MG): + * + * %g1 --> TLB_SFSR + * %g2 --> ((_PAGE_VALID | _PAGE_SZ4MB | + * _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) + * ^ 0xfffff80000000000) + * (this %g2 value is used for computing the PAGE_OFFSET kernel + * TLB entries quickly, the virtual address of the fault XOR'd + * with this %g2 value is the PTE to load into the TLB) + * %g3 --> VPTE_BASE_CHEETAH or VPTE_BASE_SPITFIRE + * + * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()): + * + * %g6 --> __irq_work[smp_processor_id()] + */ - /* Setup "Linux" globals 8-) */ rdpr %pstate, %o1 mov %g6, %o2 - wrpr %o1, (PSTATE_AG|PSTATE_IE), %pstate - sethi %hi(sparc64_ttable_tl0), %g1 - wrpr %g1, %tba + wrpr %o1, PSTATE_AG, %pstate mov %o2, %g6 - /* Set up MMU globals */ - wrpr %o1, (PSTATE_MG|PSTATE_IE), %pstate - - /* Set fixed globals used by dTLB miss handler. */ #define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) #define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) - + wrpr %o1, PSTATE_MG, %pstate mov TSB_REG, %g1 stxa %g0, [%g1] ASI_DMMU membar #Sync @@ -435,17 +470,17 @@ setup_tba: /* i0 = is_starfire */ sllx %g2, 32, %g2 or %g2, KERN_LOWBITS, %g2 - BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base) - ba,pt %xcc, spitfire_vpte_base + BRANCH_IF_ANY_CHEETAH(g3,g7,8f) + ba,pt %xcc, 9f nop -cheetah_vpte_base: +8: sethi %uhi(VPTE_BASE_CHEETAH), %g3 or %g3, %ulo(VPTE_BASE_CHEETAH), %g3 ba,pt %xcc, 2f sllx %g3, 32, %g3 -spitfire_vpte_base: +9: sethi %uhi(VPTE_BASE_SPITFIRE), %g3 or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3 sllx %g3, 32, %g3 @@ -471,36 +506,37 @@ spitfire_vpte_base: sllx %o2, 32, %o2 wr %o2, %asr25 - /* Ok, we're done setting up all the state our trap mechanims needs, - * now get back into normal globals and let the PROM know what is up. - */ 2: wrpr %g0, %g0, %wstate - wrpr %o1, PSTATE_IE, %pstate + wrpr %o1, 0x0, %pstate call init_irqwork_curcpu nop - call prom_set_trap_table - sethi %hi(sparc64_ttable_tl0), %o0 - - BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f) - ba,pt %xcc, 2f - nop - -1: /* Start using proper page size encodings in ctx register. */ - sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3 - mov PRIMARY_CONTEXT, %g1 - sllx %g3, 32, %g3 - sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 - or %g3, %g2, %g3 - stxa %g3, [%g1] ASI_DMMU - membar #Sync - -2: + /* Now we can turn interrupts back on. */ rdpr %pstate, %o1 or %o1, PSTATE_IE, %o1 wrpr %o1, 0, %pstate + wrpr %g0, 0x0, %pil + + ret + restore + + .globl setup_tba +setup_tba: /* i0 = is_starfire */ + save %sp, -192, %sp + + /* The boot processor is the only cpu which invokes this + * routine, the other cpus set things up via trampoline.S. + * So save the OBP trap table address here. 
+ */ + rdpr %tba, %g7 + sethi %hi(prom_tba), %o1 + or %o1, %lo(prom_tba), %o1 + stx %g7, [%o1] + + call setup_trap_table + nop ret restore @@ -540,8 +576,11 @@ bootup_user_stack_end: prom_tba: .xword 0 tlb_type: .word 0 /* Must NOT end up in BSS */ .section ".fixup",#alloc,#execinstr - .globl __ret_efault + + .globl __ret_efault, __retl_efault __ret_efault: ret restore %g0, -EFAULT, %o0 - +__retl_efault: + retl + mov -EFAULT, %o0 diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index c9b69167632..233526ba3ab 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -27,6 +27,7 @@ #include <asm/atomic.h> #include <asm/system.h> #include <asm/irq.h> +#include <asm/io.h> #include <asm/sbus.h> #include <asm/iommu.h> #include <asm/upa.h> diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c index 946cee0257e..9e8362ea310 100644 --- a/arch/sparc64/kernel/power.c +++ b/arch/sparc64/kernel/power.c @@ -17,6 +17,7 @@ #include <asm/system.h> #include <asm/ebus.h> +#include <asm/isa.h> #include <asm/auxio.h> #include <linux/unistd.h> @@ -100,46 +101,83 @@ again: return 0; } -static int __init has_button_interrupt(struct linux_ebus_device *edev) +static int __init has_button_interrupt(unsigned int irq, int prom_node) { - if (edev->irqs[0] == PCI_IRQ_NONE) + if (irq == PCI_IRQ_NONE) return 0; - if (!prom_node_has_property(edev->prom_node, "button")) + if (!prom_node_has_property(prom_node, "button")) return 0; return 1; } -void __init power_init(void) +static int __init power_probe_ebus(struct resource **resp, unsigned int *irq_p, int *prom_node_p) { struct linux_ebus *ebus; struct linux_ebus_device *edev; + + for_each_ebus(ebus) { + for_each_ebusdev(edev, ebus) { + if (!strcmp(edev->prom_name, "power")) { + *resp = &edev->resource[0]; + *irq_p = edev->irqs[0]; + *prom_node_p = edev->prom_node; + return 0; + } + } + } + return -ENODEV; +} + +static int __init power_probe_isa(struct resource **resp, unsigned int *irq_p, int *prom_node_p) +{ + struct sparc_isa_bridge *isa_bus; + struct sparc_isa_device *isa_dev; + + for_each_isa(isa_bus) { + for_each_isadev(isa_dev, isa_bus) { + if (!strcmp(isa_dev->prom_name, "power")) { + *resp = &isa_dev->resource; + *irq_p = isa_dev->irq; + *prom_node_p = isa_dev->prom_node; + return 0; + } + } + } + return -ENODEV; +} + +void __init power_init(void) +{ + struct resource *res = NULL; + unsigned int irq; + int prom_node; static int invoked; if (invoked) return; invoked = 1; - for_each_ebus(ebus) { - for_each_ebusdev(edev, ebus) { - if (!strcmp(edev->prom_name, "power")) - goto found; - } - } + if (!power_probe_ebus(&res, &irq, &prom_node)) + goto found; + + if (!power_probe_isa(&res, &irq, &prom_node)) + goto found; + return; found: - power_reg = ioremap(edev->resource[0].start, 0x4); + power_reg = ioremap(res->start, 0x4); printk("power: Control reg at %p ... 
", power_reg); poweroff_method = machine_halt; /* able to use the standard halt */ - if (has_button_interrupt(edev)) { + if (has_button_interrupt(irq, prom_node)) { if (kernel_thread(powerd, NULL, CLONE_FS) < 0) { printk("Failed to start power daemon.\n"); return; } printk("powerd running.\n"); - if (request_irq(edev->irqs[0], + if (request_irq(irq, power_handler, SA_SHIRQ, "power", NULL) < 0) printk("power: Error, cannot register IRQ handler.\n"); } else { diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c index 5efbff90d66..774ecbb8a03 100644 --- a/arch/sparc64/kernel/ptrace.c +++ b/arch/sparc64/kernel/ptrace.c @@ -31,6 +31,7 @@ #include <asm/visasm.h> #include <asm/spitfire.h> #include <asm/page.h> +#include <asm/cpudata.h> /* Returning from ptrace is a bit tricky because the syscall return * low level code assumes any value returned which is negative and @@ -132,12 +133,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) { unsigned long start = __pa(kaddr); unsigned long end = start + len; + unsigned long dcache_line_size; + + dcache_line_size = local_cpu_data().dcache_line_size; if (tlb_type == spitfire) { - for (; start < end; start += 32) + for (; start < end; start += dcache_line_size) spitfire_put_dcache_tag(start & 0x3fe0, 0x0); } else { - for (; start < end; start += 32) + start &= ~(dcache_line_size - 1); + for (; start < end; start += dcache_line_size) __asm__ __volatile__( "stxa %%g0, [%0] %1\n\t" "membar #Sync" @@ -150,8 +155,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, if (write && tlb_type == spitfire) { unsigned long start = (unsigned long) kaddr; unsigned long end = start + len; + unsigned long icache_line_size; + + icache_line_size = local_cpu_data().icache_line_size; - for (; start < end; start += 32) + for (; start < end; start += icache_line_size) flushi(start); } } diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index fafd227735f..090dcca00d2 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -256,9 +256,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 brnz,pn %l3, kern_rtt mov PRIMARY_CONTEXT, %l7 ldxa [%l7 + %l7] ASI_DMMU, %l0 -cplus_rtrap_insn_1: - sethi %hi(0), %l1 - sllx %l1, 32, %l1 + sethi %hi(sparc64_kern_pri_nuc_bits), %l1 + ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 or %l0, %l1, %l0 stxa %l0, [%l7] ASI_DMMU flush %g6 @@ -313,53 +312,36 @@ kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 wr %g1, FPRS_FEF, %fprs ldx [%o1 + %o5], %g1 add %g6, TI_XFSR, %o1 - membar #StoreLoad | #LoadLoad sll %o0, 8, %o2 add %g6, TI_FPREGS, %o3 brz,pn %l6, 1f add %g6, TI_FPREGS+0x40, %o4 + membar #Sync ldda [%o3 + %o2] ASI_BLK_P, %f0 ldda [%o4 + %o2] ASI_BLK_P, %f16 + membar #Sync 1: andcc %l2, FPRS_DU, %g0 be,pn %icc, 1f wr %g1, 0, %gsr add %o2, 0x80, %o2 + membar #Sync ldda [%o3 + %o2] ASI_BLK_P, %f32 ldda [%o4 + %o2] ASI_BLK_P, %f48 - 1: membar #Sync ldx [%o1 + %o5], %fsr 2: stb %l5, [%g6 + TI_FPDEPTH] ba,pt %xcc, rt_continue nop 5: wr %g0, FPRS_FEF, %fprs - membar #StoreLoad | #LoadLoad sll %o0, 8, %o2 add %g6, TI_FPREGS+0x80, %o3 add %g6, TI_FPREGS+0xc0, %o4 + membar #Sync ldda [%o3 + %o2] ASI_BLK_P, %f32 ldda [%o4 + %o2] ASI_BLK_P, %f48 membar #Sync wr %g0, FPRS_DU, %fprs ba,pt %xcc, rt_continue stb %l5, [%g6 + TI_FPDEPTH] - -cplus_rinsn_1: - sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1 - - .globl cheetah_plus_patch_rtrap -cheetah_plus_patch_rtrap: - /* We configure the dTLB512_0 for 4MB 
pages and the - * dTLB512_1 for 8K pages when in context zero. - */ - sethi %hi(cplus_rinsn_1), %o0 - sethi %hi(cplus_rtrap_insn_1), %o2 - lduw [%o0 + %lo(cplus_rinsn_1)], %o1 - or %o2, %lo(cplus_rtrap_insn_1), %o2 - stw %o1, [%o2] - flush %o2 - - retl - nop diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 8e8baf2354d..c1f34237cdf 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -187,17 +187,13 @@ int prom_callback(long *args) } if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) { - unsigned long kernel_pctx = 0; - - if (tlb_type == cheetah_plus) - kernel_pctx |= (CTX_CHEETAH_PLUS_NUC | - CTX_CHEETAH_PLUS_CTX0); + extern unsigned long sparc64_kern_pri_context; /* Spitfire Errata #32 workaround */ __asm__ __volatile__("stxa %0, [%1] %2\n\t" "flush %%g6" : /* No outputs */ - : "r" (kernel_pctx), + : "r" (sparc64_kern_pri_context), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); @@ -464,8 +460,6 @@ static void __init boot_flags_init(char *commands) } } -extern int prom_probe_memory(void); -extern unsigned long start, end; extern void panic_setup(char *, int *); extern unsigned short root_flags; @@ -492,12 +486,8 @@ void register_prom_callbacks(void) "' linux-.soft2 to .soft2"); } -extern void paging_init(void); - void __init setup_arch(char **cmdline_p) { - int i; - /* Initialize PROM console and command line. */ *cmdline_p = prom_getbootargs(); strcpy(saved_command_line, *cmdline_p); @@ -516,21 +506,6 @@ void __init setup_arch(char **cmdline_p) boot_flags_init(*cmdline_p); idprom_init(); - (void) prom_probe_memory(); - - phys_base = 0xffffffffffffffffUL; - for (i = 0; sp_banks[i].num_bytes != 0; i++) { - unsigned long top; - - if (sp_banks[i].base_addr < phys_base) - phys_base = sp_banks[i].base_addr; - top = sp_banks[i].base_addr + - sp_banks[i].num_bytes; - } - pfn_base = phys_base >> PAGE_SHIFT; - - kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; - kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; if (!root_flags) root_mountflags &= ~MS_RDONLY; diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S index 5f9e4fae612..9cd272ac3ac 100644 --- a/arch/sparc64/kernel/sys32.S +++ b/arch/sparc64/kernel/sys32.S @@ -157,173 +157,199 @@ sys32_socketcall: /* %o0=call, %o1=args */ or %g2, %lo(__socketcall_table_begin), %g2 jmpl %g2 + %o0, %g0 nop +do_einval: + retl + mov -EINVAL, %o0 - /* Each entry is exactly 32 bytes. */ .align 32 __socketcall_table_begin: + + /* Each entry is exactly 32 bytes. 
*/ do_sys_socket: /* sys_socket(int, int, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +1: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_socket), %g1 - ldswa [%o1 + 0x8] %asi, %o2 +2: ldswa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_socket), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +3: ldswa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +4: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_bind), %g1 - ldswa [%o1 + 0x8] %asi, %o2 +5: ldswa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_bind), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +6: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +7: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_connect), %g1 - ldswa [%o1 + 0x8] %asi, %o2 +8: ldswa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_connect), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +9: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_listen: /* sys_listen(int, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +10: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_listen), %g1 jmpl %g1 + %lo(sys_listen), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +11: ldswa [%o1 + 0x4] %asi, %o1 nop nop nop nop do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */ - ldswa [%o1 + 0x0] %asi, %o0 +12: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_accept), %g1 - lduwa [%o1 + 0x8] %asi, %o2 +13: lduwa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_accept), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +14: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */ - ldswa [%o1 + 0x0] %asi, %o0 +15: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_getsockname), %g1 - lduwa [%o1 + 0x8] %asi, %o2 +16: lduwa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_getsockname), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +17: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */ - ldswa [%o1 + 0x0] %asi, %o0 +18: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_getpeername), %g1 - lduwa [%o1 + 0x8] %asi, %o2 +19: lduwa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_getpeername), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +20: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */ - ldswa [%o1 + 0x0] %asi, %o0 +21: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_socketpair), %g1 - ldswa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 +22: ldswa [%o1 + 0x8] %asi, %o2 +23: lduwa [%o1 + 0xc] %asi, %o3 jmpl %g1 + %lo(sys_socketpair), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +24: ldswa [%o1 + 0x4] %asi, %o1 nop nop do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */ - ldswa [%o1 + 0x0] %asi, %o0 +25: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_send), %g1 - lduwa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 +26: lduwa [%o1 + 0x8] %asi, %o2 +27: lduwa [%o1 + 0xc] %asi, %o3 jmpl %g1 + %lo(sys_send), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +28: lduwa [%o1 + 0x4] %asi, %o1 nop nop do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */ - ldswa [%o1 + 0x0] %asi, %o0 +29: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_recv), %g1 - lduwa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 +30: lduwa [%o1 + 0x8] %asi, %o2 +31: lduwa [%o1 + 0xc] %asi, %o3 jmpl %g1 + %lo(sys_recv), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +32: lduwa [%o1 + 0x4] %asi, %o1 nop nop do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +33: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_sendto), %g1 - lduwa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 
0xc] %asi, %o3 - lduwa [%o1 + 0x10] %asi, %o4 - ldswa [%o1 + 0x14] %asi, %o5 +34: lduwa [%o1 + 0x8] %asi, %o2 +35: lduwa [%o1 + 0xc] %asi, %o3 +36: lduwa [%o1 + 0x10] %asi, %o4 +37: ldswa [%o1 + 0x14] %asi, %o5 jmpl %g1 + %lo(sys_sendto), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +38: lduwa [%o1 + 0x4] %asi, %o1 do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */ - ldswa [%o1 + 0x0] %asi, %o0 +39: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_recvfrom), %g1 - lduwa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 - lduwa [%o1 + 0x10] %asi, %o4 - lduwa [%o1 + 0x14] %asi, %o5 +40: lduwa [%o1 + 0x8] %asi, %o2 +41: lduwa [%o1 + 0xc] %asi, %o3 +42: lduwa [%o1 + 0x10] %asi, %o4 +43: lduwa [%o1 + 0x14] %asi, %o5 jmpl %g1 + %lo(sys_recvfrom), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +44: lduwa [%o1 + 0x4] %asi, %o1 do_sys_shutdown: /* sys_shutdown(int, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +45: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_shutdown), %g1 jmpl %g1 + %lo(sys_shutdown), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +46: ldswa [%o1 + 0x4] %asi, %o1 nop nop nop nop do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +47: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(compat_sys_setsockopt), %g1 - ldswa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 - ldswa [%o1 + 0x10] %asi, %o4 +48: ldswa [%o1 + 0x8] %asi, %o2 +49: lduwa [%o1 + 0xc] %asi, %o3 +50: ldswa [%o1 + 0x10] %asi, %o4 jmpl %g1 + %lo(compat_sys_setsockopt), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +51: ldswa [%o1 + 0x4] %asi, %o1 nop do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */ - ldswa [%o1 + 0x0] %asi, %o0 +52: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(compat_sys_getsockopt), %g1 - ldswa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 - lduwa [%o1 + 0x10] %asi, %o4 +53: ldswa [%o1 + 0x8] %asi, %o2 +54: lduwa [%o1 + 0xc] %asi, %o3 +55: lduwa [%o1 + 0x10] %asi, %o4 jmpl %g1 + %lo(compat_sys_getsockopt), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +56: ldswa [%o1 + 0x4] %asi, %o1 nop do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ - ldswa [%o1 + 0x0] %asi, %o0 +57: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(compat_sys_sendmsg), %g1 - lduwa [%o1 + 0x8] %asi, %o2 +58: lduwa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(compat_sys_sendmsg), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +59: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */ - ldswa [%o1 + 0x0] %asi, %o0 +60: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(compat_sys_recvmsg), %g1 - lduwa [%o1 + 0x8] %asi, %o2 +61: lduwa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(compat_sys_recvmsg), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +62: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop -__socketcall_table_end: - -do_einval: - retl - mov -EINVAL, %o0 -do_efault: - retl - mov -EFAULT, %o0 .section __ex_table .align 4 - .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault + .word 1b, __retl_efault, 2b, __retl_efault + .word 3b, __retl_efault, 4b, __retl_efault + .word 5b, __retl_efault, 6b, __retl_efault + .word 7b, __retl_efault, 8b, __retl_efault + .word 9b, __retl_efault, 10b, __retl_efault + .word 11b, __retl_efault, 12b, __retl_efault + .word 13b, __retl_efault, 14b, __retl_efault + .word 15b, __retl_efault, 16b, __retl_efault + .word 17b, __retl_efault, 18b, __retl_efault + .word 19b, __retl_efault, 20b, __retl_efault + .word 21b, __retl_efault, 22b, __retl_efault + .word 23b, __retl_efault, 24b, __retl_efault + .word 25b, __retl_efault, 26b, 
__retl_efault + .word 27b, __retl_efault, 28b, __retl_efault + .word 29b, __retl_efault, 30b, __retl_efault + .word 31b, __retl_efault, 32b, __retl_efault + .word 33b, __retl_efault, 34b, __retl_efault + .word 35b, __retl_efault, 36b, __retl_efault + .word 37b, __retl_efault, 38b, __retl_efault + .word 39b, __retl_efault, 40b, __retl_efault + .word 41b, __retl_efault, 42b, __retl_efault + .word 43b, __retl_efault, 44b, __retl_efault + .word 45b, __retl_efault, 46b, __retl_efault + .word 47b, __retl_efault, 48b, __retl_efault + .word 49b, __retl_efault, 50b, __retl_efault + .word 51b, __retl_efault, 52b, __retl_efault + .word 53b, __retl_efault, 54b, __retl_efault + .word 55b, __retl_efault, 56b, __retl_efault + .word 57b, __retl_efault, 58b, __retl_efault + .word 59b, __retl_efault, 60b, __retl_efault + .word 61b, __retl_efault, 62b, __retl_efault .previous diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 89f2fcfcd66..9478551cb02 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -336,20 +336,13 @@ do_unlock: call init_irqwork_curcpu nop - BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f) - ba,pt %xcc, 2f - nop - -1: /* Start using proper page size encodings in ctx register. */ - sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3 + /* Start using proper page size encodings in ctx register. */ + sethi %hi(sparc64_kern_pri_context), %g3 + ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 mov PRIMARY_CONTEXT, %g1 - sllx %g3, 32, %g3 - sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 - or %g3, %g2, %g3 - stxa %g3, [%g1] ASI_DMMU + stxa %g2, [%g1] ASI_DMMU membar #Sync -2: rdpr %pstate, %o1 or %o1, PSTATE_IE, %o1 wrpr %o1, 0, %pstate diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index f8e7005fede..5570e7bb22b 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -189,19 +189,18 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un if (regs->tstate & TSTATE_PRIV) { /* Test if this comes from uaccess places. */ - unsigned long fixup; - unsigned long g2 = regs->u_regs[UREG_G2]; + const struct exception_table_entry *entry; - if ((fixup = search_extables_range(regs->tpc, &g2))) { - /* Ouch, somebody is trying ugly VM hole tricks on us... */ + entry = search_exception_tables(regs->tpc); + if (entry) { + /* Ouch, somebody is trying VM hole tricks on us... */ #ifdef DEBUG_EXCEPTIONS printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc); - printk("EX_TABLE: insn<%016lx> fixup<%016lx> " - "g2<%016lx>\n", regs->tpc, fixup, g2); + printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n", + regs->tpc, entry->fixup); #endif - regs->tpc = fixup; + regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; - regs->u_regs[UREG_G2] = g2; return; } /* Shit... */ @@ -758,26 +757,12 @@ void __init cheetah_ecache_flush_init(void) ecache_flush_size = (2 * largest_size); ecache_flush_linesize = smallest_linesize; - /* Discover a physically contiguous chunk of physical - * memory in 'sp_banks' of size ecache_flush_size calculated - * above. Store the physical base of this area at - * ecache_flush_physbase. - */ - for (node = 0; ; node++) { - if (sp_banks[node].num_bytes == 0) - break; - if (sp_banks[node].num_bytes >= ecache_flush_size) { - ecache_flush_physbase = sp_banks[node].base_addr; - break; - } - } + ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size); - /* Note: Zero would be a valid value of ecache_flush_physbase so - * don't use that as the success test. 
:-) - */ - if (sp_banks[node].num_bytes == 0) { + if (ecache_flush_physbase == ~0UL) { prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " - "contiguous physical memory.\n", ecache_flush_size); + "contiguous physical memory.\n", + ecache_flush_size); prom_halt(); } @@ -1346,16 +1331,12 @@ static int cheetah_fix_ce(unsigned long physaddr) /* Return non-zero if PADDR is a valid physical memory address. */ static int cheetah_check_main_memory(unsigned long paddr) { - int i; + unsigned long vaddr = PAGE_OFFSET + paddr; - for (i = 0; ; i++) { - if (sp_banks[i].num_bytes == 0) - break; - if (paddr >= sp_banks[i].base_addr && - paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes)) - return 1; - } - return 0; + if (vaddr > (unsigned long) high_memory) + return 0; + + return kern_addr_valid(vaddr); } void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) @@ -1610,10 +1591,10 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned /* OK, usermode access. */ recoverable = 1; } else { - unsigned long g2 = regs->u_regs[UREG_G2]; - unsigned long fixup = search_extables_range(regs->tpc, &g2); + const struct exception_table_entry *entry; - if (fixup != 0UL) { + entry = search_exception_tables(regs->tpc); + if (entry) { /* OK, kernel access to userspace. */ recoverable = 1; @@ -1632,9 +1613,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned * recoverable condition. */ if (recoverable) { - regs->tpc = fixup; + regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; - regs->u_regs[UREG_G2] = g2; } } } diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S index da48400bcc9..1f5b5b708ce 100644 --- a/arch/sparc64/kernel/una_asm.S +++ b/arch/sparc64/kernel/una_asm.S @@ -6,13 +6,6 @@ .text -kernel_unaligned_trap_fault: - call kernel_mna_trap_fault - nop - retl - nop - .size kern_unaligned_trap_fault, .-kern_unaligned_trap_fault - .globl __do_int_store __do_int_store: rd %asi, %o4 @@ -51,24 +44,24 @@ __do_int_store: 0: wr %o4, 0x0, %asi retl - nop + mov 0, %o0 .size __do_int_store, .-__do_int_store .section __ex_table - .word 4b, kernel_unaligned_trap_fault - .word 5b, kernel_unaligned_trap_fault - .word 6b, kernel_unaligned_trap_fault - .word 7b, kernel_unaligned_trap_fault - .word 8b, kernel_unaligned_trap_fault - .word 9b, kernel_unaligned_trap_fault - .word 10b, kernel_unaligned_trap_fault - .word 11b, kernel_unaligned_trap_fault - .word 12b, kernel_unaligned_trap_fault - .word 13b, kernel_unaligned_trap_fault - .word 14b, kernel_unaligned_trap_fault - .word 15b, kernel_unaligned_trap_fault - .word 16b, kernel_unaligned_trap_fault - .word 17b, kernel_unaligned_trap_fault + .word 4b, __retl_efault + .word 5b, __retl_efault + .word 6b, __retl_efault + .word 7b, __retl_efault + .word 8b, __retl_efault + .word 9b, __retl_efault + .word 10b, __retl_efault + .word 11b, __retl_efault + .word 12b, __retl_efault + .word 13b, __retl_efault + .word 14b, __retl_efault + .word 15b, __retl_efault + .word 16b, __retl_efault + .word 17b, __retl_efault .previous .globl do_int_load @@ -133,21 +126,21 @@ do_int_load: 0: wr %o5, 0x0, %asi retl - nop + mov 0, %o0 .size __do_int_load, .-__do_int_load .section __ex_table - .word 4b, kernel_unaligned_trap_fault - .word 5b, kernel_unaligned_trap_fault - .word 6b, kernel_unaligned_trap_fault - .word 7b, kernel_unaligned_trap_fault - .word 8b, kernel_unaligned_trap_fault - .word 9b, kernel_unaligned_trap_fault - .word 10b, kernel_unaligned_trap_fault - .word 
11b, kernel_unaligned_trap_fault - .word 12b, kernel_unaligned_trap_fault - .word 13b, kernel_unaligned_trap_fault - .word 14b, kernel_unaligned_trap_fault - .word 15b, kernel_unaligned_trap_fault - .word 16b, kernel_unaligned_trap_fault + .word 4b, __retl_efault + .word 5b, __retl_efault + .word 6b, __retl_efault + .word 7b, __retl_efault + .word 8b, __retl_efault + .word 9b, __retl_efault + .word 10b, __retl_efault + .word 11b, __retl_efault + .word 12b, __retl_efault + .word 13b, __retl_efault + .word 14b, __retl_efault + .word 15b, __retl_efault + .word 16b, __retl_efault .previous diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c index 42718f6a7d3..70faf630603 100644 --- a/arch/sparc64/kernel/unaligned.c +++ b/arch/sparc64/kernel/unaligned.c @@ -180,14 +180,14 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs) die_if_kernel(str, regs); } -extern void do_int_load(unsigned long *dest_reg, int size, - unsigned long *saddr, int is_signed, int asi); +extern int do_int_load(unsigned long *dest_reg, int size, + unsigned long *saddr, int is_signed, int asi); -extern void __do_int_store(unsigned long *dst_addr, int size, - unsigned long src_val, int asi); +extern int __do_int_store(unsigned long *dst_addr, int size, + unsigned long src_val, int asi); -static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, - struct pt_regs *regs, int asi, int orig_asi) +static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr, + struct pt_regs *regs, int asi, int orig_asi) { unsigned long zero = 0; unsigned long *src_val_p = &zero; @@ -219,7 +219,7 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, break; }; } - __do_int_store(dst_addr, size, src_val, asi); + return __do_int_store(dst_addr, size, src_val, asi); } static inline void advance(struct pt_regs *regs) @@ -242,14 +242,14 @@ static inline int ok_for_kernel(unsigned int insn) return !floating_point_load_or_store_p(insn); } -void kernel_mna_trap_fault(void) +static void kernel_mna_trap_fault(void) { struct pt_regs *regs = current_thread_info()->kern_una_regs; unsigned int insn = current_thread_info()->kern_una_insn; - unsigned long g2 = regs->u_regs[UREG_G2]; - unsigned long fixup = search_extables_range(regs->tpc, &g2); + const struct exception_table_entry *entry; - if (!fixup) { + entry = search_exception_tables(regs->tpc); + if (!entry) { unsigned long address; address = compute_effective_address(regs, insn, @@ -270,9 +270,8 @@ void kernel_mna_trap_fault(void) die_if_kernel("Oops", regs); /* Not reached */ } - regs->tpc = fixup; + regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; - regs->u_regs [UREG_G2] = g2; regs->tstate &= ~TSTATE_ASI; regs->tstate |= (ASI_AIUS << 24UL); @@ -294,8 +293,8 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u kernel_mna_trap_fault(); } else { - unsigned long addr; - int orig_asi, asi; + unsigned long addr, *reg_addr; + int orig_asi, asi, err; addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); @@ -319,11 +318,12 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u }; switch (dir) { case load: - do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), - size, (unsigned long *) addr, - decode_signedness(insn), asi); - if (unlikely(asi != orig_asi)) { - unsigned long val_in = *(unsigned long *) addr; + reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); + err = do_int_load(reg_addr, size, + (unsigned long *) 
addr, + decode_signedness(insn), asi); + if (likely(!err) && unlikely(asi != orig_asi)) { + unsigned long val_in = *reg_addr; switch (size) { case 2: val_in = swab16(val_in); @@ -339,21 +339,24 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u BUG(); break; }; - *(unsigned long *) addr = val_in; + *reg_addr = val_in; } break; case store: - do_int_store(((insn>>25)&0x1f), size, - (unsigned long *) addr, regs, - asi, orig_asi); + err = do_int_store(((insn>>25)&0x1f), size, + (unsigned long *) addr, regs, + asi, orig_asi); break; default: panic("Impossible kernel unaligned trap."); /* Not reached... */ } - advance(regs); + if (unlikely(err)) + kernel_mna_trap_fault(); + else + advance(regs); } } diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c index 9080e7cd4bb..0340041f614 100644 --- a/arch/sparc64/kernel/us3_cpufreq.c +++ b/arch/sparc64/kernel/us3_cpufreq.c @@ -208,7 +208,10 @@ static int __init us3_freq_init(void) impl = ((ver >> 32) & 0xffff); if (manuf == CHEETAH_MANUF && - (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) { + (impl == CHEETAH_IMPL || + impl == CHEETAH_PLUS_IMPL || + impl == JAGUAR_IMPL || + impl == PANTHER_IMPL)) { struct cpufreq_driver *driver; ret = -ENOMEM; diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index 99c809a1e5a..39160926267 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -16,23 +16,14 @@ .text set_pcontext: -cplus_winfixup_insn_1: - sethi %hi(0), %l1 + sethi %hi(sparc64_kern_pri_context), %l1 + ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1 mov PRIMARY_CONTEXT, %g1 - sllx %l1, 32, %l1 -cplus_winfixup_insn_2: - sethi %hi(0), %g2 - or %l1, %g2, %l1 stxa %l1, [%g1] ASI_DMMU flush %g6 retl nop -cplus_wfinsn_1: - sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1 -cplus_wfinsn_2: - sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 - .align 32 /* Here are the rules, pay attention. 
@@ -395,23 +386,3 @@ window_dax_from_user_common: add %sp, PTREGS_OFF, %o0 ba,pt %xcc, rtrap clr %l6 - - - .globl cheetah_plus_patch_winfixup -cheetah_plus_patch_winfixup: - sethi %hi(cplus_wfinsn_1), %o0 - sethi %hi(cplus_winfixup_insn_1), %o2 - lduw [%o0 + %lo(cplus_wfinsn_1)], %o1 - or %o2, %lo(cplus_winfixup_insn_1), %o2 - stw %o1, [%o2] - flush %o2 - - sethi %hi(cplus_wfinsn_2), %o0 - sethi %hi(cplus_winfixup_insn_2), %o2 - lduw [%o0 + %lo(cplus_wfinsn_2)], %o1 - or %o2, %lo(cplus_winfixup_insn_2), %o2 - stw %o1, [%o2] - flush %o2 - - retl - nop diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S index 4e18989bd60..a0ded5c5aa5 100644 --- a/arch/sparc64/lib/VISsave.S +++ b/arch/sparc64/lib/VISsave.S @@ -59,15 +59,17 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 be,pn %icc, 9b add %g6, TI_FPREGS, %g2 andcc %o5, FPRS_DL, %g0 - membar #StoreStore | #LoadStore be,pn %icc, 4f add %g6, TI_FPREGS+0x40, %g3 + membar #Sync stda %f0, [%g2 + %g1] ASI_BLK_P stda %f16, [%g3 + %g1] ASI_BLK_P + membar #Sync andcc %o5, FPRS_DU, %g0 be,pn %icc, 5f 4: add %g1, 128, %g1 + membar #Sync stda %f32, [%g2 + %g1] ASI_BLK_P stda %f48, [%g3 + %g1] ASI_BLK_P @@ -87,7 +89,7 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 sll %g1, 5, %g1 add %g6, TI_FPREGS+0xc0, %g3 wr %g0, FPRS_FEF, %fprs - membar #StoreStore | #LoadStore + membar #Sync stda %f32, [%g2 + %g1] ASI_BLK_P stda %f48, [%g3 + %g1] ASI_BLK_P membar #Sync @@ -128,8 +130,8 @@ VISenterhalf: be,pn %icc, 4f add %g6, TI_FPREGS, %g2 - membar #StoreStore | #LoadStore add %g6, TI_FPREGS+0x40, %g3 + membar #Sync stda %f0, [%g2 + %g1] ASI_BLK_P stda %f16, [%g3 + %g1] ASI_BLK_P membar #Sync diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S index 09cbbaa0ebf..e1264650ca7 100644 --- a/arch/sparc64/lib/strncpy_from_user.S +++ b/arch/sparc64/lib/strncpy_from_user.S @@ -125,15 +125,11 @@ __strncpy_from_user: add %o2, %o3, %o0 .size __strncpy_from_user, .-__strncpy_from_user - .section .fixup,#alloc,#execinstr - .align 4 -4: retl - mov -EFAULT, %o0 - .section __ex_table,#alloc .align 4 - .word 60b, 4b - .word 61b, 4b - .word 62b, 4b - .word 63b, 4b - .word 64b, 4b + .word 60b, __retl_efault + .word 61b, __retl_efault + .word 62b, __retl_efault + .word 63b, __retl_efault + .word 64b, __retl_efault + .previous diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c index 0278e34125d..19d1fdb17d0 100644 --- a/arch/sparc64/lib/user_fixup.c +++ b/arch/sparc64/lib/user_fixup.c @@ -11,61 +11,56 @@ /* Calculating the exact fault address when using * block loads and stores can be very complicated. + * * Instead of trying to be clever and handling all * of the cases, just fix things up simply here. 
*/ -unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size) +static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset) { - char *dst = to; - const char __user *src = from; + unsigned long fault_addr = current_thread_info()->fault_address; + unsigned long end = start + size; - while (size) { - if (__get_user(*dst, src)) - break; - dst++; - src++; - size--; + if (fault_addr < start || fault_addr >= end) { + *offset = 0; + } else { + *offset = start - fault_addr; + size = end - fault_addr; } + return size; +} - if (size) - memset(dst, 0, size); +unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size) +{ + unsigned long offset; + + size = compute_size((unsigned long) from, size, &offset); + if (likely(size)) + memset(to + offset, 0, size); return size; } unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size) { - char __user *dst = to; - const char *src = from; - - while (size) { - if (__put_user(*src, dst)) - break; - dst++; - src++; - size--; - } + unsigned long offset; - return size; + return compute_size((unsigned long) to, size, &offset); } unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size) { - char __user *dst = to; - char __user *src = from; + unsigned long fault_addr = current_thread_info()->fault_address; + unsigned long start = (unsigned long) to; + unsigned long end = start + size; - while (size) { - char tmp; + if (fault_addr >= start && fault_addr < end) + return end - fault_addr; - if (__get_user(tmp, src)) - break; - if (__put_user(tmp, dst)) - break; - dst++; - src++; - size--; - } + start = (unsigned long) from; + end = start + size; + if (fault_addr >= start && fault_addr < end) + return end - fault_addr; return size; } diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile index cda87333a77..9d0960e69f4 100644 --- a/arch/sparc64/mm/Makefile +++ b/arch/sparc64/mm/Makefile @@ -5,6 +5,6 @@ EXTRA_AFLAGS := -ansi EXTRA_CFLAGS := -Werror -obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o +obj-y := ultra.o tlb.o fault.o init.o generic.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c deleted file mode 100644 index ec334297ff4..00000000000 --- a/arch/sparc64/mm/extable.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * linux/arch/sparc64/mm/extable.c - */ - -#include <linux/config.h> -#include <linux/module.h> -#include <asm/uaccess.h> - -extern const struct exception_table_entry __start___ex_table[]; -extern const struct exception_table_entry __stop___ex_table[]; - -void sort_extable(struct exception_table_entry *start, - struct exception_table_entry *finish) -{ -} - -/* Caller knows they are in a range if ret->fixup == 0 */ -const struct exception_table_entry * -search_extable(const struct exception_table_entry *start, - const struct exception_table_entry *last, - unsigned long value) -{ - const struct exception_table_entry *walk; - - /* Single insn entries are encoded as: - * word 1: insn address - * word 2: fixup code address - * - * Range entries are encoded as: - * word 1: first insn address - * word 2: 0 - * word 3: last insn address + 4 bytes - * word 4: fixup code address - * - * See asm/uaccess.h for more details. - */ - - /* 1. Try to find an exact match. */ - for (walk = start; walk <= last; walk++) { - if (walk->fixup == 0) { - /* A range entry, skip both parts. 
*/ - walk++; - continue; - } - - if (walk->insn == value) - return walk; - } - - /* 2. Try to find a range match. */ - for (walk = start; walk <= (last - 1); walk++) { - if (walk->fixup) - continue; - - if (walk[0].insn <= value && walk[1].insn > value) - return walk; - - walk++; - } - - return NULL; -} - -/* Special extable search, which handles ranges. Returns fixup */ -unsigned long search_extables_range(unsigned long addr, unsigned long *g2) -{ - const struct exception_table_entry *entry; - - entry = search_exception_tables(addr); - if (!entry) - return 0; - - /* Inside range? Fix g2 and return correct fixup */ - if (!entry->fixup) { - *g2 = (addr - entry->insn) / 4; - return (entry + 1)->fixup; - } - - return entry->fixup; -} diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index db1e3310e90..31fbc67719a 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -32,8 +32,6 @@ #define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0])) -extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; - /* * To debug kernel to catch accesses to certain virtual/physical addresses. * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints. @@ -71,53 +69,6 @@ void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode) : "memory"); } -/* Nice, simple, prom library does all the sweating for us. ;) */ -unsigned long __init prom_probe_memory (void) -{ - register struct linux_mlist_p1275 *mlist; - register unsigned long bytes, base_paddr, tally; - register int i; - - i = 0; - mlist = *prom_meminfo()->p1275_available; - bytes = tally = mlist->num_bytes; - base_paddr = mlist->start_adr; - - sp_banks[0].base_addr = base_paddr; - sp_banks[0].num_bytes = bytes; - - while (mlist->theres_more != (void *) 0) { - i++; - mlist = mlist->theres_more; - bytes = mlist->num_bytes; - tally += bytes; - if (i >= SPARC_PHYS_BANKS-1) { - printk ("The machine has more banks than " - "this kernel can support\n" - "Increase the SPARC_PHYS_BANKS " - "setting (currently %d)\n", - SPARC_PHYS_BANKS); - i = SPARC_PHYS_BANKS-1; - break; - } - - sp_banks[i].base_addr = mlist->start_adr; - sp_banks[i].num_bytes = mlist->num_bytes; - } - - i++; - sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL; - sp_banks[i].num_bytes = 0; - - /* Now mask all bank sizes on a page boundary, it is all we can - * use anyways. - */ - for (i = 0; sp_banks[i].num_bytes != 0; i++) - sp_banks[i].num_bytes &= PAGE_MASK; - - return tally; -} - static void __kprobes unhandled_fault(unsigned long address, struct task_struct *tsk, struct pt_regs *regs) @@ -242,7 +193,6 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn) static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, unsigned int insn, unsigned long address) { - unsigned long g2; unsigned char asi = ASI_P; if ((!insn) && (regs->tstate & TSTATE_PRIV)) @@ -273,11 +223,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, } } - g2 = regs->u_regs[UREG_G2]; - /* Is this in ex_table? 
*/ if (regs->tstate & TSTATE_PRIV) { - unsigned long fixup; + const struct exception_table_entry *entry; if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) { if (insn & 0x2000) @@ -288,10 +236,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, /* Look in asi.h: All _S asis have LS bit set */ if ((asi & 0x1) && - (fixup = search_extables_range(regs->tpc, &g2))) { - regs->tpc = fixup; + (entry = search_exception_tables(regs->tpc))) { + regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; - regs->u_regs[UREG_G2] = g2; return; } } else { @@ -461,7 +408,7 @@ good_area: } up_read(&mm->mmap_sem); - goto fault_done; + return; /* * Something tried to access memory that isn't in our memory map.. @@ -473,8 +420,7 @@ bad_area: handle_kernel_fault: do_kernel_fault(regs, si_code, fault_code, insn, address); - - goto fault_done; + return; /* * We ran out of memory, or some other thing happened to us that made @@ -505,9 +451,4 @@ do_sigbus: /* Kernel mode? Handle exceptions or die */ if (regs->tstate & TSTATE_PRIV) goto handle_kernel_fault; - -fault_done: - /* These values are no longer needed, clear them. */ - set_thread_fault_code(0); - current_thread_info()->fault_address = 0; } diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 9f6ca624892..0d2e967c720 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -21,6 +21,7 @@ #include <linux/seq_file.h> #include <linux/kprobes.h> #include <linux/cache.h> +#include <linux/sort.h> #include <asm/head.h> #include <asm/system.h> @@ -41,7 +42,72 @@ extern void device_scan(void); -struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; +#define MAX_BANKS 32 + +static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; +static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; +static int pavail_ents __initdata; +static int pavail_rescan_ents __initdata; + +static int cmp_p64(const void *a, const void *b) +{ + const struct linux_prom64_registers *x = a, *y = b; + + if (x->phys_addr > y->phys_addr) + return 1; + if (x->phys_addr < y->phys_addr) + return -1; + return 0; +} + +static void __init read_obp_memory(const char *property, + struct linux_prom64_registers *regs, + int *num_ents) +{ + int node = prom_finddevice("/memory"); + int prop_size = prom_getproplen(node, property); + int ents, ret, i; + + ents = prop_size / sizeof(struct linux_prom64_registers); + if (ents > MAX_BANKS) { + prom_printf("The machine has more %s property entries than " + "this kernel can support (%d).\n", + property, MAX_BANKS); + prom_halt(); + } + + ret = prom_getproperty(node, property, (char *) regs, prop_size); + if (ret == -1) { + prom_printf("Couldn't get %s property from /memory.\n"); + prom_halt(); + } + + *num_ents = ents; + + /* Sanitize what we got from the firmware, by page aligning + * everything. 
+ */ + for (i = 0; i < ents; i++) { + unsigned long base, size; + + base = regs[i].phys_addr; + size = regs[i].reg_size; + + size &= PAGE_MASK; + if (base & ~PAGE_MASK) { + unsigned long new_base = PAGE_ALIGN(base); + + size -= new_base - base; + if ((long) size < 0L) + size = 0UL; + base = new_base; + } + regs[i].phys_addr = base; + regs[i].reg_size = size; + } + sort(regs, ents, sizeof(struct linux_prom64_registers), + cmp_p64, NULL); +} unsigned long *sparc64_valid_addr_bitmap __read_mostly; @@ -67,6 +133,12 @@ extern unsigned int sparc_ramdisk_size; struct page *mem_map_zero __read_mostly; +unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; + +unsigned long sparc64_kern_pri_context __read_mostly; +unsigned long sparc64_kern_pri_nuc_bits __read_mostly; +unsigned long sparc64_kern_sec_context __read_mostly; + int bigkernel = 0; /* XXX Tune this... */ @@ -296,6 +368,7 @@ struct linux_prom_translation { unsigned long data; }; static struct linux_prom_translation prom_trans[512] __initdata; +static unsigned int prom_trans_ents __initdata; extern unsigned long prom_boot_page; extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); @@ -309,57 +382,7 @@ unsigned long kern_locked_tte_data; unsigned long prom_pmd_phys __read_mostly; unsigned int swapper_pgd_zero __read_mostly; -/* Allocate power-of-2 aligned chunks from the end of the - * kernel image. Return physical address. - */ -static inline unsigned long early_alloc_phys(unsigned long size) -{ - unsigned long base; - - BUILD_BUG_ON(size & (size - 1)); - - kern_size = (kern_size + (size - 1)) & ~(size - 1); - base = kern_base + kern_size; - kern_size += size; - - return base; -} - -static inline unsigned long load_phys32(unsigned long pa) -{ - unsigned long val; - - __asm__ __volatile__("lduwa [%1] %2, %0" - : "=&r" (val) - : "r" (pa), "i" (ASI_PHYS_USE_EC)); - - return val; -} - -static inline unsigned long load_phys64(unsigned long pa) -{ - unsigned long val; - - __asm__ __volatile__("ldxa [%1] %2, %0" - : "=&r" (val) - : "r" (pa), "i" (ASI_PHYS_USE_EC)); - - return val; -} - -static inline void store_phys32(unsigned long pa, unsigned long val) -{ - __asm__ __volatile__("stwa %0, [%1] %2" - : /* no outputs */ - : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC)); -} - -static inline void store_phys64(unsigned long pa, unsigned long val) -{ - __asm__ __volatile__("stxa %0, [%1] %2" - : /* no outputs */ - : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC)); -} +static pmd_t *prompmd __read_mostly; #define BASE_PAGE_SIZE 8192 @@ -369,34 +392,28 @@ static inline void store_phys64(unsigned long pa, unsigned long val) */ unsigned long prom_virt_to_phys(unsigned long promva, int *error) { - unsigned long pmd_phys = (prom_pmd_phys + - ((promva >> 23) & 0x7ff) * sizeof(pmd_t)); - unsigned long pte_phys; - pmd_t pmd_ent; - pte_t pte_ent; + pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff); + pte_t *ptep; unsigned long base; - pmd_val(pmd_ent) = load_phys32(pmd_phys); - if (pmd_none(pmd_ent)) { + if (pmd_none(*pmdp)) { if (error) *error = 1; return 0; } - - pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL; - pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t); - pte_val(pte_ent) = load_phys64(pte_phys); - if (!pte_present(pte_ent)) { + ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff); + if (!pte_present(*ptep)) { if (error) *error = 1; return 0; } if (error) { *error = 0; - return pte_val(pte_ent); + return pte_val(*ptep); } - base = pte_val(pte_ent) & _PAGE_PADDR; - return (base + (promva & 
 
 /* The obp translations are saved based on 8k pagesize, since obp can
@@ -409,25 +426,20 @@ static void __init build_obp_range(unsigned long start, unsigned long end, unsig
 	unsigned long vaddr;
 
 	for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
-		unsigned long val, pte_phys, pmd_phys;
-		pmd_t pmd_ent;
-		int i;
-
-		pmd_phys = (prom_pmd_phys +
-			    (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
-		pmd_val(pmd_ent) = load_phys32(pmd_phys);
-		if (pmd_none(pmd_ent)) {
-			pte_phys = early_alloc_phys(BASE_PAGE_SIZE);
-
-			for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
-				store_phys64(pte_phys+i*sizeof(pte_t),0);
+		unsigned long val;
+		pmd_t *pmd;
+		pte_t *pte;
 
-			pmd_val(pmd_ent) = pte_phys >> 11UL;
-			store_phys32(pmd_phys, pmd_val(pmd_ent));
+		pmd = prompmd + ((vaddr >> 23) & 0x7ff);
+		if (pmd_none(*pmd)) {
+			pte = __alloc_bootmem(BASE_PAGE_SIZE, BASE_PAGE_SIZE,
+					      PAGE_SIZE);
+			if (!pte)
+				prom_halt();
+			memset(pte, 0, BASE_PAGE_SIZE);
+			pmd_set(pmd, pte);
 		}
-
-		pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
-		pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));
+		pte = (pte_t *) __pmd_page(*pmd) + ((vaddr >> 13) & 0x3ff);
 
 		val = data;
 
@@ -435,7 +447,8 @@ static void __init build_obp_range(unsigned long start, unsigned long end, unsig
 		if (tlb_type == spitfire)
 			val &= ~0x0003fe0000000000UL;
 
-		store_phys64(pte_phys, val | _PAGE_MODIFIED);
+		set_pte_at(&init_mm, vaddr, pte,
+			   __pte(val | _PAGE_MODIFIED));
 
 		data += BASE_PAGE_SIZE;
 	}
@@ -448,13 +461,17 @@ static inline int in_obp_range(unsigned long vaddr)
 }
 
 #define OBP_PMD_SIZE 2048
-static void __init build_obp_pgtable(int prom_trans_ents)
+static void __init build_obp_pgtable(void)
 {
 	unsigned long i;
 
-	prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
-	for (i = 0; i < OBP_PMD_SIZE; i += 4)
-		store_phys32(prom_pmd_phys + i, 0);
+	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, PAGE_SIZE);
+	if (!prompmd)
+		prom_halt();
+
+	memset(prompmd, 0, OBP_PMD_SIZE);
+
+	prom_pmd_phys = __pa(prompmd);
 
 	for (i = 0; i < prom_trans_ents; i++) {
 		unsigned long start, end;
@@ -474,7 +491,7 @@ static void __init build_obp_pgtable(int prom_trans_ents)
 
 /* Read OBP translations property into 'prom_trans[]'.
  * Return the number of entries.
  */
-static int __init read_obp_translations(void)
+static void __init read_obp_translations(void)
 {
 	int n, node;
 
@@ -495,8 +512,10 @@ static int __init read_obp_translations(void)
 		prom_printf("prom_mappings: Couldn't get property.\n");
 		prom_halt();
 	}
+
 	n = n / sizeof(struct linux_prom_translation);
-	return n;
+
+	prom_trans_ents = n;
 }
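build_obp_range() above is the write side of the same table: it allocates a zeroed pte page the first time a pmd slot is touched and then installs one entry per 8K step of the mapping, advancing the physical data value in lockstep. A rough user-space sketch of that idea follows, with calloc() standing in for __alloc_bootmem() and a made-up valid bit; none of the names are kernel interfaces.

/* User-space sketch of the build_obp_range() pattern above: on-demand
 * pte-page allocation plus one installed entry per 8K of the range.
 */
#include <stdio.h>
#include <stdlib.h>

#define BASE_PAGE_SIZE	8192UL

typedef unsigned long pte_t;

static pte_t *pmd_slots[2048];		/* index: (va >> 23) & 0x7ff */

static void build_range(unsigned long start, unsigned long end,
			unsigned long data)
{
	unsigned long vaddr;

	for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
		pte_t **pmd = &pmd_slots[(vaddr >> 23) & 0x7ff];
		pte_t *pte;

		if (!*pmd) {			/* "pmd_none": allocate a pte page */
			*pmd = calloc(1024, sizeof(pte_t));
			if (!*pmd)
				exit(1);	/* the kernel would prom_halt() */
		}
		pte = *pmd + ((vaddr >> 13) & 0x3ff);
		*pte = data | 1UL;		/* assume bit 0 marks the entry valid */

		data += BASE_PAGE_SIZE;		/* next 8K of the backing memory */
	}
}

int main(void)
{
	build_range(0xf0000000UL, 0xf0000000UL + 4 * BASE_PAGE_SIZE,
		    0x40000000UL);
	printf("second pte: %lx\n",
	       pmd_slots[(0xf0000000UL >> 23) & 0x7ff][(0xf0002000UL >> 13) & 0x3ff]);
	return 0;
}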
 
 static void __init remap_kernel(void)
@@ -516,28 +535,38 @@ static void __init remap_kernel(void)
 	prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
 	prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
 	if (bigkernel) {
-		prom_dtlb_load(tlb_ent - 1,
+		tlb_ent -= 1;
+		prom_dtlb_load(tlb_ent,
 			       tte_data + 0x400000,
 			       tte_vaddr + 0x400000);
-		prom_itlb_load(tlb_ent - 1,
+		prom_itlb_load(tlb_ent,
 			       tte_data + 0x400000,
 			       tte_vaddr + 0x400000);
 	}
+	sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+	if (tlb_type == cheetah_plus) {
+		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
+					    CTX_CHEETAH_PLUS_NUC);
+		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
+		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
+	}
 }
 
-static void __init inherit_prom_mappings(void)
-{
-	int n;
-
-	n = read_obp_translations();
-	build_obp_pgtable(n);
+static void __init inherit_prom_mappings_pre(void)
+{
+	read_obp_translations();
 
 	/* Now fixup OBP's idea about where we really are mapped. */
 	prom_printf("Remapping the kernel... ");
 	remap_kernel();
 	prom_printf("done.\n");
+}
 
+static void __init inherit_prom_mappings_post(void)
+{
+	build_obp_pgtable();
 	register_prom_callbacks();
 }
@@ -722,8 +751,8 @@ void inherit_locked_prom_mappings(int save_p)
 		}
 	}
 	if (tlb_type == spitfire) {
-		int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
-		for (i = 0; i < high; i++) {
+		int high = sparc64_highest_unlocked_tlb_ent;
+		for (i = 0; i <= high; i++) {
 			unsigned long data;
 
 			/* Spitfire Errata #32 workaround */
@@ -811,9 +840,9 @@ void inherit_locked_prom_mappings(int save_p)
 			}
 		}
 	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
+		int high = sparc64_highest_unlocked_tlb_ent;
 
-		for (i = 0; i < high; i++) {
+		for (i = 0; i <= high; i++) {
 			unsigned long data;
 
 			data = cheetah_get_ldtlb_data(i);
@@ -1206,14 +1235,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	int i;
 
 #ifdef CONFIG_DEBUG_BOOTMEM
-	prom_printf("bootmem_init: Scan sp_banks, ");
+	prom_printf("bootmem_init: Scan pavail, ");
 #endif
 
 	bytes_avail = 0UL;
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-		end_of_phys_memory = sp_banks[i].base_addr +
-			sp_banks[i].num_bytes;
-		bytes_avail += sp_banks[i].num_bytes;
+	for (i = 0; i < pavail_ents; i++) {
+		end_of_phys_memory = pavail[i].phys_addr +
+			pavail[i].reg_size;
+		bytes_avail += pavail[i].reg_size;
 		if (cmdline_memory_size) {
 			if (bytes_avail > cmdline_memory_size) {
 				unsigned long slack = bytes_avail - cmdline_memory_size;
@@ -1221,12 +1250,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 
 				bytes_avail -= slack;
 				end_of_phys_memory -= slack;
-				sp_banks[i].num_bytes -= slack;
-				if (sp_banks[i].num_bytes == 0) {
-					sp_banks[i].base_addr = 0xdeadbeef;
+				pavail[i].reg_size -= slack;
+				if ((long)pavail[i].reg_size <= 0L) {
+					pavail[i].phys_addr = 0xdeadbeefUL;
+					pavail[i].reg_size = 0UL;
+					pavail_ents = i;
 				} else {
-					sp_banks[i+1].num_bytes = 0;
-					sp_banks[i+1].base_addr = 0xdeadbeef;
+					pavail[i+1].reg_size = 0UL;
+					pavail[i+1].phys_addr = 0xdeadbeefUL;
+					pavail_ents = i + 1;
 				}
 				break;
 			}
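The bootmem_init() hunk above also carries the mem= handling over to the new pavail[] array: bank sizes are summed until the command-line limit is exceeded, the current bank is shrunk by the slack, and the entry count is truncated at that point. A small stand-alone sketch of that trimming, with made-up bank values and a hypothetical 384MB limit:

/* User-space sketch of the mem= trimming logic above. */
#include <stdio.h>

struct bank { unsigned long base, size; };

int main(void)
{
	struct bank pavail[] = {
		{ 0x00000000UL, 0x10000000UL },		/* 256MB */
		{ 0x20000000UL, 0x10000000UL },		/* 256MB */
		{ 0x40000000UL, 0x10000000UL },		/* 256MB */
	};
	int i, ents = 3;
	unsigned long limit = 0x18000000UL;		/* pretend "mem=384M" */
	unsigned long bytes_avail = 0UL;

	for (i = 0; i < ents; i++) {
		bytes_avail += pavail[i].size;
		if (bytes_avail > limit) {
			unsigned long slack = bytes_avail - limit;

			bytes_avail -= slack;
			pavail[i].size -= slack;
			if ((long) pavail[i].size <= 0L) {
				pavail[i].size = 0UL;
				ents = i;		/* drop this bank too */
			} else {
				ents = i + 1;		/* keep the shrunken bank */
			}
			break;
		}
	}

	printf("using %lu bytes in %d bank(s)\n", bytes_avail, ents);
	for (i = 0; i < ents; i++)
		printf("bank %d: base %lx size %lx\n",
		       i, pavail[i].base, pavail[i].size);
	return 0;
}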
@@ -1280,12 +1312,12 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	/* Now register the available physical memory with the
 	 * allocator.
 	 */
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+	for (i = 0; i < pavail_ents; i++) {
 #ifdef CONFIG_DEBUG_BOOTMEM
-		prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
-			    i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
+		prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
+			    i, pavail[i].phys_addr, pavail[i].reg_size);
 #endif
-		free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
+		free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
 	}
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1334,7 +1366,7 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend,
 	unsigned long alloc_bytes = 0UL;
 
 	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
-		prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
+		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
 			    vstart, vend);
 		prom_halt();
 	}
@@ -1381,23 +1413,24 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend,
 	return alloc_bytes;
 }
 
-extern struct linux_mlist_p1275 *prom_ptot_ptr;
+static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+static int pall_ents __initdata;
+
 extern unsigned int kvmap_linear_patch[1];
 
 static void __init kernel_physical_mapping_init(void)
 {
-	struct linux_mlist_p1275 *p = prom_ptot_ptr;
-	unsigned long mem_alloced = 0UL;
+	unsigned long i, mem_alloced = 0UL;
+
+	read_obp_memory("reg", &pall[0], &pall_ents);
 
-	while (p) {
+	for (i = 0; i < pall_ents; i++) {
 		unsigned long phys_start, phys_end;
 
-		phys_start = p->start_adr;
-		phys_end = phys_start + p->num_bytes;
+		phys_start = pall[i].phys_addr;
+		phys_end = phys_start + pall[i].reg_size;
 		mem_alloced += kernel_map_range(phys_start, phys_end,
 						PAGE_KERNEL);
-
-		p = p->theres_more;
 	}
 
 	printk("Allocated %ld bytes for kernel page tables.\n",
@@ -1425,6 +1458,18 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 }
 #endif
 
+unsigned long __init find_ecache_flush_span(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < pavail_ents; i++) {
+		if (pavail[i].reg_size >= size)
+			return pavail[i].phys_addr;
+	}
+
+	return ~0UL;
+}
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
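find_ecache_flush_span() added above simply returns the base of the first bank large enough to hold the requested span (the list is sorted, so this is also the lowest such address), or ~0UL when no single bank qualifies. The same logic in a user-space sketch, with illustrative bank values and an assumed 8MB E-cache:

/* User-space sketch of the find_ecache_flush_span() search above. */
#include <stdio.h>

struct bank { unsigned long base, size; };

static unsigned long find_span(const struct bank *b, int ents,
			       unsigned long size)
{
	int i;

	for (i = 0; i < ents; i++) {
		if (b[i].size >= size)
			return b[i].base;
	}
	return ~0UL;		/* no single bank is big enough */
}

int main(void)
{
	struct bank pavail[] = {
		{ 0x00000000UL, 0x00200000UL },		/* 2MB  */
		{ 0x20000000UL, 0x01000000UL },		/* 16MB */
	};

	/* An 8MB E-cache needs an 8MB contiguous span to flush by reading. */
	printf("span at %lx\n", find_span(pavail, 2, 0x800000UL));
	return 0;
}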
@@ -1435,7 +1480,19 @@ pgd_t swapper_pg_dir[2048];
 
 void __init paging_init(void)
 {
 	unsigned long end_pfn, pages_avail, shift;
-	unsigned long real_end;
+	unsigned long real_end, i;
+
+	/* Find available physical memory... */
+	read_obp_memory("available", &pavail[0], &pavail_ents);
+
+	phys_base = 0xffffffffffffffffUL;
+	for (i = 0; i < pavail_ents; i++)
+		phys_base = min(phys_base, pavail[i].phys_addr);
+
+	pfn_base = phys_base >> PAGE_SHIFT;
+
+	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
 	set_bit(0, mmu_context_bmap);
@@ -1462,8 +1519,7 @@ void __init paging_init(void)
 
 	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
 
-	/* Inherit non-locked OBP mappings. */
-	inherit_prom_mappings();
+	inherit_prom_mappings_pre();
 
 	/* Ok, we can use our TLB miss and window trap handlers safely.
 	 * We need to do a quick peek here to see if we are on StarFire
@@ -1474,15 +1530,23 @@ void __init paging_init(void)
 		extern void setup_tba(int);
 		setup_tba(this_is_starfire);
 	}
 
-	inherit_locked_prom_mappings(1);
-
 	__flush_tlb_all();
+
+	/* Everything from this point forward, until we are done with
+	 * inherit_prom_mappings_post(), must complete successfully
+	 * without calling into the firmware.  The firmware page tables
+	 * have not been built, but we are running on the Linux kernel's
+	 * trap table.
+	 */
 
 	/* Setup bootmem... */
 	pages_avail = 0;
 	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
 
+	inherit_prom_mappings_post();
+
+	inherit_locked_prom_mappings(1);
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	kernel_physical_mapping_init();
 #endif
@@ -1507,128 +1571,35 @@ void __init paging_init(void)
 	device_scan();
 }
 
-/* Ok, it seems that the prom can allocate some more memory chunks
- * as a side effect of some prom calls we perform during the
- * boot sequence.  My most likely theory is that it is from the
- * prom_set_traptable() call, and OBP is allocating a scratchpad
- * for saving client program register state etc.
- */
-static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
-{
-	int swapi = 0;
-	int i, mitr;
-	unsigned long tmpaddr, tmpsize;
-	unsigned long lowest;
-
-	for (i = 0; thislist[i].theres_more != 0; i++) {
-		lowest = thislist[i].start_adr;
-		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
-			if (thislist[mitr].start_adr < lowest) {
-				lowest = thislist[mitr].start_adr;
-				swapi = mitr;
-			}
-		if (lowest == thislist[i].start_adr)
-			continue;
-		tmpaddr = thislist[swapi].start_adr;
-		tmpsize = thislist[swapi].num_bytes;
-		for (mitr = swapi; mitr > i; mitr--) {
-			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
-			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
-		}
-		thislist[i].start_adr = tmpaddr;
-		thislist[i].num_bytes = tmpsize;
-	}
-}
-
-void __init rescan_sp_banks(void)
-{
-	struct linux_prom64_registers memlist[64];
-	struct linux_mlist_p1275 avail[64], *mlist;
-	unsigned long bytes, base_paddr;
-	int num_regs, node = prom_finddevice("/memory");
-	int i;
-
-	num_regs = prom_getproperty(node, "available",
-				    (char *) memlist, sizeof(memlist));
-	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
-	for (i = 0; i < num_regs; i++) {
-		avail[i].start_adr = memlist[i].phys_addr;
-		avail[i].num_bytes = memlist[i].reg_size;
-		avail[i].theres_more = &avail[i + 1];
-	}
-	avail[i - 1].theres_more = NULL;
-	sort_memlist(avail);
-
-	mlist = &avail[0];
-	i = 0;
-	bytes = mlist->num_bytes;
-	base_paddr = mlist->start_adr;
-
-	sp_banks[0].base_addr = base_paddr;
-	sp_banks[0].num_bytes = bytes;
-
-	while (mlist->theres_more != NULL){
-		i++;
-		mlist = mlist->theres_more;
-		bytes = mlist->num_bytes;
-		if (i >= SPARC_PHYS_BANKS-1) {
-			printk ("The machine has more banks than "
-				"this kernel can support\n"
-				"Increase the SPARC_PHYS_BANKS "
-				"setting (currently %d)\n",
-				SPARC_PHYS_BANKS);
-			i = SPARC_PHYS_BANKS-1;
-			break;
-		}
-
-		sp_banks[i].base_addr = mlist->start_adr;
-		sp_banks[i].num_bytes = mlist->num_bytes;
-	}
-
-	i++;
-	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
-	sp_banks[i].num_bytes = 0;
-
-	for (i = 0; sp_banks[i].num_bytes != 0; i++)
-		sp_banks[i].num_bytes &= PAGE_MASK;
-}
-
 static void __init taint_real_pages(void)
 {
-	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
 	int i;
 
-	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
-		saved_sp_banks[i].base_addr =
-			sp_banks[i].base_addr;
-		saved_sp_banks[i].num_bytes =
-			sp_banks[i].num_bytes;
-	}
-
-	rescan_sp_banks();
+	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
 
-	/* Find changes discovered in the sp_bank rescan and
+	/* Find changes discovered in the physmem available rescan and
 	 * reserve the lost portions in the bootmem maps.
 	 */
-	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
+	for (i = 0; i < pavail_ents; i++) {
 		unsigned long old_start, old_end;
 
-		old_start = saved_sp_banks[i].base_addr;
+		old_start = pavail[i].phys_addr;
 		old_end = old_start +
-			saved_sp_banks[i].num_bytes;
+			pavail[i].reg_size;
 		while (old_start < old_end) {
 			int n;
 
-			for (n = 0; sp_banks[n].num_bytes; n++) {
+			for (n = 0; n < pavail_rescan_ents; n++) {
 				unsigned long new_start, new_end;
 
-				new_start = sp_banks[n].base_addr;
-				new_end = new_start + sp_banks[n].num_bytes;
+				new_start = pavail_rescan[n].phys_addr;
+				new_end = new_start +
+					pavail_rescan[n].reg_size;
 
 				if (new_start <= old_start &&
 				    new_end >= (old_start + PAGE_SIZE)) {
-					set_bit (old_start >> 22,
-						 sparc64_valid_addr_bitmap);
+					set_bit(old_start >> 22,
+						sparc64_valid_addr_bitmap);
 					goto do_next_page;
 				}
 			}
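The reworked taint_real_pages() above re-reads the firmware's "available" list and compares it page by page against the copy taken at boot; pages no longer covered get reserved in the bootmem map so the kernel never hands them out. The sketch below models that check in user space; the page size constant and the bank contents are illustrative assumptions only.

/* User-space sketch of the rescan-and-reserve check above. */
#include <stdio.h>

#define PAGE_SIZE	8192UL

struct bank { unsigned long base, size; };

static int still_available(unsigned long page,
			   const struct bank *rescan, int ents)
{
	int n;

	for (n = 0; n < ents; n++) {
		unsigned long new_start = rescan[n].base;
		unsigned long new_end = new_start + rescan[n].size;

		if (new_start <= page && new_end >= page + PAGE_SIZE)
			return 1;		/* page survived the rescan */
	}
	return 0;				/* lost to the firmware */
}

int main(void)
{
	struct bank pavail[]        = { { 0x0000UL, 8 * PAGE_SIZE } };
	struct bank pavail_rescan[] = { { 0x0000UL, 6 * PAGE_SIZE } };
	unsigned long page;

	for (page = pavail[0].base;
	     page < pavail[0].base + pavail[0].size; page += PAGE_SIZE) {
		if (!still_available(page, pavail_rescan, 1))
			printf("would reserve_bootmem(%lx, %lu)\n",
			       page, PAGE_SIZE);
	}
	return 0;
}

With the sample values, the last two 8K pages vanish in the rescan and would be reserved, which is the situation the kernel guards against when OBP grabs memory behind its back.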
diff --git a/arch/sparc64/prom/Makefile b/arch/sparc64/prom/Makefile
index c7898a5ee45..3d33ed27bc2 100644
--- a/arch/sparc64/prom/Makefile
+++ b/arch/sparc64/prom/Makefile
@@ -6,5 +6,5 @@
 EXTRA_AFLAGS := -ansi
 EXTRA_CFLAGS := -Werror
 
-lib-y := bootstr.o devops.o init.o memory.o misc.o \
+lib-y := bootstr.o devops.o init.o misc.o \
 	 tree.o console.o printf.o p1275.o cif.o
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
index 8b4b622d090..f3cc2d8578b 100644
--- a/arch/sparc64/prom/init.c
+++ b/arch/sparc64/prom/init.c
@@ -27,7 +27,6 @@ int prom_chosen_node;
  * failure. It gets passed the pointer to the PROM vector.
  */
 
-extern void prom_meminit(void);
 extern void prom_cif_init(void *, void *);
 
 void __init prom_init(void *cif_handler, void *cif_stack)
@@ -90,8 +89,6 @@ void __init prom_init(void *cif_handler, void *cif_stack)
 	printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
 
-	prom_meminit();
-
 	/* Initialization successful. */
 	return;
diff --git a/arch/sparc64/prom/memory.c b/arch/sparc64/prom/memory.c
deleted file mode 100644
index f4a8143e052..00000000000
--- a/arch/sparc64/prom/memory.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/* $Id: memory.c,v 1.5 1999/08/31 06:55:04 davem Exp $
- * memory.c: Prom routine for acquiring various bits of information
- *           about RAM on the machine, both virtual and physical.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-
-/* This routine, for consistency, returns the ram parameters in the
- * V0 prom memory descriptor format.  I choose this format because I
- * think it was the easiest to work with.  I feel the religious
- * arguments now... ;)  Also, I return the linked lists sorted to
- * prevent paging_init() upset stomach as I have not yet written
- * the pepto-bismol kernel module yet.
- */
-
-struct linux_prom64_registers prom_reg_memlist[64];
-struct linux_prom64_registers prom_reg_tmp[64];
-
-struct linux_mlist_p1275 prom_phys_total[64];
-struct linux_mlist_p1275 prom_prom_taken[64];
-struct linux_mlist_p1275 prom_phys_avail[64];
-
-struct linux_mlist_p1275 *prom_ptot_ptr = prom_phys_total;
-struct linux_mlist_p1275 *prom_ptak_ptr = prom_prom_taken;
-struct linux_mlist_p1275 *prom_pavl_ptr = prom_phys_avail;
-
-struct linux_mem_p1275 prom_memlist;
-
-
-/* Internal Prom library routine to sort a linux_mlist_p1275 memory
- * list.  Used below in initialization.
- */
-static void __init
-prom_sortmemlist(struct linux_mlist_p1275 *thislist)
-{
-	int swapi = 0;
-	int i, mitr;
-	unsigned long tmpaddr, tmpsize;
-	unsigned long lowest;
-
-	for(i=0; thislist[i].theres_more; i++) {
-		lowest = thislist[i].start_adr;
-		for(mitr = i+1; thislist[mitr-1].theres_more; mitr++)
-			if(thislist[mitr].start_adr < lowest) {
-				lowest = thislist[mitr].start_adr;
-				swapi = mitr;
-			}
-		if(lowest == thislist[i].start_adr) continue;
-		tmpaddr = thislist[swapi].start_adr;
-		tmpsize = thislist[swapi].num_bytes;
-		for(mitr = swapi; mitr > i; mitr--) {
-			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
-			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
-		}
-		thislist[i].start_adr = tmpaddr;
-		thislist[i].num_bytes = tmpsize;
-	}
-}
-
-/* Initialize the memory lists based upon the prom version. */
-void __init prom_meminit(void)
-{
-	int node = 0;
-	unsigned int iter, num_regs;
-
-	node = prom_finddevice("/memory");
-	num_regs = prom_getproperty(node, "available",
-				    (char *) prom_reg_memlist,
-				    sizeof(prom_reg_memlist));
-	num_regs = (num_regs/sizeof(struct linux_prom64_registers));
-	for(iter=0; iter<num_regs; iter++) {
-		prom_phys_avail[iter].start_adr =
-			prom_reg_memlist[iter].phys_addr;
-		prom_phys_avail[iter].num_bytes =
-			prom_reg_memlist[iter].reg_size;
-		prom_phys_avail[iter].theres_more =
-			&prom_phys_avail[iter+1];
-	}
-	prom_phys_avail[iter-1].theres_more = NULL;
-
-	num_regs = prom_getproperty(node, "reg",
-				    (char *) prom_reg_memlist,
-				    sizeof(prom_reg_memlist));
-	num_regs = (num_regs/sizeof(struct linux_prom64_registers));
-	for(iter=0; iter<num_regs; iter++) {
-		prom_phys_total[iter].start_adr =
-			prom_reg_memlist[iter].phys_addr;
-		prom_phys_total[iter].num_bytes =
-			prom_reg_memlist[iter].reg_size;
-		prom_phys_total[iter].theres_more =
-			&prom_phys_total[iter+1];
-	}
-	prom_phys_total[iter-1].theres_more = NULL;
-
-	node = prom_finddevice("/virtual-memory");
-	num_regs = prom_getproperty(node, "available",
-				    (char *) prom_reg_memlist,
-				    sizeof(prom_reg_memlist));
-	num_regs = (num_regs/sizeof(struct linux_prom64_registers));
-
-	/* Convert available virtual areas to taken virtual
-	 * areas.  First sort, then convert.
-	 */
-	for(iter=0; iter<num_regs; iter++) {
-		prom_prom_taken[iter].start_adr =
-			prom_reg_memlist[iter].phys_addr;
-		prom_prom_taken[iter].num_bytes =
-			prom_reg_memlist[iter].reg_size;
-		prom_prom_taken[iter].theres_more =
-			&prom_prom_taken[iter+1];
-	}
-	prom_prom_taken[iter-1].theres_more = NULL;
-
-	prom_sortmemlist(prom_prom_taken);
-
-	/* Finally, convert. */
-	for(iter=0; iter<num_regs; iter++) {
-		prom_prom_taken[iter].start_adr =
-			prom_prom_taken[iter].start_adr +
-			prom_prom_taken[iter].num_bytes;
-		prom_prom_taken[iter].num_bytes =
-			prom_prom_taken[iter+1].start_adr -
-			prom_prom_taken[iter].start_adr;
-	}
-	prom_prom_taken[iter-1].num_bytes =
-		-1UL - prom_prom_taken[iter-1].start_adr;
-
-	/* Sort the other two lists. */
-	prom_sortmemlist(prom_phys_total);
-	prom_sortmemlist(prom_phys_avail);
-
-	/* Link all the lists into the top-level descriptor. */
-	prom_memlist.p1275_totphys=&prom_ptot_ptr;
-	prom_memlist.p1275_prommap=&prom_ptak_ptr;
-	prom_memlist.p1275_available=&prom_pavl_ptr;
-}
-
-/* This returns a pointer to our libraries internal p1275 format
- * memory descriptor.
- */
-struct linux_mem_p1275 *
-prom_meminfo(void)
-{
-	return &prom_memlist;
-}