Diffstat (limited to 'arch/sparc64')
-rw-r--r-- | arch/sparc64/kernel/pci_iommu.c  | 221
-rw-r--r-- | arch/sparc64/kernel/pci_psycho.c |   2
-rw-r--r-- | arch/sparc64/kernel/pci_sabre.c  |   2
-rw-r--r-- | arch/sparc64/kernel/pci_schizo.c |   2
-rw-r--r-- | arch/sparc64/kernel/sbus.c       |  41
-rw-r--r-- | arch/sparc64/kernel/setup.c      |  11
-rw-r--r-- | arch/sparc64/kernel/smp.c        |   3
-rw-r--r-- | arch/sparc64/kernel/traps.c      |  19
8 files changed, 186 insertions(+), 115 deletions(-)
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 292983413ae..2803bc7c2c7 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/delay.h>
 
 #include <asm/pbm.h>
 
@@ -195,6 +196,34 @@ static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long
 	return NULL;
 }
 
+static int iommu_alloc_ctx(struct pci_iommu *iommu)
+{
+	int lowest = iommu->ctx_lowest_free;
+	int sz = IOMMU_NUM_CTXS - lowest;
+	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+
+	if (unlikely(n == sz)) {
+		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
+		if (unlikely(n == lowest)) {
+			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
+			n = 0;
+		}
+	}
+	if (n)
+		__set_bit(n, iommu->ctx_bitmap);
+
+	return n;
+}
+
+static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+{
+	if (likely(ctx)) {
+		__clear_bit(ctx, iommu->ctx_bitmap);
+		if (ctx < iommu->ctx_lowest_free)
+			iommu->ctx_lowest_free = ctx;
+	}
+}
+
 /* Allocate and map kernel buffer of size SIZE using consistent mode
  * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
  * successful and set *DMA_ADDRP to the PCI side dma address.
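The iommu_alloc_ctx()/iommu_free_ctx() pair above replaces the old free-running iommu_cur_ctx counter, which could hand out a context value still in use by a live mapping once it wrapped. The bitmap allocator searches upward from a lowest-free hint, wraps around to bit 1 if the top is full, and falls back to the shared context 0 only when every context is taken; freeing a context below the hint pulls the hint back down. Here is a standalone user-space sketch of the same next-fit-with-hint pattern, with illustrative names standing in for the kernel's find_next_zero_bit()/__set_bit()/__clear_bit():

    #include <stdio.h>

    #define NUM_CTXS 4096

    static unsigned char ctx_bitmap[NUM_CTXS];   /* 0 = free, 1 = in use */
    static int ctx_lowest_free = 1;              /* context 0 is reserved */

    /* Scan [start, end) for a free slot; returns end on failure,
     * mirroring find_next_zero_bit() semantics. */
    static int next_zero(int start, int end)
    {
            int n;

            for (n = start; n < end; n++)
                    if (!ctx_bitmap[n])
                            return n;
            return end;
    }

    static int ctx_alloc(void)
    {
            int n = next_zero(ctx_lowest_free, NUM_CTXS);

            if (n == NUM_CTXS) {            /* wrap: retry below the hint */
                    n = next_zero(1, ctx_lowest_free);
                    if (n == ctx_lowest_free)
                            return 0;       /* exhausted: share context 0 */
            }
            ctx_bitmap[n] = 1;
            return n;
    }

    static void ctx_free(int ctx)
    {
            if (ctx) {
                    ctx_bitmap[ctx] = 0;
                    if (ctx < ctx_lowest_free)
                            ctx_lowest_free = ctx;  /* keep the hint low */
            }
    }

    int main(void)
    {
            int a = ctx_alloc(), b = ctx_alloc();

            printf("a=%d b=%d\n", a, b);    /* a=1 b=2 */
            ctx_free(a);
            printf("c=%d\n", ctx_alloc());  /* c=1 again, via the hint */
            return 0;
    }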
@@ -235,7 +264,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	npages = size >> IO_PAGE_SHIFT;
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
 		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
@@ -316,6 +345,8 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 		}
 	}
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	order = get_order(size);
@@ -359,7 +390,7 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -379,6 +410,70 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
+{
+	int limit;
+
+	if (strbuf->strbuf_ctxflush &&
+	    iommu->iommu_ctxflush) {
+		unsigned long matchreg, flushreg;
+		u64 val;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+
+		pci_iommu_write(flushreg, ctx);
+		val = pci_iommu_read(matchreg);
+		val &= 0xffff;
+		if (!val)
+			goto do_flush_sync;
+
+		while (val) {
+			if (val & 0x1)
+				pci_iommu_write(flushreg, ctx);
+			val >>= 1;
+		}
+		val = pci_iommu_read(matchreg);
+		if (unlikely(val)) {
+			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
+			       "timeout matchreg[%lx] ctx[%lx]\n",
+			       val, ctx);
+			goto do_page_flush;
+		}
+	} else {
+		unsigned long i;
+
+	do_page_flush:
+		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+	}
+
+do_flush_sync:
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == PCI_DMA_TODEVICE)
+		return;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+
+	limit = 100000;
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
+		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
+		       vaddr, ctx, npages);
+}
+
 /* Unmap a single streaming mode DMA translation. */
 void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
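pci_strbuf_flush() consolidates three nearly identical copies of this sequence and hardens both of its loops. The context-flush branch no longer spins until the CTXMATCH diagnostic register reads zero: it issues one flush, reads back the low 16 bits of the match register (the streaming-cache tags still owned by the context), re-flushes per set bit, and drops to the page-by-page path if tags remain stuck. The flush-flag wait is likewise capped at 100000 one-microsecond polls instead of looping forever on wedged hardware, which is why the file now pulls in linux/delay.h for udelay(). The bounded wait, restated as a self-contained helper (hypothetical name; usleep() stands in for the kernel's udelay() and membar()):

    #include <stdbool.h>
    #include <unistd.h>

    static bool wait_for_flushflag(volatile unsigned long *flag)
    {
            int limit = 100000;     /* ~100ms of 1us polls, as in the patch */

            while (*flag == 0UL) {
                    if (!--limit)
                            return false;   /* timed out: caller warns */
                    usleep(1);
            }
            return true;
    }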
@@ -386,7 +481,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, i, ctx;
+	unsigned long flags, npages, ctx;
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
@@ -414,29 +509,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -444,6 +518,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -583,7 +659,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	/* Step 4: Choose a context if necessary. */
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 
 	/* Step 5: Create the mappings. */
 	if (strbuf->strbuf_enabled)
@@ -647,29 +723,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = (u32) bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -677,6 +732,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -715,28 +772,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while(((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i;
-
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -749,7 +785,8 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
-	unsigned long flags, ctx;
+	unsigned long flags, ctx, npages, i;
+	u32 bus_addr;
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -772,36 +809,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while (((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i, npages;
-		u32 bus_addr;
-
-		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-
-		for(i = 1; i < nelems; i++)
-			if (!sglist[i].dma_length)
-				break;
-		i--;
-		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+	for(i = 1; i < nelems; i++)
+		if (!sglist[i].dma_length)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		  - bus_addr) >> IO_PAGE_SHIFT;
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
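In pci_dma_sync_sg_for_cpu() the flush-range computation moves out of the non-context branch and now runs unconditionally, since pci_strbuf_flush() needs npages for its page-flush fallback even on context-flush hardware: walk to the last populated scatterlist entry, then take the page-aligned distance from the first entry's page base. A runnable sketch of that computation (hypothetical struct in place of struct scatterlist; IO_PAGE_SHIFT is 13 on sparc64):

    #include <stdio.h>

    #define IO_PAGE_SHIFT   13
    #define IO_PAGE_SIZE    (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK    (~(IO_PAGE_SIZE - 1))
    #define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

    struct sg_ent { unsigned long dma_address, dma_length; };

    static unsigned long flush_npages(const struct sg_ent *sglist, int nelems)
    {
            unsigned long bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
            int i;

            for (i = 1; i < nelems; i++)    /* stop at first unused entry */
                    if (!sglist[i].dma_length)
                            break;
            i--;                            /* back up to the last used one */
            return (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
                    - bus_addr) >> IO_PAGE_SHIFT;
    }

    int main(void)
    {
            struct sg_ent sgl[2] = { { 0x10000, 100 }, { 0x12000, 200 } };

            /* 0x10000..0x120c8 spans two 8K IO pages. */
            printf("npages=%lu\n", flush_npages(sgl, 2));   /* npages=2 */
            return 0;
    }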
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 3567fa879e1..534320ef0db 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1212,7 +1212,7 @@ static void __init psycho_iommu_init(struct pci_controller_info *p)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control  = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 5525d1ec4af..53d333b4a4e 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1265,7 +1265,7 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control  = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index e93fcadc372..5753175b94e 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1753,7 +1753,7 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses, SCHIZO has iommu ctx flushing. */
 	iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
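All three PCI controller drivers seed the new allocator the same way: ctx_lowest_free starts at 1 where the old code zeroed iommu_cur_ctx. The effect is that bit 0 of ctx_bitmap is never handed out, so context 0 stays reserved as the shared fallback value — it is what iommu_alloc_ctx() returns on exhaustion and what iommu_free_ctx() deliberately ignores — and no extra bookkeeping is needed to keep the out-of-contexts path consistent.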
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 14d9c3a21b9..89f5e019f24 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -117,19 +117,42 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
 {
-	iommu->strbuf_flushflag = 0UL;
-	while (npages--)
-		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+	unsigned long n;
+	int limit;
+
+	n = npages;
+	while (n--)
+		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == SBUS_DMA_TODEVICE)
+		return;
+
+	iommu->strbuf_flushflag = 0UL;
+
+	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
 	upa_readq(iommu->sbus_control_reg);
-	while (iommu->strbuf_flushflag == 0UL)
+
+	limit = 100000;
+	while (iommu->strbuf_flushflag == 0UL) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
 		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] npages[%ld]\n",
+		       base, npages);
 }
 
 static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
@@ -406,7 +429,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -569,7 +592,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -581,7 +604,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -605,7 +628,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
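The SBUS streaming buffer gets the same two fixes as the PCI path: the flush-flag wait is bounded, and device-bound transfers return before the handshake entirely, because a device that only read from memory cannot have left dirty lines in the streaming cache. That direction test, as a one-line predicate (the enum mirrors the kernel's SBUS_DMA_* constants and is shown only for illustration):

    enum sbus_dma_dir {
            SBUS_DMA_BIDIRECTIONAL, /* 0: device may read and write */
            SBUS_DMA_TODEVICE,      /* 1: memory -> device only */
            SBUS_DMA_FROMDEVICE     /* 2: device -> memory */
    };

    /* Only transfers that can dirty the streaming cache need the
     * flush-flag synchronization sequence. */
    static int needs_flushflag_sync(enum sbus_dma_dir direction)
    {
            return direction != SBUS_DMA_TODEVICE;
    }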
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 12c3d84b746..b7e6a91952b 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -383,6 +383,17 @@ static void __init process_switch(char c)
 		/* Use PROM debug console. */
 		register_console(&prom_debug_console);
 		break;
+	case 'P':
+		/* Force UltraSPARC-III P-Cache on. */
+		if (tlb_type != cheetah) {
+			printk("BOOT: Ignoring P-Cache force option.\n");
+			break;
+		}
+		cheetah_pcache_forced_on = 1;
+		add_taint(TAINT_MACHINE_CHECK);
+		cheetah_enable_pcache();
+		break;
+
 	default:
 		printk("Unknown boot switch (-%c)\n", c);
 		break;
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 6dff06a44e7..e5b9c7a2778 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -123,6 +123,9 @@ void __init smp_callin(void)
 
 	smp_setup_percpu_timer();
 
+	if (cheetah_pcache_forced_on)
+		cheetah_enable_pcache();
+
 	local_irq_enable();
 
 	calibrate_delay();
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 56b203a2af6..a9f4596d7c2 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -421,6 +421,25 @@ asmlinkage void cee_log(unsigned long ce_status,
 	}
 }
 
+int cheetah_pcache_forced_on;
+
+void cheetah_enable_pcache(void)
+{
+	unsigned long dcr;
+
+	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
+	       smp_processor_id());
+
+	__asm__ __volatile__("ldxa [%%g0] %1, %0"
+			     : "=r" (dcr)
+			     : "i" (ASI_DCU_CONTROL_REG));
+	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
+	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
+}
+
 /* Cheetah error trap handling. */
 static unsigned long ecache_flush_physbase;
 static unsigned long ecache_flush_linesize;
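Taken together, the last three hunks implement the new "-P" boot switch. cheetah_enable_pcache() reads the UltraSPARC-III data-cache-unit control register through ASI_DCU_CONTROL_REG, ORs in the P-Cache enable and prefetch bits (DCU_PE, DCU_HPE, DCU_SPE, DCU_SL), and writes the value back under membar #Sync. The DCU register is per-CPU state, so setup.c enables the P-Cache on the boot processor while the smp_callin() hook repeats the write on each secondary CPU as it comes up. The add_taint(TAINT_MACHINE_CHECK) call marks the resulting kernel as tainted, signalling that forcing the P-Cache on is a testing override rather than a supported configuration.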