Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r--  arch/powerpc/sysdev/Makefile                                            |   2
-rw-r--r--  arch/powerpc/sysdev/dart.h                                              |  41
-rw-r--r--  arch/powerpc/sysdev/dart_iommu.c (renamed from arch/powerpc/sysdev/u3_iommu.c) | 173
-rw-r--r--  arch/powerpc/sysdev/mpic.c                                              | 199
4 files changed, 287 insertions, 128 deletions
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index b3e3636a57b..14b9abde2d2 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -4,6 +4,6 @@ obj-$(CONFIG_PPC_I8259) += i8259.o
obj-$(CONFIG_PPC_MPC106) += grackle.o
obj-$(CONFIG_BOOKE) += dcr.o
obj-$(CONFIG_40x) += dcr.o
-obj-$(CONFIG_U3_DART) += u3_iommu.o
+obj-$(CONFIG_U3_DART) += dart_iommu.o
obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
obj-$(CONFIG_83xx) += ipic.o
diff --git a/arch/powerpc/sysdev/dart.h b/arch/powerpc/sysdev/dart.h
index 33ed9ed7fc1..c2d05763ccb 100644
--- a/arch/powerpc/sysdev/dart.h
+++ b/arch/powerpc/sysdev/dart.h
@@ -20,29 +20,44 @@
#define _POWERPC_SYSDEV_DART_H
-/* physical base of DART registers */
-#define DART_BASE 0xf8033000UL
-
/* Offset from base to control register */
-#define DARTCNTL 0
+#define DART_CNTL 0
+
/* Offset from base to exception register */
-#define DARTEXCP 0x10
+#define DART_EXCP_U3 0x10
/* Offset from base to TLB tag registers */
-#define DARTTAG 0x1000
+#define DART_TAGS_U3 0x1000
+/* U4 registers */
+#define DART_BASE_U4 0x10
+#define DART_SIZE_U4 0x20
+#define DART_EXCP_U4 0x30
+#define DART_TAGS_U4 0x1000
/* Control Register fields */
-/* base address of table (pfn) */
-#define DARTCNTL_BASE_MASK 0xfffff
-#define DARTCNTL_BASE_SHIFT 12
+/* U3 registers */
+#define DART_CNTL_U3_BASE_MASK 0xfffff
+#define DART_CNTL_U3_BASE_SHIFT 12
+#define DART_CNTL_U3_FLUSHTLB 0x400
+#define DART_CNTL_U3_ENABLE 0x200
+#define DART_CNTL_U3_SIZE_MASK 0x1ff
+#define DART_CNTL_U3_SIZE_SHIFT 0
+
+/* U4 registers */
+#define DART_BASE_U4_BASE_MASK 0xffffff
+#define DART_BASE_U4_BASE_SHIFT 0
+#define DART_CNTL_U4_FLUSHTLB 0x20000000
+#define DART_CNTL_U4_ENABLE 0x80000000
+#define DART_SIZE_U4_SIZE_MASK 0x1fff
+#define DART_SIZE_U4_SIZE_SHIFT 0
+
+#define DART_REG(r) (dart + ((r) >> 2))
+#define DART_IN(r) (in_be32(DART_REG(r)))
+#define DART_OUT(r,v) (out_be32(DART_REG(r), (v)))
-#define DARTCNTL_FLUSHTLB 0x400
-#define DARTCNTL_ENABLE 0x200
/* size of table in pages */
-#define DARTCNTL_SIZE_MASK 0x1ff
-#define DARTCNTL_SIZE_SHIFT 0
/* DART table fields */
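[Illustrative sketch, not part of the patch] The new DART_REG()/DART_IN()/DART_OUT() helpers take the byte offsets defined above and shift them right by two because dart_iommu.c declares 'dart' as a 32-bit __iomem pointer, so each access reduces to in_be32()/out_be32() on a word index. Setting the U3 TLB-flush bit with these helpers looks roughly like this (assuming 'dart' is already mapped):

	u32 reg;

	reg = DART_IN(DART_CNTL);				/* in_be32(dart + (DART_CNTL >> 2)) */
	DART_OUT(DART_CNTL, reg | DART_CNTL_U3_FLUSHTLB);	/* out_be32() of the modified word */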
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 5c1a26a6d00..df0dbdee762 100644
--- a/arch/powerpc/sysdev/u3_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -1,25 +1,27 @@
/*
- * arch/powerpc/sysdev/u3_iommu.c
+ * arch/powerpc/sysdev/dart_iommu.c
*
* Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
+ * Copyright (C) 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>,
+ * IBM Corporation
*
* Based on pSeries_iommu.c:
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
* Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
- * Dynamic DMA mapping support, Apple U3 & IBM CPC925 "DART" iommu.
+ * Dynamic DMA mapping support, Apple U3, U4 & IBM CPC925 "DART" iommu.
+ *
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@@ -57,21 +59,22 @@ static unsigned long dart_tablesize;
static u32 *dart_vbase;
/* Mapped base address for the dart */
-static unsigned int *dart;
+static unsigned int *__iomem dart;
/* Dummy val that entries are set to when unused */
static unsigned int dart_emptyval;
-static struct iommu_table iommu_table_u3;
-static int iommu_table_u3_inited;
+static struct iommu_table iommu_table_dart;
+static int iommu_table_dart_inited;
static int dart_dirty;
+static int dart_is_u4;
#define DBG(...)
static inline void dart_tlb_invalidate_all(void)
{
unsigned long l = 0;
- unsigned int reg;
+ unsigned int reg, inv_bit;
unsigned long limit;
DBG("dart: flush\n");
@@ -81,29 +84,28 @@ static inline void dart_tlb_invalidate_all(void)
*
* Gotcha: Sometimes, the DART won't detect that the bit gets
* set. If so, clear it and set it again.
- */
+ */
limit = 0;
+ inv_bit = dart_is_u4 ? DART_CNTL_U4_FLUSHTLB : DART_CNTL_U3_FLUSHTLB;
retry:
- reg = in_be32((unsigned int *)dart+DARTCNTL);
- reg |= DARTCNTL_FLUSHTLB;
- out_be32((unsigned int *)dart+DARTCNTL, reg);
-
l = 0;
- while ((in_be32((unsigned int *)dart+DARTCNTL) & DARTCNTL_FLUSHTLB) &&
- l < (1L<<limit)) {
+ reg = DART_IN(DART_CNTL);
+ reg |= inv_bit;
+ DART_OUT(DART_CNTL, reg);
+
+ while ((DART_IN(DART_CNTL) & inv_bit) && l < (1L << limit))
l++;
- }
- if (l == (1L<<limit)) {
+ if (l == (1L << limit)) {
if (limit < 4) {
limit++;
- reg = in_be32((unsigned int *)dart+DARTCNTL);
- reg &= ~DARTCNTL_FLUSHTLB;
- out_be32((unsigned int *)dart+DARTCNTL, reg);
+ reg = DART_IN(DART_CNTL);
+ reg &= ~inv_bit;
+ DART_OUT(DART_CNTL, reg);
goto retry;
} else
- panic("U3-DART: TLB did not flush after waiting a long "
+ panic("DART: TLB did not flush after waiting a long "
"time. Buggy U3 ?");
}
}
@@ -115,7 +117,7 @@ static void dart_flush(struct iommu_table *tbl)
dart_dirty = 0;
}
-static void dart_build(struct iommu_table *tbl, long index,
+static void dart_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction)
{
@@ -128,7 +130,7 @@ static void dart_build(struct iommu_table *tbl, long index,
npages <<= DART_PAGE_FACTOR;
dp = ((unsigned int*)tbl->it_base) + index;
-
+
/* On U3, all memory is contiguous, so we can move this
* out of the loop.
*/
@@ -148,7 +150,7 @@ static void dart_build(struct iommu_table *tbl, long index,
static void dart_free(struct iommu_table *tbl, long index, long npages)
{
unsigned int *dp;
-
+
/* We don't worry about flushing the TLB cache. The only drawback of
* not doing it is that we won't catch buggy device drivers doing
* bad DMAs, but then no 32-bit architecture ever does either.
@@ -160,7 +162,7 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
npages <<= DART_PAGE_FACTOR;
dp = ((unsigned int *)tbl->it_base) + index;
-
+
while (npages--)
*(dp++) = dart_emptyval;
}
@@ -168,20 +170,25 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
static int dart_init(struct device_node *dart_node)
{
- unsigned int regword;
unsigned int i;
- unsigned long tmp;
+ unsigned long tmp, base, size;
+ struct resource r;
if (dart_tablebase == 0 || dart_tablesize == 0) {
- printk(KERN_INFO "U3-DART: table not allocated, using direct DMA\n");
+ printk(KERN_INFO "DART: table not allocated, using "
+ "direct DMA\n");
return -ENODEV;
}
+ if (of_address_to_resource(dart_node, 0, &r))
+ panic("DART: can't get register base ! ");
+
/* Make sure nothing from the DART range remains in the CPU cache
* from a previous mapping that existed before the kernel took
* over
*/
- flush_dcache_phys_range(dart_tablebase, dart_tablebase + dart_tablesize);
+ flush_dcache_phys_range(dart_tablebase,
+ dart_tablebase + dart_tablesize);
/* Allocate a spare page to map all invalid DART pages. We need to do
* that to work around what looks like a problem with the HT bridge
@@ -189,21 +196,16 @@ static int dart_init(struct device_node *dart_node)
*/
tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
if (!tmp)
- panic("U3-DART: Cannot allocate spare page!");
- dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & DARTMAP_RPNMASK);
+ panic("DART: Cannot allocate spare page!");
+ dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
+ DARTMAP_RPNMASK);
- /* Map in DART registers. FIXME: Use device node to get base address */
- dart = ioremap(DART_BASE, 0x7000);
+ /* Map in DART registers */
+ dart = ioremap(r.start, r.end - r.start + 1);
if (dart == NULL)
- panic("U3-DART: Cannot map registers!");
+ panic("DART: Cannot map registers!");
- /* Set initial control register contents: table base,
- * table size and enable bit
- */
- regword = DARTCNTL_ENABLE |
- ((dart_tablebase >> DART_PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) |
- (((dart_tablesize >> DART_PAGE_SHIFT) & DARTCNTL_SIZE_MASK)
- << DARTCNTL_SIZE_SHIFT);
+ /* Map in DART table */
dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize);
/* Fill initial table */
@@ -211,36 +213,50 @@ static int dart_init(struct device_node *dart_node)
dart_vbase[i] = dart_emptyval;
/* Initialize DART with table base and enable it. */
- out_be32((unsigned int *)dart, regword);
+ base = dart_tablebase >> DART_PAGE_SHIFT;
+ size = dart_tablesize >> DART_PAGE_SHIFT;
+ if (dart_is_u4) {
+ BUG_ON(size & ~DART_SIZE_U4_SIZE_MASK);
+ DART_OUT(DART_BASE_U4, base);
+ DART_OUT(DART_SIZE_U4, size);
+ DART_OUT(DART_CNTL, DART_CNTL_U4_ENABLE);
+ } else {
+ BUG_ON(size & ~DART_CNTL_U3_SIZE_MASK);
+ DART_OUT(DART_CNTL,
+ DART_CNTL_U3_ENABLE |
+ (base << DART_CNTL_U3_BASE_SHIFT) |
+ (size << DART_CNTL_U3_SIZE_SHIFT));
+ }
/* Invalidate DART to get rid of possible stale TLBs */
dart_tlb_invalidate_all();
- printk(KERN_INFO "U3/CPC925 DART IOMMU initialized\n");
+ printk(KERN_INFO "DART IOMMU initialized for %s type chipset\n",
+ dart_is_u4 ? "U4" : "U3");
return 0;
}
-static void iommu_table_u3_setup(void)
+static void iommu_table_dart_setup(void)
{
- iommu_table_u3.it_busno = 0;
- iommu_table_u3.it_offset = 0;
+ iommu_table_dart.it_busno = 0;
+ iommu_table_dart.it_offset = 0;
/* it_size is in number of entries */
- iommu_table_u3.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
+ iommu_table_dart.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
/* Initialize the common IOMMU code */
- iommu_table_u3.it_base = (unsigned long)dart_vbase;
- iommu_table_u3.it_index = 0;
- iommu_table_u3.it_blocksize = 1;
- iommu_init_table(&iommu_table_u3);
+ iommu_table_dart.it_base = (unsigned long)dart_vbase;
+ iommu_table_dart.it_index = 0;
+ iommu_table_dart.it_blocksize = 1;
+ iommu_init_table(&iommu_table_dart);
/* Reserve the last page of the DART to avoid possible prefetch
* past the DART mapped area
*/
- set_bit(iommu_table_u3.it_size - 1, iommu_table_u3.it_map);
+ set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
}
-static void iommu_dev_setup_u3(struct pci_dev *dev)
+static void iommu_dev_setup_dart(struct pci_dev *dev)
{
struct device_node *dn;
@@ -254,35 +270,39 @@ static void iommu_dev_setup_u3(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
if (dn)
- PCI_DN(dn)->iommu_table = &iommu_table_u3;
+ PCI_DN(dn)->iommu_table = &iommu_table_dart;
}
-static void iommu_bus_setup_u3(struct pci_bus *bus)
+static void iommu_bus_setup_dart(struct pci_bus *bus)
{
struct device_node *dn;
- if (!iommu_table_u3_inited) {
- iommu_table_u3_inited = 1;
- iommu_table_u3_setup();
+ if (!iommu_table_dart_inited) {
+ iommu_table_dart_inited = 1;
+ iommu_table_dart_setup();
}
dn = pci_bus_to_OF_node(bus);
if (dn)
- PCI_DN(dn)->iommu_table = &iommu_table_u3;
+ PCI_DN(dn)->iommu_table = &iommu_table_dart;
}
static void iommu_dev_setup_null(struct pci_dev *dev) { }
static void iommu_bus_setup_null(struct pci_bus *bus) { }
-void iommu_init_early_u3(void)
+void iommu_init_early_dart(void)
{
struct device_node *dn;
/* Find the DART in the device-tree */
dn = of_find_compatible_node(NULL, "dart", "u3-dart");
- if (dn == NULL)
- return;
+ if (dn == NULL) {
+ dn = of_find_compatible_node(NULL, "dart", "u4-dart");
+ if (dn == NULL)
+ goto bail;
+ dart_is_u4 = 1;
+ }
/* Setup low level TCE operations for the core IOMMU code */
ppc_md.tce_build = dart_build;
@@ -290,24 +310,27 @@ void iommu_init_early_u3(void)
ppc_md.tce_flush = dart_flush;
/* Initialize the DART HW */
- if (dart_init(dn)) {
- /* If init failed, use direct iommu and null setup functions */
- ppc_md.iommu_dev_setup = iommu_dev_setup_null;
- ppc_md.iommu_bus_setup = iommu_bus_setup_null;
-
- /* Setup pci_dma ops */
- pci_direct_iommu_init();
- } else {
- ppc_md.iommu_dev_setup = iommu_dev_setup_u3;
- ppc_md.iommu_bus_setup = iommu_bus_setup_u3;
+ if (dart_init(dn) == 0) {
+ ppc_md.iommu_dev_setup = iommu_dev_setup_dart;
+ ppc_md.iommu_bus_setup = iommu_bus_setup_dart;
/* Setup pci_dma ops */
pci_iommu_init();
+
+ return;
}
+
+ bail:
+ /* If init failed, use direct iommu and null setup functions */
+ ppc_md.iommu_dev_setup = iommu_dev_setup_null;
+ ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+
+ /* Setup pci_dma ops */
+ pci_direct_iommu_init();
}
-void __init alloc_u3_dart_table(void)
+void __init alloc_dart_table(void)
{
/* Only reserve DART space if machine has more than 2GB of RAM
* or if requested with iommu=on on cmdline.
@@ -323,5 +346,5 @@ void __init alloc_u3_dart_table(void)
dart_tablebase = (unsigned long)
abs_to_virt(lmb_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
- printk(KERN_INFO "U3-DART allocated at: %lx\n", dart_tablebase);
+ printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
}
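[Illustrative sketch, not part of the patch] dart_init() no longer hard-codes the register base: it asks the device tree for the DART's "reg" resource and maps exactly that range, while iommu_init_early_dart() sets dart_is_u4 when only a "u4-dart" compatible node is found. A condensed version of the mapping step, with error handling trimmed ('dart_map_regs' is a local name used here for illustration only):

	static u32 __iomem *dart_map_regs(struct device_node *dn)
	{
		struct resource r;

		if (of_address_to_resource(dn, 0, &r))
			return NULL;			/* node has no usable "reg" property */

		/* resource end is inclusive, hence the +1 */
		return ioremap(r.start, r.end - r.start + 1);
	}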
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 9513ea78e6c..4f26304d026 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -13,6 +13,9 @@
*/
#undef DEBUG
+#undef DEBUG_IPI
+#undef DEBUG_IRQ
+#undef DEBUG_LOW
#include <linux/config.h>
#include <linux/types.h>
@@ -168,35 +171,86 @@ static void __init mpic_test_broken_ipi(struct mpic *mpic)
/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
* to force the edge setting on the MPIC and do the ack workaround.
*/
-static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source_no)
+static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
{
- if (source_no >= 128 || !mpic->fixups)
+ if (source >= 128 || !mpic->fixups)
return 0;
- return mpic->fixups[source_no].base != NULL;
+ return mpic->fixups[source].base != NULL;
}
-static inline void mpic_apic_end_irq(struct mpic *mpic, unsigned int source_no)
+static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
{
- struct mpic_irq_fixup *fixup = &mpic->fixups[source_no];
+ struct mpic_irq_fixup *fixup = &mpic->fixups[source];
- spin_lock(&mpic->fixup_lock);
- writeb(0x11 + 2 * fixup->irq, fixup->base + 2);
- writel(fixup->data, fixup->base + 4);
- spin_unlock(&mpic->fixup_lock);
+ if (fixup->applebase) {
+ unsigned int soff = (fixup->index >> 3) & ~3;
+ unsigned int mask = 1U << (fixup->index & 0x1f);
+ writel(mask, fixup->applebase + soff);
+ } else {
+ spin_lock(&mpic->fixup_lock);
+ writeb(0x11 + 2 * fixup->index, fixup->base + 2);
+ writel(fixup->data, fixup->base + 4);
+ spin_unlock(&mpic->fixup_lock);
+ }
}
+static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
+ unsigned int irqflags)
+{
+ struct mpic_irq_fixup *fixup = &mpic->fixups[source];
+ unsigned long flags;
+ u32 tmp;
+
+ if (fixup->base == NULL)
+ return;
+
+ DBG("startup_ht_interrupt(%u, %u) index: %d\n",
+ source, irqflags, fixup->index);
+ spin_lock_irqsave(&mpic->fixup_lock, flags);
+ /* Enable and configure */
+ writeb(0x10 + 2 * fixup->index, fixup->base + 2);
+ tmp = readl(fixup->base + 4);
+ tmp &= ~(0x23U);
+ if (irqflags & IRQ_LEVEL)
+ tmp |= 0x22;
+ writel(tmp, fixup->base + 4);
+ spin_unlock_irqrestore(&mpic->fixup_lock, flags);
+}
+
+static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
+ unsigned int irqflags)
+{
+ struct mpic_irq_fixup *fixup = &mpic->fixups[source];
+ unsigned long flags;
+ u32 tmp;
+
+ if (fixup->base == NULL)
+ return;
+
+ DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags);
+
+ /* Disable */
+ spin_lock_irqsave(&mpic->fixup_lock, flags);
+ writeb(0x10 + 2 * fixup->index, fixup->base + 2);
+ tmp = readl(fixup->base + 4);
+ tmp &= ~1U;
+ writel(tmp, fixup->base + 4);
+ spin_unlock_irqrestore(&mpic->fixup_lock, flags);
+}
-static void __init mpic_scan_ioapic(struct mpic *mpic, u8 __iomem *devbase)
+static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
+ unsigned int devfn, u32 vdid)
{
int i, irq, n;
+ u8 __iomem *base;
u32 tmp;
u8 pos;
- for (pos = readb(devbase + 0x34); pos; pos = readb(devbase + pos + 1)) {
- u8 id = readb(devbase + pos);
-
- if (id == 0x08) {
+ for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
+ pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
+ u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
+ if (id == PCI_CAP_ID_HT_IRQCONF) {
id = readb(devbase + pos + 3);
if (id == 0x80)
break;
@@ -205,33 +259,41 @@ static void __init mpic_scan_ioapic(struct mpic *mpic, u8 __iomem *devbase)
if (pos == 0)
return;
- printk(KERN_INFO "mpic: - Workarounds @ %p, pos = 0x%02x\n", devbase, pos);
+ base = devbase + pos;
+ writeb(0x01, base + 2);
+ n = (readl(base + 4) >> 16) & 0xff;
- devbase += pos;
-
- writeb(0x01, devbase + 2);
- n = (readl(devbase + 4) >> 16) & 0xff;
+ printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x"
+ " has %d irqs\n",
+ devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);
for (i = 0; i <= n; i++) {
- writeb(0x10 + 2 * i, devbase + 2);
- tmp = readl(devbase + 4);
- if ((tmp & 0x21) != 0x20)
- continue;
+ writeb(0x10 + 2 * i, base + 2);
+ tmp = readl(base + 4);
irq = (tmp >> 16) & 0xff;
- mpic->fixups[irq].irq = i;
- mpic->fixups[irq].base = devbase;
- writeb(0x11 + 2 * i, devbase + 2);
- mpic->fixups[irq].data = readl(devbase + 4) | 0x80000000;
+ DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
+ /* mask it, will be unmasked later */
+ tmp |= 0x1;
+ writel(tmp, base + 4);
+ mpic->fixups[irq].index = i;
+ mpic->fixups[irq].base = base;
+ /* Apple HT PIC has a non-standard way of doing EOIs */
+ if ((vdid & 0xffff) == 0x106b)
+ mpic->fixups[irq].applebase = devbase + 0x60;
+ else
+ mpic->fixups[irq].applebase = NULL;
+ writeb(0x11 + 2 * i, base + 2);
+ mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
}
}
-static void __init mpic_scan_ioapics(struct mpic *mpic)
+static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
unsigned int devfn;
u8 __iomem *cfgspace;
- printk(KERN_INFO "mpic: Setting up IO-APICs workarounds for U3\n");
+ printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
/* Allocate fixups array */
mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
@@ -247,13 +309,14 @@ static void __init mpic_scan_ioapics(struct mpic *mpic)
cfgspace = ioremap(0xf2000000, 0x10000);
BUG_ON(cfgspace == NULL);
- /* Now we scan all slots. We do a very quick scan, we read the header type,
- * vendor ID and device ID only, that's plenty enough
+ /* Now we scan all slots. We do a very quick scan, we read the header
+ * type, vendor ID and device ID only, that's plenty enough
*/
for (devfn = 0; devfn < 0x100; devfn++) {
u8 __iomem *devbase = cfgspace + (devfn << 8);
u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
u32 l = readl(devbase + PCI_VENDOR_ID);
+ u16 s;
DBG("devfn %x, l: %x\n", devfn, l);
@@ -261,8 +324,12 @@ static void __init mpic_scan_ioapics(struct mpic *mpic)
if (l == 0xffffffff || l == 0x00000000 ||
l == 0x0000ffff || l == 0xffff0000)
goto next;
+ /* Check if it supports capability lists */
+ s = readw(devbase + PCI_STATUS);
+ if (!(s & PCI_STATUS_CAP_LIST))
+ goto next;
- mpic_scan_ioapic(mpic, devbase);
+ mpic_scan_ht_pic(mpic, devbase, devfn, l);
next:
/* next device, if function 0 */
@@ -363,6 +430,31 @@ static void mpic_enable_irq(unsigned int irq)
break;
}
} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+ if (mpic->flags & MPIC_BROKEN_U3) {
+ unsigned int src = irq - mpic->irq_offset;
+ if (mpic_is_ht_interrupt(mpic, src) &&
+ (irq_desc[irq].status & IRQ_LEVEL))
+ mpic_ht_end_irq(mpic, src);
+ }
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+}
+
+static unsigned int mpic_startup_irq(unsigned int irq)
+{
+#ifdef CONFIG_MPIC_BROKEN_U3
+ struct mpic *mpic = mpic_from_irq(irq);
+ unsigned int src = irq - mpic->irq_offset;
+
+ if (mpic_is_ht_interrupt(mpic, src))
+ mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
+
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+ mpic_enable_irq(irq);
+
+ return 0;
}
static void mpic_disable_irq(unsigned int irq)
@@ -386,12 +478,27 @@ static void mpic_disable_irq(unsigned int irq)
} while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
}
+static void mpic_shutdown_irq(unsigned int irq)
+{
+#ifdef CONFIG_MPIC_BROKEN_U3
+ struct mpic *mpic = mpic_from_irq(irq);
+ unsigned int src = irq - mpic->irq_offset;
+
+ if (mpic_is_ht_interrupt(mpic, src))
+ mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
+
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+ mpic_disable_irq(irq);
+}
+
static void mpic_end_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
+#ifdef DEBUG_IRQ
DBG("%s: end_irq: %d\n", mpic->name, irq);
-
+#endif
/* We always EOI on end_irq() even for edge interrupts since that
* should only lower the priority, the MPIC should have properly
* latched another edge interrupt coming in anyway
@@ -400,8 +507,9 @@ static void mpic_end_irq(unsigned int irq)
#ifdef CONFIG_MPIC_BROKEN_U3
if (mpic->flags & MPIC_BROKEN_U3) {
unsigned int src = irq - mpic->irq_offset;
- if (mpic_is_ht_interrupt(mpic, src))
- mpic_apic_end_irq(mpic, src);
+ if (mpic_is_ht_interrupt(mpic, src) &&
+ (irq_desc[irq].status & IRQ_LEVEL))
+ mpic_ht_end_irq(mpic, src);
}
#endif /* CONFIG_MPIC_BROKEN_U3 */
@@ -482,6 +590,8 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
mpic->name = name;
mpic->hc_irq.typename = name;
+ mpic->hc_irq.startup = mpic_startup_irq;
+ mpic->hc_irq.shutdown = mpic_shutdown_irq;
mpic->hc_irq.enable = mpic_enable_irq;
mpic->hc_irq.disable = mpic_disable_irq;
mpic->hc_irq.end = mpic_end_irq;
@@ -650,10 +760,10 @@ void __init mpic_init(struct mpic *mpic)
mpic->irq_count = mpic->num_sources;
#ifdef CONFIG_MPIC_BROKEN_U3
- /* Do the ioapic fixups on U3 broken mpic */
+ /* Do the HT PIC fixups on U3 broken mpic */
DBG("MPIC flags: %x\n", mpic->flags);
if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
- mpic_scan_ioapics(mpic);
+ mpic_scan_ht_pics(mpic);
#endif /* CONFIG_MPIC_BROKEN_U3 */
for (i = 0; i < mpic->num_sources; i++) {
@@ -840,7 +950,9 @@ void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
BUG_ON(mpic == NULL);
+#ifdef DEBUG_IPI
DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
+#endif
mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
@@ -851,19 +963,28 @@ int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
u32 irq;
irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
+#ifdef DEBUG_LOW
DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
-
+#endif
if (mpic->cascade && irq == mpic->cascade_vec) {
+#ifdef DEBUG_LOW
DBG("%s: cascading ...\n", mpic->name);
+#endif
irq = mpic->cascade(regs, mpic->cascade_data);
mpic_eoi(mpic);
return irq;
}
if (unlikely(irq == MPIC_VEC_SPURRIOUS))
return -1;
- if (irq < MPIC_VEC_IPI_0)
+ if (irq < MPIC_VEC_IPI_0) {
+#ifdef DEBUG_IRQ
+ DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset);
+#endif
return irq + mpic->irq_offset;
+ }
+#ifdef DEBUG_IPI
DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
+#endif
return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
}