author     Jaroslav Kysela <perex@suse.cz>    2006-03-22 11:02:08 +0100
committer  Jaroslav Kysela <perex@suse.cz>    2006-03-22 11:02:08 +0100
commit     5501972e0b5857bc8354770d900ceb9b40c7f6b7 (patch)
tree       ff239422827c4cd54d2998f8851304255de31b38 /drivers/net
parent     9d2f928ddf64ca0361562e30faf584cd33055c60 (diff)
parent     e952f31bce6e9f64db01f607abc46529ba57ac9e (diff)
Merge with rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/8139too.c            |    2
-rw-r--r--  drivers/net/arm/am79c961a.c      |    4
-rw-r--r--  drivers/net/bnx2.c               |  477
-rw-r--r--  drivers/net/bnx2.h               |   37
-rw-r--r--  drivers/net/cassini.c            |   40
-rw-r--r--  drivers/net/cassini.h            |    2
-rw-r--r--  drivers/net/e1000/e1000_main.c   |    2
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c    |    2
-rw-r--r--  drivers/net/fs_enet/mac-fec.c    |    2
-rw-r--r--  drivers/net/fs_enet/mac-scc.c    |    2
-rw-r--r--  drivers/net/gianfar.c            |    4
-rw-r--r--  drivers/net/irda/Kconfig         |    8
-rw-r--r--  drivers/net/irda/Makefile        |    1
-rw-r--r--  drivers/net/irda/donauboe.c      |    2
-rw-r--r--  drivers/net/irda/ep7211_ir.c     |   11
-rw-r--r--  drivers/net/irda/irda-usb.c      |    5
-rw-r--r--  drivers/net/irda/irtty-sir.c     |   19
-rw-r--r--  drivers/net/irda/nsc-ircc.c      |  320
-rw-r--r--  drivers/net/irda/nsc-ircc.h      |    2
-rw-r--r--  drivers/net/irda/sir_dongle.c    |   19
-rw-r--r--  drivers/net/irda/toim3232-sir.c  |  375
-rw-r--r--  drivers/net/irda/vlsi_ir.c       |    2
-rw-r--r--  drivers/net/mv643xx_eth.h        |   18
-rw-r--r--  drivers/net/pcnet32.c            | 4143
-rw-r--r--  drivers/net/ppp_generic.c        |    4
-rw-r--r--  drivers/net/pppoe.c              |    3
-rw-r--r--  drivers/net/skfp/fplustm.c       |   12
-rw-r--r--  drivers/net/skge.c               |  275
-rw-r--r--  drivers/net/skge.h               |    1
-rw-r--r--  drivers/net/sky2.c               |  583
-rw-r--r--  drivers/net/sky2.h               |   22
-rw-r--r--  drivers/net/smc91x.c             |   57
-rw-r--r--  drivers/net/smc91x.h             |  474
-rw-r--r--  drivers/net/sungem.c             |   37
-rw-r--r--  drivers/net/sungem.h             |    6
-rw-r--r--  drivers/net/tg3.c                |  648
-rw-r--r--  drivers/net/tg3.h                |   19
-rw-r--r--  drivers/net/wan/sbni.c           |    3
38 files changed, 4439 insertions, 3204 deletions
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index e58d4c50c2e..f5ee064ab6b 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1605,7 +1605,7 @@ static void rtl8139_thread (void *_data)
if (tp->watchdog_fired) {
tp->watchdog_fired = 0;
rtl8139_tx_timeout_task(_data);
- } else if (rtnl_shlock_nowait() == 0) {
+ } else if (rtnl_trylock()) {
rtl8139_thread_iter (dev, tp, tp->mmio_addr);
rtnl_unlock ();
} else {
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 53e3afc1b7b..09d5c3f2698 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -696,7 +696,9 @@ static int __init am79c961_probe(struct platform_device *pdev)
dev->base_addr = res->start;
dev->irq = platform_get_irq(pdev, 0);
- ret = -ENODEV;
+ ret = -ENODEV;
+ if (dev->irq < 0)
+ goto nodev;
if (!request_region(dev->base_addr, 0x18, dev->name))
goto nodev;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b787b6582e5..7d213707008 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -14,8 +14,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.4.31"
-#define DRV_MODULE_RELDATE "January 19, 2006"
+#define DRV_MODULE_VERSION "1.4.38"
+#define DRV_MODULE_RELDATE "February 10, 2006"
#define RUN_AT(x) (jiffies + (x))
@@ -360,6 +360,8 @@ bnx2_netif_start(struct bnx2 *bp)
static void
bnx2_free_mem(struct bnx2 *bp)
{
+ int i;
+
if (bp->stats_blk) {
pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
bp->stats_blk, bp->stats_blk_mapping);
@@ -378,19 +380,23 @@ bnx2_free_mem(struct bnx2 *bp)
}
kfree(bp->tx_buf_ring);
bp->tx_buf_ring = NULL;
- if (bp->rx_desc_ring) {
- pci_free_consistent(bp->pdev,
- sizeof(struct rx_bd) * RX_DESC_CNT,
- bp->rx_desc_ring, bp->rx_desc_mapping);
- bp->rx_desc_ring = NULL;
- }
- kfree(bp->rx_buf_ring);
+ for (i = 0; i < bp->rx_max_ring; i++) {
+ if (bp->rx_desc_ring[i])
+ pci_free_consistent(bp->pdev,
+ sizeof(struct rx_bd) * RX_DESC_CNT,
+ bp->rx_desc_ring[i],
+ bp->rx_desc_mapping[i]);
+ bp->rx_desc_ring[i] = NULL;
+ }
+ vfree(bp->rx_buf_ring);
bp->rx_buf_ring = NULL;
}
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
+ int i;
+
bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
GFP_KERNEL);
if (bp->tx_buf_ring == NULL)
@@ -404,18 +410,23 @@ bnx2_alloc_mem(struct bnx2 *bp)
if (bp->tx_desc_ring == NULL)
goto alloc_mem_err;
- bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT,
- GFP_KERNEL);
+ bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
+ bp->rx_max_ring);
if (bp->rx_buf_ring == NULL)
goto alloc_mem_err;
- memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT);
- bp->rx_desc_ring = pci_alloc_consistent(bp->pdev,
- sizeof(struct rx_bd) *
- RX_DESC_CNT,
- &bp->rx_desc_mapping);
- if (bp->rx_desc_ring == NULL)
- goto alloc_mem_err;
+ memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
+ bp->rx_max_ring);
+
+ for (i = 0; i < bp->rx_max_ring; i++) {
+ bp->rx_desc_ring[i] =
+ pci_alloc_consistent(bp->pdev,
+ sizeof(struct rx_bd) * RX_DESC_CNT,
+ &bp->rx_desc_mapping[i]);
+ if (bp->rx_desc_ring[i] == NULL)
+ goto alloc_mem_err;
+
+ }
bp->status_blk = pci_alloc_consistent(bp->pdev,
sizeof(struct status_block),
@@ -1520,7 +1531,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
struct sk_buff *skb;
struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
dma_addr_t mapping;
- struct rx_bd *rxbd = &bp->rx_desc_ring[index];
+ struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
unsigned long align;
skb = dev_alloc_skb(bp->rx_buf_size);
@@ -1656,23 +1667,30 @@ static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
u16 cons, u16 prod)
{
- struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons];
- struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod];
- struct rx_bd *cons_bd = &bp->rx_desc_ring[cons];
- struct rx_bd *prod_bd = &bp->rx_desc_ring[prod];
+ struct sw_bd *cons_rx_buf, *prod_rx_buf;
+ struct rx_bd *cons_bd, *prod_bd;
+
+ cons_rx_buf = &bp->rx_buf_ring[cons];
+ prod_rx_buf = &bp->rx_buf_ring[prod];
pci_dma_sync_single_for_device(bp->pdev,
pci_unmap_addr(cons_rx_buf, mapping),
bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
- prod_rx_buf->skb = cons_rx_buf->skb;
- pci_unmap_addr_set(prod_rx_buf, mapping,
- pci_unmap_addr(cons_rx_buf, mapping));
+ bp->rx_prod_bseq += bp->rx_buf_use_size;
- memcpy(prod_bd, cons_bd, 8);
+ prod_rx_buf->skb = skb;
- bp->rx_prod_bseq += bp->rx_buf_use_size;
+ if (cons == prod)
+ return;
+ pci_unmap_addr_set(prod_rx_buf, mapping,
+ pci_unmap_addr(cons_rx_buf, mapping));
+
+ cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+ prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+ prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+ prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
@@ -1699,14 +1717,19 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
u32 status;
struct sw_bd *rx_buf;
struct sk_buff *skb;
+ dma_addr_t dma_addr;
sw_ring_cons = RX_RING_IDX(sw_cons);
sw_ring_prod = RX_RING_IDX(sw_prod);
rx_buf = &bp->rx_buf_ring[sw_ring_cons];
skb = rx_buf->skb;
- pci_dma_sync_single_for_cpu(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+
+ rx_buf->skb = NULL;
+
+ dma_addr = pci_unmap_addr(rx_buf, mapping);
+
+ pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
rx_hdr = (struct l2_fhdr *) skb->data;
@@ -1747,8 +1770,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
skb = new_skb;
}
else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
- pci_unmap_single(bp->pdev,
- pci_unmap_addr(rx_buf, mapping),
+ pci_unmap_single(bp->pdev, dma_addr,
bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
skb_reserve(skb, bp->rx_offset);
@@ -1794,8 +1816,6 @@ reuse_rx:
rx_pkt++;
next_rx:
- rx_buf->skb = NULL;
-
sw_cons = NEXT_RX_BD(sw_cons);
sw_prod = NEXT_RX_BD(sw_prod);
@@ -3340,27 +3360,35 @@ bnx2_init_rx_ring(struct bnx2 *bp)
bp->hw_rx_cons = 0;
bp->rx_prod_bseq = 0;
- rxbd = &bp->rx_desc_ring[0];
- for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
- rxbd->rx_bd_len = bp->rx_buf_use_size;
- rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
- }
+ for (i = 0; i < bp->rx_max_ring; i++) {
+ int j;
- rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32;
- rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff;
+ rxbd = &bp->rx_desc_ring[i][0];
+ for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
+ rxbd->rx_bd_len = bp->rx_buf_use_size;
+ rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+ }
+ if (i == (bp->rx_max_ring - 1))
+ j = 0;
+ else
+ j = i + 1;
+ rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
+ rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
+ 0xffffffff;
+ }
val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
val |= 0x02 << 8;
CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
- val = (u64) bp->rx_desc_mapping >> 32;
+ val = (u64) bp->rx_desc_mapping[0] >> 32;
CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
- val = (u64) bp->rx_desc_mapping & 0xffffffff;
+ val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
- for ( ;ring_prod < bp->rx_ring_size; ) {
+ for (i = 0; i < bp->rx_ring_size; i++) {
if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
break;
}
@@ -3375,6 +3403,29 @@ bnx2_init_rx_ring(struct bnx2 *bp)
}
static void
+bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
+{
+ u32 num_rings, max;
+
+ bp->rx_ring_size = size;
+ num_rings = 1;
+ while (size > MAX_RX_DESC_CNT) {
+ size -= MAX_RX_DESC_CNT;
+ num_rings++;
+ }
+ /* round to next power of 2 */
+ max = MAX_RX_RINGS;
+ while ((max & num_rings) == 0)
+ max >>= 1;
+
+ if (num_rings != max)
+ max <<= 1;
+
+ bp->rx_max_ring = max;
+ bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
+}
+
+static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
int i;
@@ -3419,7 +3470,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
if (bp->rx_buf_ring == NULL)
return;
- for (i = 0; i < RX_DESC_CNT; i++) {
+ for (i = 0; i < bp->rx_max_ring_idx; i++) {
struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
struct sk_buff *skb = rx_buf->skb;
@@ -3506,74 +3557,9 @@ bnx2_test_registers(struct bnx2 *bp)
{ 0x0c00, 0, 0x00000000, 0x00000001 },
{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },
- { 0x0c0c, 0, 0x00ffffff, 0x00000000 },
- { 0x0c30, 0, 0x00000000, 0xffffffff },
- { 0x0c34, 0, 0x00000000, 0xffffffff },
- { 0x0c38, 0, 0x00000000, 0xffffffff },
- { 0x0c3c, 0, 0x00000000, 0xffffffff },
- { 0x0c40, 0, 0x00000000, 0xffffffff },
- { 0x0c44, 0, 0x00000000, 0xffffffff },
- { 0x0c48, 0, 0x00000000, 0x0007ffff },
- { 0x0c4c, 0, 0x00000000, 0xffffffff },
- { 0x0c50, 0, 0x00000000, 0xffffffff },
- { 0x0c54, 0, 0x00000000, 0xffffffff },
- { 0x0c58, 0, 0x00000000, 0xffffffff },
- { 0x0c5c, 0, 0x00000000, 0xffffffff },
- { 0x0c60, 0, 0x00000000, 0xffffffff },
- { 0x0c64, 0, 0x00000000, 0xffffffff },
- { 0x0c68, 0, 0x00000000, 0xffffffff },
- { 0x0c6c, 0, 0x00000000, 0xffffffff },
- { 0x0c70, 0, 0x00000000, 0xffffffff },
- { 0x0c74, 0, 0x00000000, 0xffffffff },
- { 0x0c78, 0, 0x00000000, 0xffffffff },
- { 0x0c7c, 0, 0x00000000, 0xffffffff },
- { 0x0c80, 0, 0x00000000, 0xffffffff },
- { 0x0c84, 0, 0x00000000, 0xffffffff },
- { 0x0c88, 0, 0x00000000, 0xffffffff },
- { 0x0c8c, 0, 0x00000000, 0xffffffff },
- { 0x0c90, 0, 0x00000000, 0xffffffff },
- { 0x0c94, 0, 0x00000000, 0xffffffff },
- { 0x0c98, 0, 0x00000000, 0xffffffff },
- { 0x0c9c, 0, 0x00000000, 0xffffffff },
- { 0x0ca0, 0, 0x00000000, 0xffffffff },
- { 0x0ca4, 0, 0x00000000, 0xffffffff },
- { 0x0ca8, 0, 0x00000000, 0x0007ffff },
- { 0x0cac, 0, 0x00000000, 0xffffffff },
- { 0x0cb0, 0, 0x00000000, 0xffffffff },
- { 0x0cb4, 0, 0x00000000, 0xffffffff },
- { 0x0cb8, 0, 0x00000000, 0xffffffff },
- { 0x0cbc, 0, 0x00000000, 0xffffffff },
- { 0x0cc0, 0, 0x00000000, 0xffffffff },
- { 0x0cc4, 0, 0x00000000, 0xffffffff },
- { 0x0cc8, 0, 0x00000000, 0xffffffff },
- { 0x0ccc, 0, 0x00000000, 0xffffffff },
- { 0x0cd0, 0, 0x00000000, 0xffffffff },
- { 0x0cd4, 0, 0x00000000, 0xffffffff },
- { 0x0cd8, 0, 0x00000000, 0xffffffff },
- { 0x0cdc, 0, 0x00000000, 0xffffffff },
- { 0x0ce0, 0, 0x00000000, 0xffffffff },
- { 0x0ce4, 0, 0x00000000, 0xffffffff },
- { 0x0ce8, 0, 0x00000000, 0xffffffff },
- { 0x0cec, 0, 0x00000000, 0xffffffff },
- { 0x0cf0, 0, 0x00000000, 0xffffffff },
- { 0x0cf4, 0, 0x00000000, 0xffffffff },
- { 0x0cf8, 0, 0x00000000, 0xffffffff },
- { 0x0cfc, 0, 0x00000000, 0xffffffff },
- { 0x0d00, 0, 0x00000000, 0xffffffff },
- { 0x0d04, 0, 0x00000000, 0xffffffff },
{ 0x1000, 0, 0x00000000, 0x00000001 },
{ 0x1004, 0, 0x00000000, 0x000f0001 },
- { 0x1044, 0, 0x00000000, 0xffc003ff },
- { 0x1080, 0, 0x00000000, 0x0001ffff },
- { 0x1084, 0, 0x00000000, 0xffffffff },
- { 0x1088, 0, 0x00000000, 0xffffffff },
- { 0x108c, 0, 0x00000000, 0xffffffff },
- { 0x1090, 0, 0x00000000, 0xffffffff },
- { 0x1094, 0, 0x00000000, 0xffffffff },
- { 0x1098, 0, 0x00000000, 0xffffffff },
- { 0x109c, 0, 0x00000000, 0xffffffff },
- { 0x10a0, 0, 0x00000000, 0xffffffff },
{ 0x1408, 0, 0x01c00800, 0x00000000 },
{ 0x149c, 0, 0x8000ffff, 0x00000000 },
@@ -3585,111 +3571,9 @@ bnx2_test_registers(struct bnx2 *bp)
{ 0x14c4, 0, 0x00003fff, 0x00000000 },
{ 0x14cc, 0, 0x00000000, 0x00000001 },
{ 0x14d0, 0, 0xffffffff, 0x00000000 },
- { 0x1500, 0, 0x00000000, 0xffffffff },
- { 0x1504, 0, 0x00000000, 0xffffffff },
- { 0x1508, 0, 0x00000000, 0xffffffff },
- { 0x150c, 0, 0x00000000, 0xffffffff },
- { 0x1510, 0, 0x00000000, 0xffffffff },
- { 0x1514, 0, 0x00000000, 0xffffffff },
- { 0x1518, 0, 0x00000000, 0xffffffff },
- { 0x151c, 0, 0x00000000, 0xffffffff },
- { 0x1520, 0, 0x00000000, 0xffffffff },
- { 0x1524, 0, 0x00000000, 0xffffffff },
- { 0x1528, 0, 0x00000000, 0xffffffff },
- { 0x152c, 0, 0x00000000, 0xffffffff },
- { 0x1530, 0, 0x00000000, 0xffffffff },
- { 0x1534, 0, 0x00000000, 0xffffffff },
- { 0x1538, 0, 0x00000000, 0xffffffff },
- { 0x153c, 0, 0x00000000, 0xffffffff },
- { 0x1540, 0, 0x00000000, 0xffffffff },
- { 0x1544, 0, 0x00000000, 0xffffffff },
- { 0x1548, 0, 0x00000000, 0xffffffff },
- { 0x154c, 0, 0x00000000, 0xffffffff },
- { 0x1550, 0, 0x00000000, 0xffffffff },
- { 0x1554, 0, 0x00000000, 0xffffffff },
- { 0x1558, 0, 0x00000000, 0xffffffff },
- { 0x1600, 0, 0x00000000, 0xffffffff },
- { 0x1604, 0, 0x00000000, 0xffffffff },
- { 0x1608, 0, 0x00000000, 0xffffffff },
- { 0x160c, 0, 0x00000000, 0xffffffff },
- { 0x1610, 0, 0x00000000, 0xffffffff },
- { 0x1614, 0, 0x00000000, 0xffffffff },
- { 0x1618, 0, 0x00000000, 0xffffffff },
- { 0x161c, 0, 0x00000000, 0xffffffff },
- { 0x1620, 0, 0x00000000, 0xffffffff },
- { 0x1624, 0, 0x00000000, 0xffffffff },
- { 0x1628, 0, 0x00000000, 0xffffffff },
- { 0x162c, 0, 0x00000000, 0xffffffff },
- { 0x1630, 0, 0x00000000, 0xffffffff },
- { 0x1634, 0, 0x00000000, 0xffffffff },
- { 0x1638, 0, 0x00000000, 0xffffffff },
- { 0x163c, 0, 0x00000000, 0xffffffff },
- { 0x1640, 0, 0x00000000, 0xffffffff },
- { 0x1644, 0, 0x00000000, 0xffffffff },
- { 0x1648, 0, 0x00000000, 0xffffffff },
- { 0x164c, 0, 0x00000000, 0xffffffff },
- { 0x1650, 0, 0x00000000, 0xffffffff },
- { 0x1654, 0, 0x00000000, 0xffffffff },
{ 0x1800, 0, 0x00000000, 0x00000001 },
{ 0x1804, 0, 0x00000000, 0x00000003 },
- { 0x1840, 0, 0x00000000, 0xffffffff },
- { 0x1844, 0, 0x00000000, 0xffffffff },
- { 0x1848, 0, 0x00000000, 0xffffffff },
- { 0x184c, 0, 0x00000000, 0xffffffff },
- { 0x1850, 0, 0x00000000, 0xffffffff },
- { 0x1900, 0, 0x7ffbffff, 0x00000000 },
- { 0x1904, 0, 0xffffffff, 0x00000000 },
- { 0x190c, 0, 0xffffffff, 0x00000000 },
- { 0x1914, 0, 0xffffffff, 0x00000000 },
- { 0x191c, 0, 0xffffffff, 0x00000000 },
- { 0x1924, 0, 0xffffffff, 0x00000000 },
- { 0x192c, 0, 0xffffffff, 0x00000000 },
- { 0x1934, 0, 0xffffffff, 0x00000000 },
- { 0x193c, 0, 0xffffffff, 0x00000000 },
- { 0x1944, 0, 0xffffffff, 0x00000000 },
- { 0x194c, 0, 0xffffffff, 0x00000000 },
- { 0x1954, 0, 0xffffffff, 0x00000000 },
- { 0x195c, 0, 0xffffffff, 0x00000000 },
- { 0x1964, 0, 0xffffffff, 0x00000000 },
- { 0x196c, 0, 0xffffffff, 0x00000000 },
- { 0x1974, 0, 0xffffffff, 0x00000000 },
- { 0x197c, 0, 0xffffffff, 0x00000000 },
- { 0x1980, 0, 0x0700ffff, 0x00000000 },
-
- { 0x1c00, 0, 0x00000000, 0x00000001 },
- { 0x1c04, 0, 0x00000000, 0x00000003 },
- { 0x1c08, 0, 0x0000000f, 0x00000000 },
- { 0x1c40, 0, 0x00000000, 0xffffffff },
- { 0x1c44, 0, 0x00000000, 0xffffffff },
- { 0x1c48, 0, 0x00000000, 0xffffffff },
- { 0x1c4c, 0, 0x00000000, 0xffffffff },
- { 0x1c50, 0, 0x00000000, 0xffffffff },
- { 0x1d00, 0, 0x7ffbffff, 0x00000000 },
- { 0x1d04, 0, 0xffffffff, 0x00000000 },
- { 0x1d0c, 0, 0xffffffff, 0x00000000 },
- { 0x1d14, 0, 0xffffffff, 0x00000000 },
- { 0x1d1c, 0, 0xffffffff, 0x00000000 },
- { 0x1d24, 0, 0xffffffff, 0x00000000 },
- { 0x1d2c, 0, 0xffffffff, 0x00000000 },
- { 0x1d34, 0, 0xffffffff, 0x00000000 },
- { 0x1d3c, 0, 0xffffffff, 0x00000000 },
- { 0x1d44, 0, 0xffffffff, 0x00000000 },
- { 0x1d4c, 0, 0xffffffff, 0x00000000 },
- { 0x1d54, 0, 0xffffffff, 0x00000000 },
- { 0x1d5c, 0, 0xffffffff, 0x00000000 },
- { 0x1d64, 0, 0xffffffff, 0x00000000 },
- { 0x1d6c, 0, 0xffffffff, 0x00000000 },
- { 0x1d74, 0, 0xffffffff, 0x00000000 },
- { 0x1d7c, 0, 0xffffffff, 0x00000000 },
- { 0x1d80, 0, 0x0700ffff, 0x00000000 },
-
- { 0x2004, 0, 0x00000000, 0x0337000f },
- { 0x2008, 0, 0xffffffff, 0x00000000 },
- { 0x200c, 0, 0xffffffff, 0x00000000 },
- { 0x2010, 0, 0xffffffff, 0x00000000 },
- { 0x2014, 0, 0x801fff80, 0x00000000 },
- { 0x2018, 0, 0x000003ff, 0x00000000 },
{ 0x2800, 0, 0x00000000, 0x00000001 },
{ 0x2804, 0, 0x00000000, 0x00003f01 },
@@ -3707,16 +3591,6 @@ bnx2_test_registers(struct bnx2 *bp)
{ 0x2c00, 0, 0x00000000, 0x00000011 },
{ 0x2c04, 0, 0x00000000, 0x00030007 },
- { 0x3000, 0, 0x00000000, 0x00000001 },
- { 0x3004, 0, 0x00000000, 0x007007ff },
- { 0x3008, 0, 0x00000003, 0x00000000 },
- { 0x300c, 0, 0xffffffff, 0x00000000 },
- { 0x3010, 0, 0xffffffff, 0x00000000 },
- { 0x3014, 0, 0xffffffff, 0x00000000 },
- { 0x3034, 0, 0xffffffff, 0x00000000 },
- { 0x3038, 0, 0xffffffff, 0x00000000 },
- { 0x3050, 0, 0x00000001, 0x00000000 },
-
{ 0x3c00, 0, 0x00000000, 0x00000001 },
{ 0x3c04, 0, 0x00000000, 0x00070000 },
{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
@@ -3726,88 +3600,11 @@ bnx2_test_registers(struct bnx2 *bp)
{ 0x3c18, 0, 0x00000000, 0xffffffff },
{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
{ 0x3c20, 0, 0xffffff00, 0x00000000 },
- { 0x3c24, 0, 0xffffffff, 0x00000000 },
- { 0x3c28, 0, 0xffffffff, 0x00000000 },
- { 0x3c2c, 0, 0xffffffff, 0x00000000 },
- { 0x3c30, 0, 0xffffffff, 0x00000000 },
- { 0x3c34, 0, 0xffffffff, 0x00000000 },
- { 0x3c38, 0, 0xffffffff, 0x00000000 },
- { 0x3c3c, 0, 0xffffffff, 0x00000000 },
- { 0x3c40, 0, 0xffffffff, 0x00000000 },
- { 0x3c44, 0, 0xffffffff, 0x00000000 },
- { 0x3c48, 0, 0xffffffff, 0x00000000 },
- { 0x3c4c, 0, 0xffffffff, 0x00000000 },
- { 0x3c50, 0, 0xffffffff, 0x00000000 },
- { 0x3c54, 0, 0xffffffff, 0x00000000 },
- { 0x3c58, 0, 0xffffffff, 0x00000000 },
- { 0x3c5c, 0, 0xffffffff, 0x00000000 },
- { 0x3c60, 0, 0xffffffff, 0x00000000 },
- { 0x3c64, 0, 0xffffffff, 0x00000000 },
- { 0x3c68, 0, 0xffffffff, 0x00000000 },
- { 0x3c6c, 0, 0xffffffff, 0x00000000 },
- { 0x3c70, 0, 0xffffffff, 0x00000000 },
- { 0x3c74, 0, 0x0000003f, 0x00000000 },
- { 0x3c78, 0, 0x00000000, 0x00000000 },
- { 0x3c7c, 0, 0x00000000, 0x00000000 },
- { 0x3c80, 0, 0x3fffffff, 0x00000000 },
- { 0x3c84, 0, 0x0000003f, 0x00000000 },
- { 0x3c88, 0, 0x00000000, 0xffffffff },
- { 0x3c8c, 0, 0x00000000, 0xffffffff },
-
- { 0x4000, 0, 0x00000000, 0x00000001 },
- { 0x4004, 0, 0x00000000, 0x00030000 },
- { 0x4008, 0, 0x00000ff0, 0x00000000 },
- { 0x400c, 0, 0xffffffff, 0x00000000 },
- { 0x4088, 0, 0x00000000, 0x00070303 },
-
- { 0x4400, 0, 0x00000000, 0x00000001 },
- { 0x4404, 0, 0x00000000, 0x00003f01 },
- { 0x4408, 0, 0x7fff00ff, 0x00000000 },
- { 0x440c, 0, 0xffffffff, 0x00000000 },
- { 0x4410, 0, 0xffff, 0x0000 },
- { 0x4414, 0, 0xffff, 0x0000 },
- { 0x4418, 0, 0xffff, 0x0000 },
- { 0x441c, 0, 0xffff, 0x0000 },
- { 0x4428, 0, 0xffffffff, 0x00000000 },
- { 0x442c, 0, 0xffffffff, 0x00000000 },
- { 0x4430, 0, 0xffffffff, 0x00000000 },
- { 0x4434, 0, 0xffffffff, 0x00000000 },
- { 0x4438, 0, 0xffffffff, 0x00000000 },
- { 0x443c, 0, 0xffffffff, 0x00000000 },
- { 0x4440, 0, 0xffffffff, 0x00000000 },
- { 0x4444, 0, 0xffffffff, 0x00000000 },
-
- { 0x4c00, 0, 0x00000000, 0x00000001 },
- { 0x4c04, 0, 0x00000000, 0x0000003f },
- { 0x4c08, 0, 0xffffffff, 0x00000000 },
- { 0x4c0c, 0, 0x0007fc00, 0x00000000 },
- { 0x4c10, 0, 0x80003fe0, 0x00000000 },
- { 0x4c14, 0, 0xffffffff, 0x00000000 },
- { 0x4c44, 0, 0x00000000, 0x9fff9fff },
- { 0x4c48, 0, 0x00000000, 0xb3009fff },
- { 0x4c4c, 0, 0x00000000, 0x77f33b30 },
- { 0x4c50, 0, 0x00000000, 0xffffffff },
{ 0x5004, 0, 0x00000000, 0x0000007f },
{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
{ 0x500c, 0, 0xf800f800, 0x07ff07ff },
- { 0x5400, 0, 0x00000008, 0x00000001 },
- { 0x5404, 0, 0x00000000, 0x0000003f },
- { 0x5408, 0, 0x0000001f, 0x00000000 },
- { 0x540c, 0, 0xffffffff, 0x00000000 },
- { 0x5410, 0, 0xffffffff, 0x00000000 },
- { 0x5414, 0, 0x0000ffff, 0x00000000 },
- { 0x5418, 0, 0x0000ffff, 0x00000000 },
- { 0x541c, 0, 0x0000ffff, 0x00000000 },
- { 0x5420, 0, 0x0000ffff, 0x00000000 },
- { 0x5428, 0, 0x000000ff, 0x00000000 },
- { 0x542c, 0, 0xff00ffff, 0x00000000 },
- { 0x5430, 0, 0x001fff80, 0x00000000 },
- { 0x5438, 0, 0xffffffff, 0x00000000 },
- { 0x543c, 0, 0xffffffff, 0x00000000 },
- { 0x5440, 0, 0xf800f800, 0x07ff07ff },
-
{ 0x5c00, 0, 0x00000000, 0x00000001 },
{ 0x5c04, 0, 0x00000000, 0x0003000f },
{ 0x5c08, 0, 0x00000003, 0x00000000 },
@@ -4794,6 +4591,64 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
info->fw_version[5] = 0;
}
+#define BNX2_REGDUMP_LEN (32 * 1024)
+
+static int
+bnx2_get_regs_len(struct net_device *dev)
+{
+ return BNX2_REGDUMP_LEN;
+}
+
+static void
+bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
+{
+ u32 *p = _p, i, offset;
+ u8 *orig_p = _p;
+ struct bnx2 *bp = netdev_priv(dev);
+ u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
+ 0x0800, 0x0880, 0x0c00, 0x0c10,
+ 0x0c30, 0x0d08, 0x1000, 0x101c,
+ 0x1040, 0x1048, 0x1080, 0x10a4,
+ 0x1400, 0x1490, 0x1498, 0x14f0,
+ 0x1500, 0x155c, 0x1580, 0x15dc,
+ 0x1600, 0x1658, 0x1680, 0x16d8,
+ 0x1800, 0x1820, 0x1840, 0x1854,
+ 0x1880, 0x1894, 0x1900, 0x1984,
+ 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
+ 0x1c80, 0x1c94, 0x1d00, 0x1d84,
+ 0x2000, 0x2030, 0x23c0, 0x2400,
+ 0x2800, 0x2820, 0x2830, 0x2850,
+ 0x2b40, 0x2c10, 0x2fc0, 0x3058,
+ 0x3c00, 0x3c94, 0x4000, 0x4010,
+ 0x4080, 0x4090, 0x43c0, 0x4458,
+ 0x4c00, 0x4c18, 0x4c40, 0x4c54,
+ 0x4fc0, 0x5010, 0x53c0, 0x5444,
+ 0x5c00, 0x5c18, 0x5c80, 0x5c90,
+ 0x5fc0, 0x6000, 0x6400, 0x6428,
+ 0x6800, 0x6848, 0x684c, 0x6860,
+ 0x6888, 0x6910, 0x8000 };
+
+ regs->version = 0;
+
+ memset(p, 0, BNX2_REGDUMP_LEN);
+
+ if (!netif_running(bp->dev))
+ return;
+
+ i = 0;
+ offset = reg_boundaries[0];
+ p += offset;
+ while (offset < BNX2_REGDUMP_LEN) {
+ *p++ = REG_RD(bp, offset);
+ offset += 4;
+ if (offset == reg_boundaries[i + 1]) {
+ offset = reg_boundaries[i + 2];
+ p = (u32 *) (orig_p + offset);
+ i += 2;
+ }
+ }
+}
+
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
@@ -4979,7 +4834,7 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
struct bnx2 *bp = netdev_priv(dev);
- ering->rx_max_pending = MAX_RX_DESC_CNT;
+ ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
ering->rx_mini_max_pending = 0;
ering->rx_jumbo_max_pending = 0;
@@ -4996,17 +4851,28 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
struct bnx2 *bp = netdev_priv(dev);
- if ((ering->rx_pending > MAX_RX_DESC_CNT) ||
+ if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
(ering->tx_pending > MAX_TX_DESC_CNT) ||
(ering->tx_pending <= MAX_SKB_FRAGS)) {
return -EINVAL;
}
- bp->rx_ring_size = ering->rx_pending;
+ if (netif_running(bp->dev)) {
+ bnx2_netif_stop(bp);
+ bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
+ bnx2_free_skbs(bp);
+ bnx2_free_mem(bp);
+ }
+
+ bnx2_set_rx_ring_size(bp, ering->rx_pending);
bp->tx_ring_size = ering->tx_pending;
if (netif_running(bp->dev)) {
- bnx2_netif_stop(bp);
+ int rc;
+
+ rc = bnx2_alloc_mem(bp);
+ if (rc)
+ return rc;
bnx2_init_nic(bp);
bnx2_netif_start(bp);
}
@@ -5360,6 +5226,8 @@ static struct ethtool_ops bnx2_ethtool_ops = {
.get_settings = bnx2_get_settings,
.set_settings = bnx2_set_settings,
.get_drvinfo = bnx2_get_drvinfo,
+ .get_regs_len = bnx2_get_regs_len,
+ .get_regs = bnx2_get_regs,
.get_wol = bnx2_get_wol,
.set_wol = bnx2_set_wol,
.nway_reset = bnx2_nway_reset,
@@ -5678,7 +5546,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->mac_addr[5] = (u8) reg;
bp->tx_ring_size = MAX_TX_DESC_CNT;
- bp->rx_ring_size = 100;
+ bnx2_set_rx_ring_size(bp, 100);
bp->rx_csum = 1;
@@ -5897,6 +5765,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
+ flush_scheduled_work();
bnx2_netif_stop(bp);
netif_device_detach(dev);
del_timer_sync(&bp->timer);
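
As an aside on the ring-sizing change above: bnx2_set_rx_ring_size() counts how many
descriptor pages the requested ring size needs and rounds that count up to the next
power of two; the ethtool path rejects requests larger than MAX_TOTAL_RX_DESC_CNT, so
the result stays within MAX_RX_RINGS. A minimal standalone sketch of the same
arithmetic follows (constants assumed from the bnx2.h hunk below; this is an
illustration, not driver code):

/* Sketch of the page-count rounding used by bnx2_set_rx_ring_size() above.
 * Assumes MAX_RX_DESC_CNT = 255 usable descriptors per page and
 * MAX_RX_RINGS = 4, as defined in the bnx2.h hunk that follows.
 */
#include <stdio.h>

#define MAX_RX_DESC_CNT 255
#define MAX_RX_RINGS    4

static unsigned int rx_rings_for(unsigned int size)
{
	unsigned int num_rings = 1, max = MAX_RX_RINGS;

	while (size > MAX_RX_DESC_CNT) {        /* one page per MAX_RX_DESC_CNT bds */
		size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	while ((max & num_rings) == 0)          /* round up to the next power of 2 */
		max >>= 1;
	if (num_rings != max)
		max <<= 1;
	return max;
}

int main(void)
{
	/* 100 descriptors fit in 1 ring, 300 need 2, 600 round up to 4 */
	printf("%u %u %u\n", rx_rings_for(100), rx_rings_for(300), rx_rings_for(600));
	return 0;
}
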
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 9f691cbd666..fd4b7f2eb47 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -23,6 +23,7 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
@@ -3792,8 +3793,10 @@ struct l2_fhdr {
#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd))
#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
+#define MAX_RX_RINGS 4
#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd))
#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1)
+#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS)
#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \
(MAX_TX_DESC_CNT - 1)) ? \
@@ -3805,8 +3808,10 @@ struct l2_fhdr {
(MAX_RX_DESC_CNT - 1)) ? \
(x) + 2 : (x) + 1
-#define RX_RING_IDX(x) ((x) & MAX_RX_DESC_CNT)
+#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
+#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> 8)
+#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT)
/* Context size. */
#define CTX_SHIFT 7
@@ -3903,6 +3908,15 @@ struct bnx2 {
struct status_block *status_blk;
u32 last_status_idx;
+ u32 flags;
+#define PCIX_FLAG 1
+#define PCI_32BIT_FLAG 2
+#define ONE_TDMA_FLAG 4 /* no longer used */
+#define NO_WOL_FLAG 8
+#define USING_DAC_FLAG 0x10
+#define USING_MSI_FLAG 0x20
+#define ASF_ENABLE_FLAG 0x40
+
struct tx_bd *tx_desc_ring;
struct sw_bd *tx_buf_ring;
u32 tx_prod_bseq;
@@ -3920,19 +3934,22 @@ struct bnx2 {
u32 rx_offset;
u32 rx_buf_use_size; /* useable size */
u32 rx_buf_size; /* with alignment */
- struct rx_bd *rx_desc_ring;
- struct sw_bd *rx_buf_ring;
+ u32 rx_max_ring_idx;
+
u32 rx_prod_bseq;
u16 rx_prod;
u16 rx_cons;
u32 rx_csum;
+ struct sw_bd *rx_buf_ring;
+ struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
+
/* Only used to synchronize netif_stop_queue/wake_queue when tx */
/* ring is full */
spinlock_t tx_lock;
- /* End of fileds used in the performance code paths. */
+ /* End of fields used in the performance code paths. */
char *name;
@@ -3945,15 +3962,6 @@ struct bnx2 {
/* Used to synchronize phy accesses. */
spinlock_t phy_lock;
- u32 flags;
-#define PCIX_FLAG 1
-#define PCI_32BIT_FLAG 2
-#define ONE_TDMA_FLAG 4 /* no longer used */
-#define NO_WOL_FLAG 8
-#define USING_DAC_FLAG 0x10
-#define USING_MSI_FLAG 0x20
-#define ASF_ENABLE_FLAG 0x40
-
u32 phy_flags;
#define PHY_SERDES_FLAG 1
#define PHY_CRC_FIX_FLAG 2
@@ -4004,8 +4012,9 @@ struct bnx2 {
dma_addr_t tx_desc_mapping;
+ int rx_max_ring;
int rx_ring_size;
- dma_addr_t rx_desc_mapping;
+ dma_addr_t rx_desc_mapping[MAX_RX_RINGS];
u16 tx_quick_cons_trip;
u16 tx_quick_cons_trip_int;
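
For reference, the new RX_RING()/RX_IDX() macros above split a software ring index into
a descriptor page and an offset within that page. A standalone illustration, assuming
256 descriptors per page (which is what the >> 8 shift implies; the real RX_DESC_CNT
depends on BCM_PAGE_SIZE and sizeof(struct rx_bd)):

/* Illustration only: decompose an RX index with the macros from the bnx2.h
 * hunk above, assuming RX_DESC_CNT = 256 (hence MAX_RX_DESC_CNT = 255 and a
 * ring shift of 8).
 */
#include <stdio.h>

#define RX_DESC_CNT     256
#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1)
#define RX_RING(x)      (((x) & ~MAX_RX_DESC_CNT) >> 8)
#define RX_IDX(x)       ((x) & MAX_RX_DESC_CNT)

int main(void)
{
	unsigned int idx[] = { 0, 255, 256, 700 };
	unsigned int i;

	for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++)
		printf("index %4u -> ring %u, slot %u\n",
		       idx[i], RX_RING(idx[i]), RX_IDX(idx[i]));
	return 0;
}
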
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 6e295fce5c6..8f1573e658a 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -91,6 +91,7 @@
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/mutex.h>
#include <net/checksum.h>
@@ -3892,7 +3893,7 @@ static void cas_reset(struct cas *cp, int blkflag)
spin_unlock(&cp->stat_lock[N_TX_RINGS]);
}
-/* Shut down the chip, must be called with pm_sem held. */
+/* Shut down the chip, must be called with pm_mutex held. */
static void cas_shutdown(struct cas *cp)
{
unsigned long flags;
@@ -4311,11 +4312,11 @@ static int cas_open(struct net_device *dev)
int hw_was_up, err;
unsigned long flags;
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
hw_was_up = cp->hw_running;
- /* The power-management semaphore protects the hw_running
+ /* The power-management mutex protects the hw_running
* etc. state so it is safe to do this bit without cp->lock
*/
if (!cp->hw_running) {
@@ -4364,7 +4365,7 @@ static int cas_open(struct net_device *dev)
cas_unlock_all_restore(cp, flags);
netif_start_queue(dev);
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
return 0;
err_spare:
@@ -4372,7 +4373,7 @@ err_spare:
cas_free_rxds(cp);
err_tx_tiny:
cas_tx_tiny_free(cp);
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
return err;
}
@@ -4382,7 +4383,7 @@ static int cas_close(struct net_device *dev)
struct cas *cp = netdev_priv(dev);
/* Make sure we don't get distracted by suspend/resume */
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
netif_stop_queue(dev);
@@ -4399,7 +4400,7 @@ static int cas_close(struct net_device *dev)
cas_spare_free(cp);
cas_free_rxds(cp);
cas_tx_tiny_free(cp);
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
return 0;
}
@@ -4834,10 +4835,10 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
unsigned long flags;
int rc = -EOPNOTSUPP;
- /* Hold the PM semaphore while doing ioctl's or we may collide
+ /* Hold the PM mutex while doing ioctl's or we may collide
* with open/close and power management and oops.
*/
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = cp->phy_addr;
@@ -4867,7 +4868,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
};
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
return rc;
}
@@ -4994,7 +4995,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
spin_lock_init(&cp->tx_lock[i]);
}
spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
- init_MUTEX(&cp->pm_sem);
+ mutex_init(&cp->pm_mutex);
init_timer(&cp->link_timer);
cp->link_timer.function = cas_link_timer;
@@ -5116,10 +5117,10 @@ err_out_free_consistent:
cp->init_block, cp->block_dvma);
err_out_iounmap:
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
if (cp->hw_running)
cas_shutdown(cp);
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
iounmap(cp->regs);
@@ -5152,11 +5153,11 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
cp = netdev_priv(dev);
unregister_netdev(dev);
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
flush_scheduled_work();
if (cp->hw_running)
cas_shutdown(cp);
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
#if 1
if (cp->orig_cacheline_size) {
@@ -5183,10 +5184,7 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
struct cas *cp = netdev_priv(dev);
unsigned long flags;
- /* We hold the PM semaphore during entire driver
- * sleep time
- */
- down(&cp->pm_sem);
+ mutex_lock(&cp->pm_mutex);
/* If the driver is opened, we stop the DMA */
if (cp->opened) {
@@ -5206,6 +5204,7 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
if (cp->hw_running)
cas_shutdown(cp);
+ mutex_unlock(&cp->pm_mutex);
return 0;
}
@@ -5217,6 +5216,7 @@ static int cas_resume(struct pci_dev *pdev)
printk(KERN_INFO "%s: resuming\n", dev->name);
+ mutex_lock(&cp->pm_mutex);
cas_hard_reset(cp);
if (cp->opened) {
unsigned long flags;
@@ -5229,7 +5229,7 @@ static int cas_resume(struct pci_dev *pdev)
netif_device_attach(dev);
}
- up(&cp->pm_sem);
+ mutex_unlock(&cp->pm_mutex);
return 0;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
index 88063ef16cf..ab55c7ee101 100644
--- a/drivers/net/cassini.h
+++ b/drivers/net/cassini.h
@@ -4284,7 +4284,7 @@ struct cas {
* (ie. not power managed) */
int hw_running;
int opened;
- struct semaphore pm_sem; /* open/close/suspend/resume */
+ struct mutex pm_mutex; /* open/close/suspend/resume */
struct cas_init_block *init_block;
struct cas_tx_desc *init_txds[MAX_TX_RINGS];
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index f39de16e6b9..49cd096a3c3 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -920,7 +920,7 @@ e1000_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
#ifdef CONFIG_E1000_NAPI
for (i = 0; i < adapter->num_rx_queues; i++)
- __dev_put(&adapter->polling_netdev[i]);
+ dev_put(&adapter->polling_netdev[i]);
#endif
if (!e1000_check_phy_reset_block(&adapter->hw))
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index e67b1d06611..95e2bb8dd7b 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -118,6 +118,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
/* Fill out IRQ field */
fep->interrupt = platform_get_irq(pdev, 0);
+ if (fep->interrupt < 0)
+ return -EINVAL;
/* Attach the memory for the FCC Parameter RAM */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 2e8f4446969..3dad69dfdb2 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -144,6 +144,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
/* Fill out IRQ field */
fep->interrupt = platform_get_irq_byname(pdev,"interrupt");
+ if (fep->interrupt < 0)
+ return -EINVAL;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
fep->fec.fecp =(void*)r->start;
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index a3897fda71f..a772b286f96 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -118,6 +118,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
/* Fill out IRQ field */
fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
+ if (fep->interrupt < 0)
+ return -EINVAL;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
fep->scc.sccp = (void *)r->start;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 0e8e3fcde9f..771e25d8c41 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -193,8 +193,12 @@ static int gfar_probe(struct platform_device *pdev)
priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
priv->interruptError = platform_get_irq_byname(pdev, "error");
+ if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
+ goto regs_fail;
} else {
priv->interruptTransmit = platform_get_irq(pdev, 0);
+ if (priv->interruptTransmit < 0)
+ goto regs_fail;
}
/* get a pointer to the register memory */
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index c81fe1c382d..5e6d0075299 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -64,6 +64,14 @@ config TEKRAM_DONGLE
dongles you will have to start irattach like this:
"irattach -d tekram".
+config TOIM3232_DONGLE
+ tristate "TOIM3232 IrDa dongle"
+ depends on DONGLE && IRDA
+ help
+ Say Y here if you want to build support for the Vishay/Temic
+ TOIM3232 and TOIM4232 based dongles.
+ To compile it as a module, choose M here.
+
config LITELINK_DONGLE
tristate "Parallax LiteLink dongle"
depends on DONGLE && IRDA
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 72cbfdc9cfc..27ab75f2079 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_OLD_BELKIN_DONGLE) += old_belkin-sir.o
obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
+obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
# The SIR helper module
sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 3137592d60c..910c0cab35b 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1778,7 +1778,7 @@ static struct pci_driver donauboe_pci_driver = {
static int __init
donauboe_init (void)
{
- return pci_module_init(&donauboe_pci_driver);
+ return pci_register_driver(&donauboe_pci_driver);
}
static void __exit
diff --git a/drivers/net/irda/ep7211_ir.c b/drivers/net/irda/ep7211_ir.c
index 31896262d21..4cba38f7e4a 100644
--- a/drivers/net/irda/ep7211_ir.c
+++ b/drivers/net/irda/ep7211_ir.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/init.h>
+#include <linux/spinlock.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
@@ -23,6 +24,8 @@ static void ep7211_ir_close(dongle_t *self);
static int ep7211_ir_change_speed(struct irda_task *task);
static int ep7211_ir_reset(struct irda_task *task);
+static DEFINE_SPINLOCK(ep7211_lock);
+
static struct dongle_reg dongle = {
.type = IRDA_EP7211_IR,
.open = ep7211_ir_open,
@@ -36,7 +39,7 @@ static void ep7211_ir_open(dongle_t *self, struct qos_info *qos)
{
unsigned int syscon1, flags;
- save_flags(flags); cli();
+ spin_lock_irqsave(&ep7211_lock, flags);
/* Turn on the SIR encoder. */
syscon1 = clps_readl(SYSCON1);
@@ -46,14 +49,14 @@ static void ep7211_ir_open(dongle_t *self, struct qos_info *qos)
/* XXX: We should disable modem status interrupts on the first
UART (interrupt #14). */
- restore_flags(flags);
+ spin_unlock_irqrestore(&ep7211_lock, flags);
}
static void ep7211_ir_close(dongle_t *self)
{
unsigned int syscon1, flags;
- save_flags(flags); cli();
+ spin_lock_irqsave(&ep7211_lock, flags);
/* Turn off the SIR encoder. */
syscon1 = clps_readl(SYSCON1);
@@ -63,7 +66,7 @@ static void ep7211_ir_close(dongle_t *self)
/* XXX: If we've disabled the modem status interrupts, we should
reset them back to their original state. */
- restore_flags(flags);
+ spin_unlock_irqrestore(&ep7211_lock, flags);
}
/*
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 8936058a3cc..6e2ec56cde0 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -740,7 +740,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
struct sk_buff *newskb;
struct sk_buff *dataskb;
struct urb *next_urb;
- int docopy;
+ unsigned int len, docopy;
IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
@@ -851,10 +851,11 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
dataskb->dev = self->netdev;
dataskb->mac.raw = dataskb->data;
dataskb->protocol = htons(ETH_P_IRDA);
+ len = dataskb->len;
netif_rx(dataskb);
/* Keep stats up to date */
- self->stats.rx_bytes += dataskb->len;
+ self->stats.rx_bytes += len;
self->stats.rx_packets++;
self->netdev->last_rx = jiffies;
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 101750bf210..6a98b7ae497 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -33,6 +33,7 @@
#include <asm/uaccess.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
+#include <linux/mutex.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
@@ -338,7 +339,7 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
/*****************************************************************/
/* serialize ldisc open/close with sir_dev */
-static DECLARE_MUTEX(irtty_sem);
+static DEFINE_MUTEX(irtty_mutex);
/* notifier from sir_dev when irda% device gets opened (ifup) */
@@ -348,11 +349,11 @@ static int irtty_start_dev(struct sir_dev *dev)
struct tty_struct *tty;
/* serialize with ldisc open/close */
- down(&irtty_sem);
+ mutex_lock(&irtty_mutex);
priv = dev->priv;
if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
- up(&irtty_sem);
+ mutex_unlock(&irtty_mutex);
return -ESTALE;
}
@@ -363,7 +364,7 @@ static int irtty_start_dev(struct sir_dev *dev)
/* Make sure we can receive more data */
irtty_stop_receiver(tty, FALSE);
- up(&irtty_sem);
+ mutex_unlock(&irtty_mutex);
return 0;
}
@@ -375,11 +376,11 @@ static int irtty_stop_dev(struct sir_dev *dev)
struct tty_struct *tty;
/* serialize with ldisc open/close */
- down(&irtty_sem);
+ mutex_lock(&irtty_mutex);
priv = dev->priv;
if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
- up(&irtty_sem);
+ mutex_unlock(&irtty_mutex);
return -ESTALE;
}
@@ -390,7 +391,7 @@ static int irtty_stop_dev(struct sir_dev *dev)
if (tty->driver->stop)
tty->driver->stop(tty);
- up(&irtty_sem);
+ mutex_unlock(&irtty_mutex);
return 0;
}
@@ -514,13 +515,13 @@ static int irtty_open(struct tty_struct *tty)
priv->dev = dev;
/* serialize with start_dev - in case we were racing with ifup */
- down(&irtty_sem);
+ mutex_lock(&irtty_mutex);
dev->priv = priv;
tty->disc_data = priv;
tty->receive_room = 65536;
- up(&irtty_sem);
+ mutex_unlock(&irtty_mutex);
IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index ee717d0e939..83141a3ff54 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -12,6 +12,7 @@
* Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
* Copyright (c) 1998 Lichen Wang, <lwang@actisys.com>
* Copyright (c) 1998 Actisys Corp., www.actisys.com
+ * Copyright (c) 2000-2004 Jean Tourrilhes <jt@hpl.hp.com>
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or
@@ -53,14 +54,13 @@
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
+#include <linux/pnp.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
-#include <linux/pm.h>
-#include <linux/pm_legacy.h>
-
#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
@@ -72,14 +72,27 @@
static char *driver_name = "nsc-ircc";
+/* Power Management */
+#define NSC_IRCC_DRIVER_NAME "nsc-ircc"
+static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state);
+static int nsc_ircc_resume(struct platform_device *dev);
+
+static struct platform_driver nsc_ircc_driver = {
+ .suspend = nsc_ircc_suspend,
+ .resume = nsc_ircc_resume,
+ .driver = {
+ .name = NSC_IRCC_DRIVER_NAME,
+ },
+};
+
/* Module parameters */
static int qos_mtt_bits = 0x07; /* 1 ms or more */
static int dongle_id;
/* Use BIOS settions by default, but user may supply module parameters */
-static unsigned int io[] = { ~0, ~0, ~0, ~0 };
-static unsigned int irq[] = { 0, 0, 0, 0, 0 };
-static unsigned int dma[] = { 0, 0, 0, 0, 0 };
+static unsigned int io[] = { ~0, ~0, ~0, ~0, ~0 };
+static unsigned int irq[] = { 0, 0, 0, 0, 0 };
+static unsigned int dma[] = { 0, 0, 0, 0, 0 };
static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info);
@@ -87,6 +100,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);
static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info);
+static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id);
/* These are the known NSC chips */
static nsc_chip_t chips[] = {
@@ -101,11 +115,12 @@ static nsc_chip_t chips[] = {
/* Contributed by Jan Frey - IBM A30/A31 */
{ "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff,
nsc_ircc_probe_39x, nsc_ircc_init_39x },
+ { "IBM", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
+ nsc_ircc_probe_39x, nsc_ircc_init_39x },
{ NULL }
};
-/* Max 4 instances for now */
-static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
+static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL, NULL };
static char *dongle_types[] = {
"Differential serial interface",
@@ -126,8 +141,24 @@ static char *dongle_types[] = {
"No dongle connected",
};
+/* PNP probing */
+static chipio_t pnp_info;
+static const struct pnp_device_id nsc_ircc_pnp_table[] = {
+ { .id = "NSC6001", .driver_data = 0 },
+ { .id = "IBM0071", .driver_data = 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table);
+
+static struct pnp_driver nsc_ircc_pnp_driver = {
+ .name = "nsc-ircc",
+ .id_table = nsc_ircc_pnp_table,
+ .probe = nsc_ircc_pnp_probe,
+};
+
/* Some prototypes */
-static int nsc_ircc_open(int i, chipio_t *info);
+static int nsc_ircc_open(chipio_t *info);
static int nsc_ircc_close(struct nsc_ircc_cb *self);
static int nsc_ircc_setup(chipio_t *info);
static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self);
@@ -146,7 +177,10 @@ static int nsc_ircc_net_open(struct net_device *dev);
static int nsc_ircc_net_close(struct net_device *dev);
static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev);
-static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
+
+/* Globals */
+static int pnp_registered;
+static int pnp_succeeded;
/*
* Function nsc_ircc_init ()
@@ -158,28 +192,36 @@ static int __init nsc_ircc_init(void)
{
chipio_t info;
nsc_chip_t *chip;
- int ret = -ENODEV;
+ int ret;
int cfg_base;
int cfg, id;
int reg;
int i = 0;
+ ret = platform_driver_register(&nsc_ircc_driver);
+ if (ret) {
+ IRDA_ERROR("%s, Can't register driver!\n", driver_name);
+ return ret;
+ }
+
+ /* Register with PnP subsystem to detect disable ports */
+ ret = pnp_register_driver(&nsc_ircc_pnp_driver);
+
+ if (ret >= 0)
+ pnp_registered = 1;
+
+ ret = -ENODEV;
+
/* Probe for all the NSC chipsets we know about */
- for (chip=chips; chip->name ; chip++) {
+ for (chip = chips; chip->name ; chip++) {
IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__,
chip->name);
/* Try all config registers for this chip */
- for (cfg=0; cfg<3; cfg++) {
+ for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) {
cfg_base = chip->cfg[cfg];
if (!cfg_base)
continue;
-
- memset(&info, 0, sizeof(chipio_t));
- info.cfg_base = cfg_base;
- info.fir_base = io[i];
- info.dma = dma[i];
- info.irq = irq[i];
/* Read index register */
reg = inb(cfg_base);
@@ -194,24 +236,65 @@ static int __init nsc_ircc_init(void)
if ((id & chip->cid_mask) == chip->cid_value) {
IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n",
__FUNCTION__, chip->name, id & ~chip->cid_mask);
- /*
- * If the user supplies the base address, then
- * we init the chip, if not we probe the values
- * set by the BIOS
- */
- if (io[i] < 0x2000) {
- chip->init(chip, &info);
- } else
- chip->probe(chip, &info);
- if (nsc_ircc_open(i, &info) == 0)
- ret = 0;
+ /*
+ * If we found a correct PnP setting,
+ * we first try it.
+ */
+ if (pnp_succeeded) {
+ memset(&info, 0, sizeof(chipio_t));
+ info.cfg_base = cfg_base;
+ info.fir_base = pnp_info.fir_base;
+ info.dma = pnp_info.dma;
+ info.irq = pnp_info.irq;
+
+ if (info.fir_base < 0x2000) {
+ IRDA_MESSAGE("%s, chip->init\n", driver_name);
+ chip->init(chip, &info);
+ } else
+ chip->probe(chip, &info);
+
+ if (nsc_ircc_open(&info) >= 0)
+ ret = 0;
+ }
+
+ /*
+ * Opening based on PnP values failed.
+ * Let's fallback to user values, or probe
+ * the chip.
+ */
+ if (ret) {
+ IRDA_DEBUG(2, "%s, PnP init failed\n", driver_name);
+ memset(&info, 0, sizeof(chipio_t));
+ info.cfg_base = cfg_base;
+ info.fir_base = io[i];
+ info.dma = dma[i];
+ info.irq = irq[i];
+
+ /*
+ * If the user supplies the base address, then
+ * we init the chip, if not we probe the values
+ * set by the BIOS
+ */
+ if (io[i] < 0x2000) {
+ chip->init(chip, &info);
+ } else
+ chip->probe(chip, &info);
+
+ if (nsc_ircc_open(&info) >= 0)
+ ret = 0;
+ }
i++;
} else {
IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id);
}
}
-
+ }
+
+ if (ret) {
+ platform_driver_unregister(&nsc_ircc_driver);
+ pnp_unregister_driver(&nsc_ircc_pnp_driver);
+ pnp_registered = 0;
}
return ret;
@@ -227,12 +310,17 @@ static void __exit nsc_ircc_cleanup(void)
{
int i;
- pm_unregister_all(nsc_ircc_pmproc);
-
- for (i=0; i < 4; i++) {
+ for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
nsc_ircc_close(dev_self[i]);
}
+
+ platform_driver_unregister(&nsc_ircc_driver);
+
+ if (pnp_registered)
+ pnp_unregister_driver(&nsc_ircc_pnp_driver);
+
+ pnp_registered = 0;
}
/*
@@ -241,16 +329,26 @@ static void __exit nsc_ircc_cleanup(void)
* Open driver instance
*
*/
-static int __init nsc_ircc_open(int i, chipio_t *info)
+static int __init nsc_ircc_open(chipio_t *info)
{
struct net_device *dev;
struct nsc_ircc_cb *self;
- struct pm_dev *pmdev;
void *ret;
- int err;
+ int err, chip_index;
IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) {
+ if (!dev_self[chip_index])
+ break;
+ }
+
+ if (chip_index == ARRAY_SIZE(dev_self)) {
+ IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name,
info->cfg_base);
@@ -271,8 +369,8 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
spin_lock_init(&self->lock);
/* Need to store self somewhere */
- dev_self[i] = self;
- self->index = i;
+ dev_self[chip_index] = self;
+ self->index = chip_index;
/* Initialize IO */
self->io.cfg_base = info->cfg_base;
@@ -351,7 +449,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
/* Check if user has supplied a valid dongle id or not */
if ((dongle_id <= 0) ||
- (dongle_id >= (sizeof(dongle_types) / sizeof(dongle_types[0]))) ) {
+ (dongle_id >= ARRAY_SIZE(dongle_types))) {
dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base);
IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name,
@@ -364,11 +462,18 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
self->io.dongle_id = dongle_id;
nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id);
- pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, nsc_ircc_pmproc);
- if (pmdev)
- pmdev->data = self;
+ self->pldev = platform_device_register_simple(NSC_IRCC_DRIVER_NAME,
+ self->index, NULL, 0);
+ if (IS_ERR(self->pldev)) {
+ err = PTR_ERR(self->pldev);
+ goto out5;
+ }
+ platform_set_drvdata(self->pldev, self);
- return 0;
+ return chip_index;
+
+ out5:
+ unregister_netdev(dev);
out4:
dma_free_coherent(NULL, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
@@ -379,7 +484,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
release_region(self->io.fir_base, self->io.fir_ext);
out1:
free_netdev(dev);
- dev_self[i] = NULL;
+ dev_self[chip_index] = NULL;
return err;
}
@@ -399,6 +504,8 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
iobase = self->io.fir_base;
+ platform_device_unregister(self->pldev);
+
/* Remove netdevice */
unregister_netdev(self->netdev);
@@ -806,6 +913,43 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
return 0;
}
+/* PNP probing */
+static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
+{
+ memset(&pnp_info, 0, sizeof(chipio_t));
+ pnp_info.irq = -1;
+ pnp_info.dma = -1;
+ pnp_succeeded = 1;
+
+ /* There don't seem to be any way to get the cfg_base.
+ * On my box, cfg_base is in the PnP descriptor of the
+ * motherboard. Oh well... Jean II */
+
+ if (pnp_port_valid(dev, 0) &&
+ !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED))
+ pnp_info.fir_base = pnp_port_start(dev, 0);
+
+ if (pnp_irq_valid(dev, 0) &&
+ !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED))
+ pnp_info.irq = pnp_irq(dev, 0);
+
+ if (pnp_dma_valid(dev, 0) &&
+ !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED))
+ pnp_info.dma = pnp_dma(dev, 0);
+
+ IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n",
+ __FUNCTION__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma);
+
+ if((pnp_info.fir_base == 0) ||
+ (pnp_info.irq == -1) || (pnp_info.dma == -1)) {
+ /* Returning an error will disable the device. Yuck ! */
+ //return -EINVAL;
+ pnp_succeeded = 0;
+ }
+
+ return 0;
+}
+
/*
* Function nsc_ircc_setup (info)
*
@@ -2161,45 +2305,83 @@ static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev)
return &self->stats;
}
-static void nsc_ircc_suspend(struct nsc_ircc_cb *self)
+static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
{
- IRDA_MESSAGE("%s, Suspending\n", driver_name);
+ struct nsc_ircc_cb *self = platform_get_drvdata(dev);
+ int bank;
+ unsigned long flags;
+ int iobase = self->io.fir_base;
if (self->io.suspended)
- return;
+ return 0;
- nsc_ircc_net_close(self->netdev);
+ IRDA_DEBUG(1, "%s, Suspending\n", driver_name);
+ rtnl_lock();
+ if (netif_running(self->netdev)) {
+ netif_device_detach(self->netdev);
+ spin_lock_irqsave(&self->lock, flags);
+ /* Save current bank */
+ bank = inb(iobase+BSR);
+
+ /* Disable interrupts */
+ switch_bank(iobase, BANK0);
+ outb(0, iobase+IER);
+
+ /* Restore bank register */
+ outb(bank, iobase+BSR);
+
+ spin_unlock_irqrestore(&self->lock, flags);
+ free_irq(self->io.irq, self->netdev);
+ disable_dma(self->io.dma);
+ }
self->io.suspended = 1;
+ rtnl_unlock();
+
+ return 0;
}
-static void nsc_ircc_wakeup(struct nsc_ircc_cb *self)
+static int nsc_ircc_resume(struct platform_device *dev)
{
+ struct nsc_ircc_cb *self = platform_get_drvdata(dev);
+ unsigned long flags;
+
if (!self->io.suspended)
- return;
+ return 0;
+ IRDA_DEBUG(1, "%s, Waking up\n", driver_name);
+
+ rtnl_lock();
nsc_ircc_setup(&self->io);
- nsc_ircc_net_open(self->netdev);
-
- IRDA_MESSAGE("%s, Waking up\n", driver_name);
+ nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id);
+ if (netif_running(self->netdev)) {
+ if (request_irq(self->io.irq, nsc_ircc_interrupt, 0,
+ self->netdev->name, self->netdev)) {
+ IRDA_WARNING("%s, unable to allocate irq=%d\n",
+ driver_name, self->io.irq);
+
+ /*
+ * Don't fail resume process, just kill this
+ * network interface
+ */
+ unregister_netdevice(self->netdev);
+ } else {
+ spin_lock_irqsave(&self->lock, flags);
+ nsc_ircc_change_speed(self, self->io.speed);
+ spin_unlock_irqrestore(&self->lock, flags);
+ netif_device_attach(self->netdev);
+ }
+
+ } else {
+ spin_lock_irqsave(&self->lock, flags);
+ nsc_ircc_change_speed(self, 9600);
+ spin_unlock_irqrestore(&self->lock, flags);
+ }
self->io.suspended = 0;
-}
+ rtnl_unlock();
-static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data)
-{
- struct nsc_ircc_cb *self = (struct nsc_ircc_cb*) dev->data;
- if (self) {
- switch (rqst) {
- case PM_SUSPEND:
- nsc_ircc_suspend(self);
- break;
- case PM_RESUME:
- nsc_ircc_wakeup(self);
- break;
- }
- }
- return 0;
+ return 0;
}
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 6edf7e51462..dacf671abcd 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -269,7 +269,7 @@ struct nsc_ircc_cb {
__u32 new_speed;
int index; /* Instance index */
- struct pm_dev *dev;
+ struct platform_device *pldev;
};
static inline void switch_bank(int iobase, int bank)
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index 8d225921ae7..d7e32d9554f 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/kmod.h>
+#include <linux/mutex.h>
#include <net/irda/irda.h>
@@ -28,7 +29,7 @@
*/
static LIST_HEAD(dongle_list); /* list of registered dongle drivers */
-static DECLARE_MUTEX(dongle_list_lock); /* protects the list */
+static DEFINE_MUTEX(dongle_list_lock); /* protects the list */
int irda_register_dongle(struct dongle_driver *new)
{
@@ -38,25 +39,25 @@ int irda_register_dongle(struct dongle_driver *new)
IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
__FUNCTION__, new->driver_name, new->type);
- down(&dongle_list_lock);
+ mutex_lock(&dongle_list_lock);
list_for_each(entry, &dongle_list) {
drv = list_entry(entry, struct dongle_driver, dongle_list);
if (new->type == drv->type) {
- up(&dongle_list_lock);
+ mutex_unlock(&dongle_list_lock);
return -EEXIST;
}
}
list_add(&new->dongle_list, &dongle_list);
- up(&dongle_list_lock);
+ mutex_unlock(&dongle_list_lock);
return 0;
}
EXPORT_SYMBOL(irda_register_dongle);
int irda_unregister_dongle(struct dongle_driver *drv)
{
- down(&dongle_list_lock);
+ mutex_lock(&dongle_list_lock);
list_del(&drv->dongle_list);
- up(&dongle_list_lock);
+ mutex_unlock(&dongle_list_lock);
return 0;
}
EXPORT_SYMBOL(irda_unregister_dongle);
@@ -75,7 +76,7 @@ int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
return -EBUSY;
/* serialize access to the list of registered dongles */
- down(&dongle_list_lock);
+ mutex_lock(&dongle_list_lock);
list_for_each(entry, &dongle_list) {
drv = list_entry(entry, struct dongle_driver, dongle_list);
@@ -109,14 +110,14 @@ int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
if (!drv->open || (err=drv->open(dev))!=0)
goto out_reject; /* failed to open driver */
- up(&dongle_list_lock);
+ mutex_unlock(&dongle_list_lock);
return 0;
out_reject:
dev->dongle_drv = NULL;
module_put(drv->owner);
out_unlock:
- up(&dongle_list_lock);
+ mutex_unlock(&dongle_list_lock);
return err;
}
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
new file mode 100644
index 00000000000..aa1a9b0ed83
--- /dev/null
+++ b/drivers/net/irda/toim3232-sir.c
@@ -0,0 +1,375 @@
+/*********************************************************************
+ *
+ * Filename: toim3232-sir.c
+ * Version: 1.0
+ * Description: Implementation of dongles based on the Vishay/Temic
+ * TOIM3232 SIR Endec chipset. Currently only the
+ * IRWave IR320ST-2 is tested, although it should work
+ * with any TOIM3232 or TOIM4232 chipset based RS232
+ * dongle with minimal modification.
+ * Based heavily on the Tekram driver (tekram.c),
+ * with thanks to Dag Brattli and Martin Diehl.
+ * Status: Experimental.
+ * Author: David Basden <davidb-irda@rcpt.to>
+ * Created at: Thu Feb 09 23:47:32 2006
+ *
+ * Copyright (c) 2006 David Basden.
+ * Copyright (c) 1998-1999 Dag Brattli,
+ * Copyright (c) 2002 Martin Diehl,
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * Neither Dag Brattli nor University of Tromsø admit liability nor
+ * provide warranty for any of this software. This material is
+ * provided "AS-IS" and at no charge.
+ *
+ ********************************************************************/
+
+/*
+ * This driver has currently only been tested on the IRWave IR320ST-2
+ *
+ * PROTOCOL:
+ *
+ * The protocol for talking to the TOIM3232 is quite easy, and is
+ * designed to interface with RS232 using only level converters. The
+ * BR/~D line on the chip is brought high to signal 'command mode',
+ * where a command byte is sent to select the baudrate of the RS232
+ * interface and the pulse length of the IRDA output. When BR/~D
+ * is brought low, the dongle then changes to the selected baudrate,
+ * and the RS232 interface is used for data until BR/~D is brought
+ * high again. The initial speed for the TOIMx232 after RESET is
+ * 9600 baud. The baudrate for command-mode is the last selected
+ * baud-rate, or 9600 after a RESET.
+ *
+ * The dongle I have (below) adds some extra hardware on the front end,
+ * but this is mostly directed towards drawing parasitic power from the RS232
+ * line rather than changing very much about how to communicate with
+ * the TOIM3232.
+ *
+ * The protocol to talk to the TOIM4232 chipset seems to be almost
+ * identical to the TOIM3232 (and the 4232 datasheet is more detailed)
+ * so this code will probably work on that as well, although I haven't
+ * tested it on that hardware.
+ *
+ * Target dongle variations that might be common:
+ *
+ * DTR and RTS function:
+ * The data sheet for the 4232 has a sample implementation that hooks the
+ * DTR and RTS lines to the RESET and BaudRate/~Data lines of the
+ * chip (through line-converters). Given both DTR and RTS would have to
+ * be held low in normal operation, and the TOIMx232 requires +5V to
+ * signal ground, most dongle designers would almost certainly choose
+ * an implementation that kept at least one of DTR or RTS high in
+ * normal operation to provide power to the dongle, but this will likely
+ * vary between designs.
+ *
+ * User specified command bits:
+ * There are two user-controllable output lines from the TOIMx232 that
+ * can be set low or high by setting the appropriate bits in the
+ * high-nibble of the command byte (when setting speed and pulse length).
+ * These might be used to switch on and off added hardware or extra
+ * dongle features.
+ *
+ *
+ * Target hardware: IRWave IR320ST-2
+ *
+ * The IRWave IR320ST-2 is a simple dongle based on the Vishay/Temic
+ * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transceiver.
+ * It uses a hex inverter and some discrete components to buffer and
+ * line convert the RS232 down to 5V.
+ *
+ * The dongle is powered through a voltage regulator, fed by a large
+ * capacitor. To switch the dongle on, DTR is brought high to charge
+ * the capacitor and drive the voltage regulator. DTR isn't associated
+ * with any control lines on the TOIM3232. Parasitic power is also taken
+ * from the RTS, TD and RD lines when brought high, but through resistors.
+ * When DTR is low, the circuit might lose power even with RTS high.
+ *
+ * RTS is inverted and attached to the BR/~D input pin. When RTS
+ * is high, BR/~D is low, and the TOIM3232 is in the normal 'data' mode.
+ * When RTS is brought low, BR/~D is high, and the TOIM3232 is in 'command
+ * mode'.
+ *
+ * For some unknown reason, the RESET line isn't actually connected
+ * to anything. This means to reset the dongle to get it to a known
+ * state (9600 baud) you must drop DTR and RTS low, wait for the power
+ * capacitor to discharge, and then bring DTR (and RTS for data mode)
+ * high again, and wait for the capacitor to charge, the power supply
+ * to stabilise, and the oscillator clock to stabilise.
+ *
+ * Fortunately, if the current baudrate is known, the chipset can
+ * easily change speed by entering command mode without having to
+ * reset the dongle first.
+ *
+ * Major Components:
+ *
+ * - Vishay/Temic TOIM3232 SIR Endec to change RS232 pulse timings
+ * to IRDA pulse timings
+ * - 3.6864MHz crystal to drive TOIM3232 clock oscillator
+ * - DM74lS04M Inverting Hex line buffer for RS232 input buffering
+ * and level conversion
+ * - PJ2951AC 150mA voltage regulator
+ * - Vishay/Temic TFDS4500 SIR IRDA front-end transceiver
+ *
+ */
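+
+/*
+ * Worked example, using the layout described above and the TOIM3232_*
+ * defines further down: to move the link to 19200 baud, bring BR/~D high
+ * (RTS low on the IR320ST-2) and write the single command byte
+ *
+ *	TOIM3232_PW | TOIM3232_19200	(0x10 | 0x03 == 0x13)
+ *
+ * at the current line speed (PW is the pulse-width select bit, which this
+ * driver sets for every rate below 115200), then bring BR/~D low again
+ * and switch the host UART to 19200 for data mode.
+ */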
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <net/irda/irda.h>
+
+#include "sir-dev.h"
+
+static int toim3232delay = 150; /* default is 150 ms */
+module_param(toim3232delay, int, 0);
+MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay");
+
+#if 0
+static int toim3232flipdtr = 0; /* default is DTR high to reset */
+module_param(toim3232flipdtr, int, 0);
+MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)");
+
+static int toim3232fliprts = 0; /* default is RTS high for baud change */
+module_param(toim3232fliprts, int, 0);
+MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)");
+#endif
+
+static int toim3232_open(struct sir_dev *);
+static int toim3232_close(struct sir_dev *);
+static int toim3232_change_speed(struct sir_dev *, unsigned);
+static int toim3232_reset(struct sir_dev *);
+
+#define TOIM3232_115200 0x00
+#define TOIM3232_57600 0x01
+#define TOIM3232_38400 0x02
+#define TOIM3232_19200 0x03
+#define TOIM3232_9600 0x06
+#define TOIM3232_2400 0x0A
+
+#define TOIM3232_PW 0x10 /* Pulse select bit */
+
+static struct dongle_driver toim3232 = {
+ .owner = THIS_MODULE,
+ .driver_name = "Vishay TOIM3232",
+ .type = IRDA_TOIM3232_DONGLE,
+ .open = toim3232_open,
+ .close = toim3232_close,
+ .reset = toim3232_reset,
+ .set_speed = toim3232_change_speed,
+};
+
+static int __init toim3232_sir_init(void)
+{
+ if (toim3232delay < 1 || toim3232delay > 500)
+ toim3232delay = 200;
+ IRDA_DEBUG(1, "%s - using %d ms delay\n",
+ toim3232.driver_name, toim3232delay);
+ return irda_register_dongle(&toim3232);
+}
+
+static void __exit toim3232_sir_cleanup(void)
+{
+ irda_unregister_dongle(&toim3232);
+}
+
+static int toim3232_open(struct sir_dev *dev)
+{
+ struct qos_info *qos = &dev->qos;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Pull the lines high to start with.
+ *
+ * For the IR320ST-2, we need to charge the main supply capacitor to
+ * switch the device on. We keep DTR high throughout to do this.
+ * When RTS, TD and RD are high, they will also trickle-charge the
+ * cap. RTS is high for data transmission, and low for baud rate select.
+ * -- DGB
+ */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* The TOIM3232 supports many speeds between 1200bps and 115200bps.
+ * We really only care about those supported by the IRDA spec, but
+ * 38400 seems to be implemented in many places */
+ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
+
+ /* From the tekram driver. Not sure what a reasonable value is -- DGB */
+ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
+ irda_qos_bits_to_value(qos);
+
+ /* irda thread waits 50 msec for power settling */
+
+ return 0;
+}
+
+static int toim3232_close(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Power off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ return 0;
+}
+
+/*
+ * Function toim3232_change_speed (dev, speed)
+ *
+ * Set the speed for the TOIM3232 based dongle. Warning, this
+ * function must be called with a process context!
+ *
+ * Algorithm
+ * 1. keep DTR high but clear RTS to bring into baud programming mode
+ * 2. wait at least 7us to enter programming mode
+ * 3. send control word to set baud rate and timing
+ * 4. wait at least 1us
+ * 5. bring RTS high to enter DATA mode (RS232 is passed through to transceiver)
+ * 6. should take effect immediately (although probably worth waiting)
+ */
+
+#define TOIM3232_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
+
+static int toim3232_change_speed(struct sir_dev *dev, unsigned speed)
+{
+ unsigned state = dev->fsm.substate;
+ unsigned delay = 0;
+ u8 byte;
+ static int ret = 0;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ switch(state) {
+ case SIRDEV_STATE_DONGLE_SPEED:
+
+ /* Figure out what we are going to send as a control byte */
+ switch (speed) {
+ case 2400:
+ byte = TOIM3232_PW|TOIM3232_2400;
+ break;
+ default:
+ speed = 9600;
+ ret = -EINVAL;
+ /* fall thru */
+ case 9600:
+ byte = TOIM3232_PW|TOIM3232_9600;
+ break;
+ case 19200:
+ byte = TOIM3232_PW|TOIM3232_19200;
+ break;
+ case 38400:
+ byte = TOIM3232_PW|TOIM3232_38400;
+ break;
+ case 57600:
+ byte = TOIM3232_PW|TOIM3232_57600;
+ break;
+ case 115200:
+ byte = TOIM3232_115200;
+ break;
+ }
+
+ /* Set DTR, Clear RTS: Go into baud programming mode */
+ sirdev_set_dtr_rts(dev, TRUE, FALSE);
+
+ /* Wait at least 7us */
+ udelay(14);
+
+ /* Write control byte */
+ sirdev_raw_write(dev, &byte, 1);
+
+ dev->speed = speed;
+
+ state = TOIM3232_STATE_WAIT_SPEED;
+ delay = toim3232delay;
+ break;
+
+ case TOIM3232_STATE_WAIT_SPEED:
+ /* Control byte has been transmitted. Wait for 'at least 1us' */
+ udelay(14);
+
+ /* Set DTR, Set RTS: Go into normal data mode */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait (TODO: check this is needed) */
+ udelay(50);
+ break;
+
+ default:
+ printk(KERN_ERR "%s - undefined state %d\n", __FUNCTION__, state);
+ ret = -EINVAL;
+ break;
+ }
+
+ dev->fsm.substate = state;
+ return (delay > 0) ? delay : ret;
+}
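+
+/*
+ * Note on the two-step flow above: as in the tekram driver this is based
+ * on, a positive return value from set_speed tells the sir-dev state
+ * machine to wait that many msecs before calling the dongle code again,
+ * which is why the switch back to data mode is done from the
+ * TOIM3232_STATE_WAIT_SPEED substate instead of straight after writing
+ * the control byte.
+ */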
+
+/*
+ * Function toim3232_reset (dev)
+ *
+ * This function resets the toim3232 dongle. Warning, this function
+ * must be called with a process context!!
+ *
+ * What we should do is:
+ * 0. Pull RESET high
+ * 1. Wait for at least 7us
+ * 2. Pull RESET low
+ * 3. Wait for at least 7us
+ * 4. Pull BR/~D high
+ * 5. Wait for at least 7us
+ * 6. Send control byte to set baud rate
+ * 7. Wait at least 1us after stop bit
+ * 8. Pull BR/~D low
+ * 9. Should then be in data mode
+ *
+ * Because the IR320ST-2 doesn't have the RESET line connected for some reason,
+ * we'll have to do something else.
+ *
+ * The default speed after a RESET is 9600, so let's try just bringing it up in
+ * data mode after switching it off, waiting for the supply capacitor to
+ * discharge, and then switch it back on. This isn't actually pulling RESET
+ * high, but it seems to have the same effect.
+ *
+ * This behaviour will probably work on dongles that have the RESET line connected,
+ * but if not, add a flag for the IR320ST-2, and implement the above-listed proper
+ * behaviour.
+ *
+ * RTS is inverted and then fed to BR/~D, so to put it in programming mode, we
+ * need to pull RTS low.
+ */
+
+static int toim3232_reset(struct sir_dev *dev)
+{
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Switch off both DTR and RTS to switch off dongle */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE);
+
+ /* Should sleep for a while. Doing it this way might be evil. */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(50));
+
+ /* Set DTR, Set RTS (data mode) */
+ sirdev_set_dtr_rts(dev, TRUE, TRUE);
+
+ /* Wait at least 10 ms for power to stabilize again */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(10));
+
+ /* Speed should now be 9600 */
+ dev->speed = 9600;
+
+ return 0;
+}
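+
+#if 0
+/*
+ * Untested sketch of the "proper" reset sequence listed above, for a
+ * dongle that does have the RESET line wired up. It assumes the TOIM4232
+ * datasheet sample wiring (DTR driving RESET, RTS driving BR/~D, both
+ * held low in normal operation), which is NOT how the IR320ST-2 is wired,
+ * so the DTR/RTS polarities here are an assumption and must be checked
+ * against the actual dongle before enabling this.
+ */
+static int toim3232_hard_reset(struct sir_dev *dev)
+{
+ u8 byte = TOIM3232_PW | TOIM3232_9600;
+
+ sirdev_set_dtr_rts(dev, TRUE, FALSE); /* 0: pull RESET high */
+ udelay(14); /* 1: wait at least 7us */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE); /* 2: pull RESET low */
+ udelay(14); /* 3: wait at least 7us */
+ sirdev_set_dtr_rts(dev, FALSE, TRUE); /* 4: pull BR/~D high */
+ udelay(14); /* 5: wait at least 7us */
+ sirdev_raw_write(dev, &byte, 1); /* 6: program 9600 baud */
+ udelay(14); /* 7: let the byte go out (real code would use an fsm delay) */
+ sirdev_set_dtr_rts(dev, FALSE, FALSE); /* 8: BR/~D low: back to data mode */
+
+ dev->speed = 9600; /* 9: dongle is now in data mode at 9600 */
+ return 0;
+}
+#endif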
+
+MODULE_AUTHOR("David Basden <davidb-linux@rcpt.to>");
+MODULE_DESCRIPTION("Vishay/Temic TOIM3232 based dongle driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("irda-dongle-12"); /* IRDA_TOIM3232_DONGLE */
+
+module_init(toim3232_sir_init);
+module_exit(toim3232_sir_cleanup);
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index a9f49f058cf..97a49e0be76 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1887,7 +1887,7 @@ static int __init vlsi_mod_init(void)
vlsi_proc_root->owner = THIS_MODULE;
}
- ret = pci_module_init(&vlsi_irda_driver);
+ ret = pci_register_driver(&vlsi_irda_driver);
if (ret && vlsi_proc_root)
remove_proc_entry(PROC_DIR, NULL);
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h
index 7754d1974b9..4262c1da6d4 100644
--- a/drivers/net/mv643xx_eth.h
+++ b/drivers/net/mv643xx_eth.h
@@ -42,13 +42,23 @@
#define MAX_DESCS_PER_SKB 1
#endif
+/*
+ * The MV643XX HW requires 8-byte alignment. However, when I/O
+ * is non-cache-coherent, we need to ensure that the I/O buffers
+ * we use don't share cache lines with other data.
+ */
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE)
+#define ETH_DMA_ALIGN L1_CACHE_BYTES
+#else
+#define ETH_DMA_ALIGN 8
+#endif
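+
+/*
+ * Receive buffers are expected to be realigned to this boundary after
+ * allocation. A sketch of the usual idiom (not necessarily the exact
+ * code in mv643xx_eth.c):
+ *
+ *	unaligned = (unsigned long)skb->data & (ETH_DMA_ALIGN - 1);
+ *	if (unaligned)
+ *		skb_reserve(skb, ETH_DMA_ALIGN - unaligned);
+ */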
+
#define ETH_VLAN_HLEN 4
#define ETH_FCS_LEN 4
-#define ETH_DMA_ALIGN 8 /* hw requires 8-byte alignment */
-#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
+#define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */
#define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \
- ETH_VLAN_HLEN + ETH_FCS_LEN)
-#define ETH_RX_SKB_SIZE ((dev->mtu + ETH_WRAPPER_LEN + 7) & ~0x7)
+ ETH_VLAN_HLEN + ETH_FCS_LEN)
+#define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN)
#define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */
#define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 7e900572eaf..9595f74da93 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,12 +22,12 @@
*************************************************************************/
#define DRV_NAME "pcnet32"
-#define DRV_VERSION "1.31c"
-#define DRV_RELDATE "01.Nov.2005"
+#define DRV_VERSION "1.32"
+#define DRV_RELDATE "18.Mar.2006"
#define PFX DRV_NAME ": "
-static const char * const version =
-DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
+static const char *const version =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
#include <linux/module.h>
#include <linux/kernel.h>
@@ -58,18 +58,23 @@ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
* PCI device identifiers for "new style" Linux PCI Device Drivers
*/
static struct pci_device_id pcnet32_pci_tbl[] = {
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- /*
- * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
- * the incorrect vendor id.
- */
- { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0 },
- { 0, }
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+
+ /*
+ * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
+ * the incorrect vendor id.
+ */
+ { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0},
+
+ { } /* terminate list */
};
-MODULE_DEVICE_TABLE (pci, pcnet32_pci_tbl);
+MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
static int cards_found;
@@ -77,13 +82,11 @@ static int cards_found;
* VLB I/O addresses
*/
static unsigned int pcnet32_portlist[] __initdata =
- { 0x300, 0x320, 0x340, 0x360, 0 };
-
-
+ { 0x300, 0x320, 0x340, 0x360, 0 };
static int pcnet32_debug = 0;
-static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
-static int pcnet32vlb; /* check for VLB cards ? */
+static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
+static int pcnet32vlb; /* check for VLB cards ? */
static struct net_device *pcnet32_dev;
@@ -110,32 +113,34 @@ static int rx_copybreak = 200;
* to internal options
*/
static const unsigned char options_mapping[] = {
- PCNET32_PORT_ASEL, /* 0 Auto-select */
- PCNET32_PORT_AUI, /* 1 BNC/AUI */
- PCNET32_PORT_AUI, /* 2 AUI/BNC */
- PCNET32_PORT_ASEL, /* 3 not supported */
- PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
- PCNET32_PORT_ASEL, /* 5 not supported */
- PCNET32_PORT_ASEL, /* 6 not supported */
- PCNET32_PORT_ASEL, /* 7 not supported */
- PCNET32_PORT_ASEL, /* 8 not supported */
- PCNET32_PORT_MII, /* 9 MII 10baseT */
- PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
- PCNET32_PORT_MII, /* 11 MII (autosel) */
- PCNET32_PORT_10BT, /* 12 10BaseT */
- PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
- PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */
- PCNET32_PORT_ASEL /* 15 not supported */
+ PCNET32_PORT_ASEL, /* 0 Auto-select */
+ PCNET32_PORT_AUI, /* 1 BNC/AUI */
+ PCNET32_PORT_AUI, /* 2 AUI/BNC */
+ PCNET32_PORT_ASEL, /* 3 not supported */
+ PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
+ PCNET32_PORT_ASEL, /* 5 not supported */
+ PCNET32_PORT_ASEL, /* 6 not supported */
+ PCNET32_PORT_ASEL, /* 7 not supported */
+ PCNET32_PORT_ASEL, /* 8 not supported */
+ PCNET32_PORT_MII, /* 9 MII 10baseT */
+ PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
+ PCNET32_PORT_MII, /* 11 MII (autosel) */
+ PCNET32_PORT_10BT, /* 12 10BaseT */
+ PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
+ /* 14 MII 100BaseTx-FD */
+ PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
+ PCNET32_PORT_ASEL /* 15 not supported */
};
static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
- "Loopback test (offline)"
+ "Loopback test (offline)"
};
+
#define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN)
-#define PCNET32_NUM_REGS 168
+#define PCNET32_NUM_REGS 136
-#define MAX_UNITS 8 /* More are supported, limit only on options */
+#define MAX_UNITS 8 /* More are supported, limit only on options */
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];
static int homepna[MAX_UNITS];
@@ -151,124 +156,6 @@ static int homepna[MAX_UNITS];
*/
/*
- * History:
- * v0.01: Initial version
- * only tested on Alpha Noname Board
- * v0.02: changed IRQ handling for new interrupt scheme (dev_id)
- * tested on a ASUS SP3G
- * v0.10: fixed an odd problem with the 79C974 in a Compaq Deskpro XL
- * looks like the 974 doesn't like stopping and restarting in a
- * short period of time; now we do a reinit of the lance; the
- * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
- * and hangs the machine (thanks to Klaus Liedl for debugging)
- * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32,
- * made it standalone (no need for lance.c)
- * v0.13: added additional PCI detecting for special PCI devices (Compaq)
- * v0.14: stripped down additional PCI probe (thanks to David C Niemi
- * and sveneric@xs4all.nl for testing this on their Compaq boxes)
- * v0.15: added 79C965 (VLB) probe
- * added interrupt sharing for PCI chips
- * v0.16: fixed set_multicast_list on Alpha machines
- * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
- * v0.19: changed setting of autoselect bit
- * v0.20: removed additional Compaq PCI probe; there is now a working one
- * in arch/i386/bios32.c
- * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu
- * v0.22: added printing of status to ring dump
- * v0.23: changed enet_statistics to net_devive_stats
- * v0.90: added multicast filter
- * added module support
- * changed irq probe to new style
- * added PCnetFast chip id
- * added fix for receive stalls with Intel saturn chipsets
- * added in-place rx skbs like in the tulip driver
- * minor cleanups
- * v0.91: added PCnetFast+ chip id
- * back port to 2.0.x
- * v1.00: added some stuff from Donald Becker's 2.0.34 version
- * added support for byte counters in net_dev_stats
- * v1.01: do ring dumps, only when debugging the driver
- * increased the transmit timeout
- * v1.02: fixed memory leak in pcnet32_init_ring()
- * v1.10: workaround for stopped transmitter
- * added port selection for modules
- * detect special T1/E1 WAN card and setup port selection
- * v1.11: fixed wrong checking of Tx errors
- * v1.20: added check of return value kmalloc (cpeterso@cs.washington.edu)
- * added save original kmalloc addr for freeing (mcr@solidum.com)
- * added support for PCnetHome chip (joe@MIT.EDU)
- * rewritten PCI card detection
- * added dwio mode to get driver working on some PPC machines
- * v1.21: added mii selection and mii ioctl
- * v1.22: changed pci scanning code to make PPC people happy
- * fixed switching to 32bit mode in pcnet32_open() (thanks
- * to Michael Richard <mcr@solidum.com> for noticing this one)
- * added sub vendor/device id matching (thanks again to
- * Michael Richard <mcr@solidum.com>)
- * added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>)
- * v1.23 fixed small bug, when manual selecting MII speed/duplex
- * v1.24 Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO
- * underflows. Added tx_start_pt module parameter. Increased
- * TX_RING_SIZE from 16 to 32. Added #ifdef'd code to use DXSUFLO
- * for FAST[+] chipsets. <kaf@fc.hp.com>
- * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com>
- * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com>
- * v1.26 Converted to pci_alloc_consistent, Jamey Hicks / George France
- * <jamey@crl.dec.com>
- * - Fixed a few bugs, related to running the controller in 32bit mode.
- * 23 Oct, 2000. Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
- * v1.26p Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker
- * v1.27 improved CSR/PROM address detection, lots of cleanups,
- * new pcnet32vlb module option, HP-PARISC support,
- * added module parameter descriptions,
- * initial ethtool support - Helge Deller <deller@gmx.de>
- * v1.27a Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp>
- * use alloc_etherdev and register_netdev
- * fix pci probe not increment cards_found
- * FD auto negotiate error workaround for xSeries250
- * clean up and using new mii module
- * v1.27b Sep 30 2002 Kent Yoder <yoder1@us.ibm.com>
- * Added timer for cable connection state changes.
- * v1.28 20 Feb 2004 Don Fry <brazilnut@us.ibm.com>
- * Jon Mason <jonmason@us.ibm.com>, Chinmay Albal <albal@in.ibm.com>
- * Now uses ethtool_ops, netif_msg_* and generic_mii_ioctl.
- * Fixes bogus 'Bus master arbitration failure', pci_[un]map_single
- * length errors, and transmit hangs. Cleans up after errors in open.
- * Jim Lewis <jklewis@us.ibm.com> added ethernet loopback test.
- * Thomas Munck Steenholdt <tmus@tmus.dk> non-mii ioctl corrections.
- * v1.29 6 Apr 2004 Jim Lewis <jklewis@us.ibm.com> added physical
- * identification code (blink led's) and register dump.
- * Don Fry added timer for 971/972 so skbufs don't remain on tx ring
- * forever.
- * v1.30 18 May 2004 Don Fry removed timer and Last Transmit Interrupt
- * (ltint) as they added complexity and didn't give good throughput.
- * v1.30a 22 May 2004 Don Fry limit frames received during interrupt.
- * v1.30b 24 May 2004 Don Fry fix bogus tx carrier errors with 79c973,
- * assisted by Bruce Penrod <bmpenrod@endruntechnologies.com>.
- * v1.30c 25 May 2004 Don Fry added netif_wake_queue after pcnet32_restart.
- * v1.30d 01 Jun 2004 Don Fry discard oversize rx packets.
- * v1.30e 11 Jun 2004 Don Fry recover after fifo error and rx hang.
- * v1.30f 16 Jun 2004 Don Fry cleanup IRQ to allow 0 and 1 for PCI,
- * expanding on suggestions from Ralf Baechle <ralf@linux-mips.org>,
- * and Brian Murphy <brian@murphy.dk>.
- * v1.30g 22 Jun 2004 Patrick Simmons <psimmons@flash.net> added option
- * homepna for selecting HomePNA mode for PCNet/Home 79C978.
- * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
- * v1.30i 28 Jun 2004 Don Fry change to use module_param.
- * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test.
- * v1.31 02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam().
- * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4
- * to allow loopback test to work unchanged.
- * v1.31b 06 Oct 2005 Don Fry changed alloc_ring to show name of device
- * if allocation fails
- * v1.31c 01 Nov 2005 Don Fry Allied Telesyn 2700/2701 FX are 100Mbit only.
- * Force 100Mbit FD if Auto (ASEL) is selected.
- * See Bugzilla 2669 and 4551.
- */
-
-
-/*
* Set the number of Tx and Rx buffers, using Log_2(# buffers).
* Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
* That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
@@ -303,42 +190,42 @@ static int homepna[MAX_UNITS];
/* The PCNET32 Rx and Tx ring descriptors. */
struct pcnet32_rx_head {
- u32 base;
- s16 buf_length;
- s16 status;
- u32 msg_length;
- u32 reserved;
+ u32 base;
+ s16 buf_length;
+ s16 status;
+ u32 msg_length;
+ u32 reserved;
};
struct pcnet32_tx_head {
- u32 base;
- s16 length;
- s16 status;
- u32 misc;
- u32 reserved;
+ u32 base;
+ s16 length;
+ s16 status;
+ u32 misc;
+ u32 reserved;
};
/* The PCNET32 32-Bit initialization block, described in databook. */
struct pcnet32_init_block {
- u16 mode;
- u16 tlen_rlen;
- u8 phys_addr[6];
- u16 reserved;
- u32 filter[2];
- /* Receive and transmit ring base, along with extra bits. */
- u32 rx_ring;
- u32 tx_ring;
+ u16 mode;
+ u16 tlen_rlen;
+ u8 phys_addr[6];
+ u16 reserved;
+ u32 filter[2];
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring;
+ u32 tx_ring;
};
/* PCnet32 access functions */
struct pcnet32_access {
- u16 (*read_csr)(unsigned long, int);
- void (*write_csr)(unsigned long, int, u16);
- u16 (*read_bcr)(unsigned long, int);
- void (*write_bcr)(unsigned long, int, u16);
- u16 (*read_rap)(unsigned long);
- void (*write_rap)(unsigned long, u16);
- void (*reset)(unsigned long);
+ u16 (*read_csr) (unsigned long, int);
+ void (*write_csr) (unsigned long, int, u16);
+ u16 (*read_bcr) (unsigned long, int);
+ void (*write_bcr) (unsigned long, int, u16);
+ u16 (*read_rap) (unsigned long);
+ void (*write_rap) (unsigned long, u16);
+ void (*reset) (unsigned long);
};
/*
@@ -346,760 +233,794 @@ struct pcnet32_access {
* so the structure should be allocated using pci_alloc_consistent().
*/
struct pcnet32_private {
- struct pcnet32_init_block init_block;
- /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
- struct pcnet32_rx_head *rx_ring;
- struct pcnet32_tx_head *tx_ring;
- dma_addr_t dma_addr; /* DMA address of beginning of this
- object, returned by
- pci_alloc_consistent */
- struct pci_dev *pci_dev; /* Pointer to the associated pci device
- structure */
- const char *name;
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
- struct sk_buff **tx_skbuff;
- struct sk_buff **rx_skbuff;
- dma_addr_t *tx_dma_addr;
- dma_addr_t *rx_dma_addr;
- struct pcnet32_access a;
- spinlock_t lock; /* Guard lock */
- unsigned int cur_rx, cur_tx; /* The next free ring entry */
- unsigned int rx_ring_size; /* current rx ring size */
- unsigned int tx_ring_size; /* current tx ring size */
- unsigned int rx_mod_mask; /* rx ring modular mask */
- unsigned int tx_mod_mask; /* tx ring modular mask */
- unsigned short rx_len_bits;
- unsigned short tx_len_bits;
- dma_addr_t rx_ring_dma_addr;
- dma_addr_t tx_ring_dma_addr;
- unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
- struct net_device_stats stats;
- char tx_full;
- int options;
- unsigned int shared_irq:1, /* shared irq possible */
- dxsuflo:1, /* disable transmit stop on uflo */
- mii:1; /* mii port available */
- struct net_device *next;
- struct mii_if_info mii_if;
- struct timer_list watchdog_timer;
- struct timer_list blink_timer;
- u32 msg_enable; /* debug message level */
+ struct pcnet32_init_block init_block;
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+ struct pcnet32_rx_head *rx_ring;
+ struct pcnet32_tx_head *tx_ring;
+ dma_addr_t dma_addr;/* DMA address of beginning of this
+ object, returned by pci_alloc_consistent */
+ struct pci_dev *pci_dev;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff **tx_skbuff;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *tx_dma_addr;
+ dma_addr_t *rx_dma_addr;
+ struct pcnet32_access a;
+ spinlock_t lock; /* Guard lock */
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int rx_ring_size; /* current rx ring size */
+ unsigned int tx_ring_size; /* current tx ring size */
+ unsigned int rx_mod_mask; /* rx ring modular mask */
+ unsigned int tx_mod_mask; /* tx ring modular mask */
+ unsigned short rx_len_bits;
+ unsigned short tx_len_bits;
+ dma_addr_t rx_ring_dma_addr;
+ dma_addr_t tx_ring_dma_addr;
+ unsigned int dirty_rx, /* ring entries to be freed. */
+ dirty_tx;
+
+ struct net_device_stats stats;
+ char tx_full;
+ char phycount; /* number of phys found */
+ int options;
+ unsigned int shared_irq:1, /* shared irq possible */
+ dxsuflo:1, /* disable transmit stop on uflo */
+ mii:1; /* mii port available */
+ struct net_device *next;
+ struct mii_if_info mii_if;
+ struct timer_list watchdog_timer;
+ struct timer_list blink_timer;
+ u32 msg_enable; /* debug message level */
+
+ /* each bit indicates an available PHY */
+ u32 phymask;
};
static void pcnet32_probe_vlbus(void);
-static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
-static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
-static int pcnet32_open(struct net_device *);
-static int pcnet32_init_ring(struct net_device *);
-static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
-static int pcnet32_rx(struct net_device *);
-static void pcnet32_tx_timeout (struct net_device *dev);
+static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
+static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
+static int pcnet32_open(struct net_device *);
+static int pcnet32_init_ring(struct net_device *);
+static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
+static int pcnet32_rx(struct net_device *);
+static void pcnet32_tx_timeout(struct net_device *dev);
static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *);
-static int pcnet32_close(struct net_device *);
+static int pcnet32_close(struct net_device *);
static struct net_device_stats *pcnet32_get_stats(struct net_device *);
static void pcnet32_load_multicast(struct net_device *dev);
static void pcnet32_set_multicast_list(struct net_device *);
-static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
static void pcnet32_watchdog(struct net_device *);
static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
-static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val);
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
+ int val);
static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
static void pcnet32_ethtool_test(struct net_device *dev,
- struct ethtool_test *eth_test, u64 *data);
-static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1);
+ struct ethtool_test *eth_test, u64 * data);
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
static int pcnet32_phys_id(struct net_device *dev, u32 data);
static void pcnet32_led_blink_callback(struct net_device *dev);
static int pcnet32_get_regs_len(struct net_device *dev);
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- void *ptr);
+ void *ptr);
static void pcnet32_purge_tx_ring(struct net_device *dev);
static int pcnet32_alloc_ring(struct net_device *dev, char *name);
static void pcnet32_free_ring(struct net_device *dev);
-
+static void pcnet32_check_media(struct net_device *dev, int verbose);
enum pci_flags_bit {
- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+ PCI_USES_IO = 1, PCI_USES_MEM = 2, PCI_USES_MASTER = 4,
+ PCI_ADDR0 = 0x10 << 0, PCI_ADDR1 = 0x10 << 1, PCI_ADDR2 =
+ 0x10 << 2, PCI_ADDR3 = 0x10 << 3,
};
-
-static u16 pcnet32_wio_read_csr (unsigned long addr, int index)
+static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
{
- outw (index, addr+PCNET32_WIO_RAP);
- return inw (addr+PCNET32_WIO_RDP);
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RDP);
}
-static void pcnet32_wio_write_csr (unsigned long addr, int index, u16 val)
+static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
{
- outw (index, addr+PCNET32_WIO_RAP);
- outw (val, addr+PCNET32_WIO_RDP);
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_RDP);
}
-static u16 pcnet32_wio_read_bcr (unsigned long addr, int index)
+static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
{
- outw (index, addr+PCNET32_WIO_RAP);
- return inw (addr+PCNET32_WIO_BDP);
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_BDP);
}
-static void pcnet32_wio_write_bcr (unsigned long addr, int index, u16 val)
+static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
{
- outw (index, addr+PCNET32_WIO_RAP);
- outw (val, addr+PCNET32_WIO_BDP);
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_BDP);
}
-static u16 pcnet32_wio_read_rap (unsigned long addr)
+static u16 pcnet32_wio_read_rap(unsigned long addr)
{
- return inw (addr+PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RAP);
}
-static void pcnet32_wio_write_rap (unsigned long addr, u16 val)
+static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
{
- outw (val, addr+PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_RAP);
}
-static void pcnet32_wio_reset (unsigned long addr)
+static void pcnet32_wio_reset(unsigned long addr)
{
- inw (addr+PCNET32_WIO_RESET);
+ inw(addr + PCNET32_WIO_RESET);
}
-static int pcnet32_wio_check (unsigned long addr)
+static int pcnet32_wio_check(unsigned long addr)
{
- outw (88, addr+PCNET32_WIO_RAP);
- return (inw (addr+PCNET32_WIO_RAP) == 88);
+ outw(88, addr + PCNET32_WIO_RAP);
+ return (inw(addr + PCNET32_WIO_RAP) == 88);
}
static struct pcnet32_access pcnet32_wio = {
- .read_csr = pcnet32_wio_read_csr,
- .write_csr = pcnet32_wio_write_csr,
- .read_bcr = pcnet32_wio_read_bcr,
- .write_bcr = pcnet32_wio_write_bcr,
- .read_rap = pcnet32_wio_read_rap,
- .write_rap = pcnet32_wio_write_rap,
- .reset = pcnet32_wio_reset
+ .read_csr = pcnet32_wio_read_csr,
+ .write_csr = pcnet32_wio_write_csr,
+ .read_bcr = pcnet32_wio_read_bcr,
+ .write_bcr = pcnet32_wio_write_bcr,
+ .read_rap = pcnet32_wio_read_rap,
+ .write_rap = pcnet32_wio_write_rap,
+ .reset = pcnet32_wio_reset
};
-static u16 pcnet32_dwio_read_csr (unsigned long addr, int index)
+static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
{
- outl (index, addr+PCNET32_DWIO_RAP);
- return (inl (addr+PCNET32_DWIO_RDP) & 0xffff);
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
}
-static void pcnet32_dwio_write_csr (unsigned long addr, int index, u16 val)
+static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
{
- outl (index, addr+PCNET32_DWIO_RAP);
- outl (val, addr+PCNET32_DWIO_RDP);
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_RDP);
}
-static u16 pcnet32_dwio_read_bcr (unsigned long addr, int index)
+static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
{
- outl (index, addr+PCNET32_DWIO_RAP);
- return (inl (addr+PCNET32_DWIO_BDP) & 0xffff);
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
}
-static void pcnet32_dwio_write_bcr (unsigned long addr, int index, u16 val)
+static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
{
- outl (index, addr+PCNET32_DWIO_RAP);
- outl (val, addr+PCNET32_DWIO_BDP);
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_BDP);
}
-static u16 pcnet32_dwio_read_rap (unsigned long addr)
+static u16 pcnet32_dwio_read_rap(unsigned long addr)
{
- return (inl (addr+PCNET32_DWIO_RAP) & 0xffff);
+ return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
}
-static void pcnet32_dwio_write_rap (unsigned long addr, u16 val)
+static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
{
- outl (val, addr+PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_RAP);
}
-static void pcnet32_dwio_reset (unsigned long addr)
+static void pcnet32_dwio_reset(unsigned long addr)
{
- inl (addr+PCNET32_DWIO_RESET);
+ inl(addr + PCNET32_DWIO_RESET);
}
-static int pcnet32_dwio_check (unsigned long addr)
+static int pcnet32_dwio_check(unsigned long addr)
{
- outl (88, addr+PCNET32_DWIO_RAP);
- return ((inl (addr+PCNET32_DWIO_RAP) & 0xffff) == 88);
+ outl(88, addr + PCNET32_DWIO_RAP);
+ return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88);
}
static struct pcnet32_access pcnet32_dwio = {
- .read_csr = pcnet32_dwio_read_csr,
- .write_csr = pcnet32_dwio_write_csr,
- .read_bcr = pcnet32_dwio_read_bcr,
- .write_bcr = pcnet32_dwio_write_bcr,
- .read_rap = pcnet32_dwio_read_rap,
- .write_rap = pcnet32_dwio_write_rap,
- .reset = pcnet32_dwio_reset
+ .read_csr = pcnet32_dwio_read_csr,
+ .write_csr = pcnet32_dwio_write_csr,
+ .read_bcr = pcnet32_dwio_read_bcr,
+ .write_bcr = pcnet32_dwio_write_bcr,
+ .read_rap = pcnet32_dwio_read_rap,
+ .write_rap = pcnet32_dwio_write_rap,
+ .reset = pcnet32_dwio_reset
};
#ifdef CONFIG_NET_POLL_CONTROLLER
static void pcnet32_poll_controller(struct net_device *dev)
{
- disable_irq(dev->irq);
- pcnet32_interrupt(0, dev, NULL);
- enable_irq(dev->irq);
+ disable_irq(dev->irq);
+ pcnet32_interrupt(0, dev, NULL);
+ enable_irq(dev->irq);
}
#endif
-
static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
- int r = -EOPNOTSUPP;
-
- if (lp->mii) {
- spin_lock_irqsave(&lp->lock, flags);
- mii_ethtool_gset(&lp->mii_if, cmd);
- spin_unlock_irqrestore(&lp->lock, flags);
- r = 0;
- }
- return r;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ mii_ethtool_gset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ r = 0;
+ }
+ return r;
}
static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
- int r = -EOPNOTSUPP;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
- if (lp->mii) {
- spin_lock_irqsave(&lp->lock, flags);
- r = mii_ethtool_sset(&lp->mii_if, cmd);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- return r;
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_ethtool_sset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
}
-static void pcnet32_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void pcnet32_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
- struct pcnet32_private *lp = dev->priv;
-
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- if (lp->pci_dev)
- strcpy (info->bus_info, pci_name(lp->pci_dev));
- else
- sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+ struct pcnet32_private *lp = dev->priv;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ if (lp->pci_dev)
+ strcpy(info->bus_info, pci_name(lp->pci_dev));
+ else
+ sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
}
static u32 pcnet32_get_link(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&lp->lock, flags);
- if (lp->mii) {
- r = mii_link_ok(&lp->mii_if);
- } else {
- ulong ioaddr = dev->base_addr; /* card base I/O address */
- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r;
- return r;
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->mii) {
+ r = mii_link_ok(&lp->mii_if);
+ } else {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return r;
}
static u32 pcnet32_get_msglevel(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- return lp->msg_enable;
+ struct pcnet32_private *lp = dev->priv;
+ return lp->msg_enable;
}
static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
{
- struct pcnet32_private *lp = dev->priv;
- lp->msg_enable = value;
+ struct pcnet32_private *lp = dev->priv;
+ lp->msg_enable = value;
}
static int pcnet32_nway_reset(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
- int r = -EOPNOTSUPP;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
- if (lp->mii) {
- spin_lock_irqsave(&lp->lock, flags);
- r = mii_nway_restart(&lp->mii_if);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- return r;
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_nway_restart(&lp->mii_if);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
}
-static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static void pcnet32_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
{
- struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_private *lp = dev->priv;
- ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
- ering->tx_pending = lp->tx_ring_size - 1;
- ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
- ering->rx_pending = lp->rx_ring_size - 1;
+ ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
+ ering->tx_pending = lp->tx_ring_size - 1;
+ ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
+ ering->rx_pending = lp->rx_ring_size - 1;
}
-static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static int pcnet32_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
- int i;
-
- if (ering->rx_mini_pending || ering->rx_jumbo_pending)
- return -EINVAL;
-
- if (netif_running(dev))
- pcnet32_close(dev);
-
- spin_lock_irqsave(&lp->lock, flags);
- pcnet32_free_ring(dev);
- lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE);
- lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE);
-
- /* set the minimum ring size to 4, to allow the loopback test to work
- * unchanged.
- */
- for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
- if (lp->tx_ring_size <= (1 << i))
- break;
- }
- lp->tx_ring_size = (1 << i);
- lp->tx_mod_mask = lp->tx_ring_size - 1;
- lp->tx_len_bits = (i << 12);
-
- for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
- if (lp->rx_ring_size <= (1 << i))
- break;
- }
- lp->rx_ring_size = (1 << i);
- lp->rx_mod_mask = lp->rx_ring_size - 1;
- lp->rx_len_bits = (i << 4);
-
- if (pcnet32_alloc_ring(dev, dev->name)) {
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
+ int i;
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ pcnet32_close(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
pcnet32_free_ring(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- return -ENOMEM;
- }
+ lp->tx_ring_size =
+ min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
+ lp->rx_ring_size =
+ min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+
+ /* set the minimum ring size to 4, to allow the loopback test to work
+ * unchanged.
+ */
+ for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
+ if (lp->tx_ring_size <= (1 << i))
+ break;
+ }
+ lp->tx_ring_size = (1 << i);
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->tx_len_bits = (i << 12);
- spin_unlock_irqrestore(&lp->lock, flags);
+ for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
+ if (lp->rx_ring_size <= (1 << i))
+ break;
+ }
+ lp->rx_ring_size = (1 << i);
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->rx_len_bits = (i << 4);
+
+ if (pcnet32_alloc_ring(dev, dev->name)) {
+ pcnet32_free_ring(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return -ENOMEM;
+ }
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk(KERN_INFO PFX "%s: Ring Param Settings: RX: %d, TX: %d\n",
- dev->name, lp->rx_ring_size, lp->tx_ring_size);
+ spin_unlock_irqrestore(&lp->lock, flags);
- if (netif_running(dev))
- pcnet32_open(dev);
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_INFO PFX
+ "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
+ lp->rx_ring_size, lp->tx_ring_size);
- return 0;
+ if (netif_running(dev))
+ pcnet32_open(dev);
+
+ return 0;
}
-static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
+ u8 * data)
{
- memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
+ memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
}
static int pcnet32_self_test_count(struct net_device *dev)
{
- return PCNET32_TEST_LEN;
+ return PCNET32_TEST_LEN;
}
static void pcnet32_ethtool_test(struct net_device *dev,
- struct ethtool_test *test, u64 *data)
+ struct ethtool_test *test, u64 * data)
{
- struct pcnet32_private *lp = dev->priv;
- int rc;
-
- if (test->flags == ETH_TEST_FL_OFFLINE) {
- rc = pcnet32_loopback_test(dev, data);
- if (rc) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Loopback test failed.\n", dev->name);
- test->flags |= ETH_TEST_FL_FAILED;
+ struct pcnet32_private *lp = dev->priv;
+ int rc;
+
+ if (test->flags == ETH_TEST_FL_OFFLINE) {
+ rc = pcnet32_loopback_test(dev, data);
+ if (rc) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: Loopback test failed.\n",
+ dev->name);
+ test->flags |= ETH_TEST_FL_FAILED;
+ } else if (netif_msg_hw(lp))
+ printk(KERN_DEBUG "%s: Loopback test passed.\n",
+ dev->name);
} else if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Loopback test passed.\n", dev->name);
- } else if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: No tests to run (specify 'Offline' on ethtool).", dev->name);
-} /* end pcnet32_ethtool_test */
+ printk(KERN_DEBUG
+ "%s: No tests to run (specify 'Offline' on ethtool).",
+ dev->name);
+} /* end pcnet32_ethtool_test */
-static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1)
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
{
- struct pcnet32_private *lp = dev->priv;
- struct pcnet32_access *a = &lp->a; /* access to registers */
- ulong ioaddr = dev->base_addr; /* card base I/O address */
- struct sk_buff *skb; /* sk buff */
- int x, i; /* counters */
- int numbuffs = 4; /* number of TX/RX buffers and descs */
- u16 status = 0x8300; /* TX ring status */
- u16 teststatus; /* test of ring status */
- int rc; /* return code */
- int size; /* size of packets */
- unsigned char *packet; /* source packet data */
- static const int data_len = 60; /* length of source packets */
- unsigned long flags;
- unsigned long ticks;
-
- *data1 = 1; /* status of test, default to fail */
- rc = 1; /* default to fail */
-
- if (netif_running(dev))
- pcnet32_close(dev);
-
- spin_lock_irqsave(&lp->lock, flags);
-
- /* Reset the PCNET32 */
- lp->a.reset (ioaddr);
-
- /* switch pcnet32 to 32bit mode */
- lp->a.write_bcr (ioaddr, 20, 2);
-
- lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
- lp->init_block.filter[0] = 0;
- lp->init_block.filter[1] = 0;
-
- /* purge & init rings but don't actually restart */
- pcnet32_restart(dev, 0x0000);
-
- lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
-
- /* Initialize Transmit buffers. */
- size = data_len + 15;
- for (x=0; x<numbuffs; x++) {
- if (!(skb = dev_alloc_skb(size))) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Cannot allocate skb at line: %d!\n",
- dev->name, __LINE__);
- goto clean_up;
- } else {
- packet = skb->data;
- skb_put(skb, size); /* create space for data */
- lp->tx_skbuff[x] = skb;
- lp->tx_ring[x].length = le16_to_cpu(-skb->len);
- lp->tx_ring[x].misc = 0;
-
- /* put DA and SA into the skb */
- for (i=0; i<6; i++)
- *packet++ = dev->dev_addr[i];
- for (i=0; i<6; i++)
- *packet++ = dev->dev_addr[i];
- /* type */
- *packet++ = 0x08;
- *packet++ = 0x06;
- /* packet number */
- *packet++ = x;
- /* fill packet with data */
- for (i=0; i<data_len; i++)
- *packet++ = i;
-
- lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
- lp->tx_ring[x].base = (u32)le32_to_cpu(lp->tx_dma_addr[x]);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->tx_ring[x].status = le16_to_cpu(status);
- }
- }
-
- x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */
- x = x | 0x0002;
- a->write_bcr(ioaddr, 32, x);
-
- lp->a.write_csr (ioaddr, 15, 0x0044); /* set int loopback in CSR15 */
-
- teststatus = le16_to_cpu(0x8000);
- lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */
-
- /* Check status of descriptors */
- for (x=0; x<numbuffs; x++) {
- ticks = 0;
- rmb();
- while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
- spin_unlock_irqrestore(&lp->lock, flags);
- mdelay(1);
- spin_lock_irqsave(&lp->lock, flags);
- rmb();
- ticks++;
- }
- if (ticks == 200) {
- if (netif_msg_hw(lp))
- printk("%s: Desc %d failed to reset!\n",dev->name,x);
- break;
- }
- }
-
- lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
- wmb();
- if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
- printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
-
- for (x=0; x<numbuffs; x++) {
- printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
- skb = lp->rx_skbuff[x];
- for (i=0; i<size; i++) {
- printk("%02x ", *(skb->data+i));
- }
- printk("\n");
- }
- }
-
- x = 0;
- rc = 0;
- while (x<numbuffs && !rc) {
- skb = lp->rx_skbuff[x];
- packet = lp->tx_skbuff[x]->data;
- for (i=0; i<size; i++) {
- if (*(skb->data+i) != packet[i]) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Error in compare! %2x - %02x %02x\n",
- dev->name, i, *(skb->data+i), packet[i]);
- rc = 1;
- break;
- }
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a; /* access to registers */
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ struct sk_buff *skb; /* sk buff */
+ int x, i; /* counters */
+ int numbuffs = 4; /* number of TX/RX buffers and descs */
+ u16 status = 0x8300; /* TX ring status */
+ u16 teststatus; /* test of ring status */
+ int rc; /* return code */
+ int size; /* size of packets */
+ unsigned char *packet; /* source packet data */
+ static const int data_len = 60; /* length of source packets */
+ unsigned long flags;
+ unsigned long ticks;
+
+ *data1 = 1; /* status of test, default to fail */
+ rc = 1; /* default to fail */
+
+ if (netif_running(dev))
+ pcnet32_close(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ /* Reset the PCNET32 */
+ lp->a.reset(ioaddr);
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a.write_bcr(ioaddr, 20, 2);
+
+ lp->init_block.mode =
+ le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ lp->init_block.filter[0] = 0;
+ lp->init_block.filter[1] = 0;
+
+ /* purge & init rings but don't actually restart */
+ pcnet32_restart(dev, 0x0000);
+
+ lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
+
+ /* Initialize Transmit buffers. */
+ size = data_len + 15;
+ for (x = 0; x < numbuffs; x++) {
+ if (!(skb = dev_alloc_skb(size))) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG
+ "%s: Cannot allocate skb at line: %d!\n",
+ dev->name, __LINE__);
+ goto clean_up;
+ } else {
+ packet = skb->data;
+ skb_put(skb, size); /* create space for data */
+ lp->tx_skbuff[x] = skb;
+ lp->tx_ring[x].length = le16_to_cpu(-skb->len);
+ lp->tx_ring[x].misc = 0;
+
+ /* put DA and SA into the skb */
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ /* type */
+ *packet++ = 0x08;
+ *packet++ = 0x06;
+ /* packet number */
+ *packet++ = x;
+ /* fill packet with data */
+ for (i = 0; i < data_len; i++)
+ *packet++ = i;
+
+ lp->tx_dma_addr[x] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ lp->tx_ring[x].base =
+ (u32) le32_to_cpu(lp->tx_dma_addr[x]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[x].status = le16_to_cpu(status);
+ }
+ }
+
+ x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */
+ x = x | 0x0002;
+ a->write_bcr(ioaddr, 32, x);
+
+ lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */
+
+ teststatus = le16_to_cpu(0x8000);
+ lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */
+
+ /* Check status of descriptors */
+ for (x = 0; x < numbuffs; x++) {
+ ticks = 0;
+ rmb();
+ while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, flags);
+ rmb();
+ ticks++;
+ }
+ if (ticks == 200) {
+ if (netif_msg_hw(lp))
+ printk("%s: Desc %d failed to reset!\n",
+ dev->name, x);
+ break;
+ }
+ }
+
+ lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
+ wmb();
+ if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
+ printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
+
+ for (x = 0; x < numbuffs; x++) {
+ printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
+ skb = lp->rx_skbuff[x];
+ for (i = 0; i < size; i++) {
+ printk("%02x ", *(skb->data + i));
+ }
+ printk("\n");
+ }
+ }
+
+ x = 0;
+ rc = 0;
+ while (x < numbuffs && !rc) {
+ skb = lp->rx_skbuff[x];
+ packet = lp->tx_skbuff[x]->data;
+ for (i = 0; i < size; i++) {
+ if (*(skb->data + i) != packet[i]) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG
+ "%s: Error in compare! %2x - %02x %02x\n",
+ dev->name, i, *(skb->data + i),
+ packet[i]);
+ rc = 1;
+ break;
+ }
+ }
+ x++;
+ }
+ if (!rc) {
+ *data1 = 0;
}
- x++;
- }
- if (!rc) {
- *data1 = 0;
- }
-clean_up:
- pcnet32_purge_tx_ring(dev);
- x = a->read_csr(ioaddr, 15) & 0xFFFF;
- a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
+ clean_up:
+ pcnet32_purge_tx_ring(dev);
+ x = a->read_csr(ioaddr, 15) & 0xFFFF;
+ a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
- x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
- x = x & ~0x0002;
- a->write_bcr(ioaddr, 32, x);
+ x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
+ x = x & ~0x0002;
+ a->write_bcr(ioaddr, 32, x);
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
- if (netif_running(dev)) {
- pcnet32_open(dev);
- } else {
- lp->a.write_bcr (ioaddr, 20, 4); /* return to 16bit mode */
- }
+ if (netif_running(dev)) {
+ pcnet32_open(dev);
+ } else {
+ lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
+ }
- return(rc);
-} /* end pcnet32_loopback_test */
+ return (rc);
+} /* end pcnet32_loopback_test */
static void pcnet32_led_blink_callback(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- struct pcnet32_access *a = &lp->a;
- ulong ioaddr = dev->base_addr;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&lp->lock, flags);
- for (i=4; i<8; i++) {
- a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
-
- mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++) {
+ a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
}
static int pcnet32_phys_id(struct net_device *dev, u32 data)
{
- struct pcnet32_private *lp = dev->priv;
- struct pcnet32_access *a = &lp->a;
- ulong ioaddr = dev->base_addr;
- unsigned long flags;
- int i, regs[4];
-
- if (!lp->blink_timer.function) {
- init_timer(&lp->blink_timer);
- lp->blink_timer.function = (void *) pcnet32_led_blink_callback;
- lp->blink_timer.data = (unsigned long) dev;
- }
-
- /* Save the current value of the bcrs */
- spin_lock_irqsave(&lp->lock, flags);
- for (i=4; i<8; i++) {
- regs[i-4] = a->read_bcr(ioaddr, i);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
-
- mod_timer(&lp->blink_timer, jiffies);
- set_current_state(TASK_INTERRUPTIBLE);
-
- if ((!data) || (data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)))
- data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
-
- msleep_interruptible(data * 1000);
- del_timer_sync(&lp->blink_timer);
-
- /* Restore the original value of the bcrs */
- spin_lock_irqsave(&lp->lock, flags);
- for (i=4; i<8; i++) {
- a->write_bcr(ioaddr, i, regs[i-4]);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return 0;
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int i, regs[4];
+
+ if (!lp->blink_timer.function) {
+ init_timer(&lp->blink_timer);
+ lp->blink_timer.function = (void *)pcnet32_led_blink_callback;
+ lp->blink_timer.data = (unsigned long)dev;
+ }
+
+ /* Save the current value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++) {
+ regs[i - 4] = a->read_bcr(ioaddr, i);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ mod_timer(&lp->blink_timer, jiffies);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)))
+ data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
+
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&lp->blink_timer);
+
+ /* Restore the original value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++) {
+ a->write_bcr(ioaddr, i, regs[i - 4]);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0;
}
+#define PCNET32_REGS_PER_PHY 32
+#define PCNET32_MAX_PHYS 32
static int pcnet32_get_regs_len(struct net_device *dev)
{
- return(PCNET32_NUM_REGS * sizeof(u16));
+ struct pcnet32_private *lp = dev->priv;
+ int j = lp->phycount * PCNET32_REGS_PER_PHY;
+
+ return ((PCNET32_NUM_REGS + j) * sizeof(u16));
}
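
[editor's note] pcnet32_get_regs_len() now reports one 32-register block per discovered PHY on top of the fixed register set, all as 16-bit words. A hypothetical standalone version of that arithmetic is below; PCNET32_NUM_REGS is taken as 136 here purely as an assumed example value, the real constant comes from the driver.

    #include <stdint.h>
    #include <stdio.h>

    #define PCNET32_REGS_PER_PHY    32      /* from the patch above */
    #define PCNET32_NUM_REGS        136     /* assumed value of the driver constant */

    /* Size of the ethtool register dump: fixed registers plus one
     * 32-register block per PHY found during probe, as u16 words. */
    static int regs_len(int phycount)
    {
            return (PCNET32_NUM_REGS + phycount * PCNET32_REGS_PER_PHY) *
                   (int)sizeof(uint16_t);
    }

    int main(void)
    {
            printf("1 PHY -> %d bytes, 3 PHYs -> %d bytes\n",
                   regs_len(1), regs_len(3));
            return 0;
    }
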
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- void *ptr)
+ void *ptr)
{
- int i, csr0;
- u16 *buff = ptr;
- struct pcnet32_private *lp = dev->priv;
- struct pcnet32_access *a = &lp->a;
- ulong ioaddr = dev->base_addr;
- int ticks;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->lock, flags);
-
- csr0 = a->read_csr(ioaddr, 0);
- if (!(csr0 & 0x0004)) { /* If not stopped */
- /* set SUSPEND (SPND) - CSR5 bit 0 */
- a->write_csr(ioaddr, 5, 0x0001);
-
- /* poll waiting for bit to be set */
- ticks = 0;
- while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
- spin_unlock_irqrestore(&lp->lock, flags);
- mdelay(1);
- spin_lock_irqsave(&lp->lock, flags);
- ticks++;
- if (ticks > 200) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Error getting into suspend!\n",
- dev->name);
- break;
- }
- }
- }
+ int i, csr0;
+ u16 *buff = ptr;
+ struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_access *a = &lp->a;
+ ulong ioaddr = dev->base_addr;
+ int ticks;
+ unsigned long flags;
- /* read address PROM */
- for (i=0; i<16; i += 2)
- *buff++ = inw(ioaddr + i);
+ spin_lock_irqsave(&lp->lock, flags);
- /* read control and status registers */
- for (i=0; i<90; i++) {
- *buff++ = a->read_csr(ioaddr, i);
- }
+ csr0 = a->read_csr(ioaddr, 0);
+ if (!(csr0 & 0x0004)) { /* If not stopped */
+ /* set SUSPEND (SPND) - CSR5 bit 0 */
+ a->write_csr(ioaddr, 5, 0x0001);
+
+ /* poll waiting for bit to be set */
+ ticks = 0;
+ while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, flags);
+ ticks++;
+ if (ticks > 200) {
+ if (netif_msg_hw(lp))
+ printk(KERN_DEBUG
+ "%s: Error getting into suspend!\n",
+ dev->name);
+ break;
+ }
+ }
+ }
- *buff++ = a->read_csr(ioaddr, 112);
- *buff++ = a->read_csr(ioaddr, 114);
+ /* read address PROM */
+ for (i = 0; i < 16; i += 2)
+ *buff++ = inw(ioaddr + i);
- /* read bus configuration registers */
- for (i=0; i<30; i++) {
- *buff++ = a->read_bcr(ioaddr, i);
- }
- *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
- for (i=31; i<36; i++) {
- *buff++ = a->read_bcr(ioaddr, i);
- }
+ /* read control and status registers */
+ for (i = 0; i < 90; i++) {
+ *buff++ = a->read_csr(ioaddr, i);
+ }
+
+ *buff++ = a->read_csr(ioaddr, 112);
+ *buff++ = a->read_csr(ioaddr, 114);
- /* read mii phy registers */
- if (lp->mii) {
- for (i=0; i<32; i++) {
- lp->a.write_bcr(ioaddr, 33, ((lp->mii_if.phy_id) << 5) | i);
- *buff++ = lp->a.read_bcr(ioaddr, 34);
+ /* read bus configuration registers */
+ for (i = 0; i < 30; i++) {
+ *buff++ = a->read_bcr(ioaddr, i);
+ }
+ *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
+ for (i = 31; i < 36; i++) {
+ *buff++ = a->read_bcr(ioaddr, i);
}
- }
- if (!(csr0 & 0x0004)) { /* If not stopped */
- /* clear SUSPEND (SPND) - CSR5 bit 0 */
- a->write_csr(ioaddr, 5, 0x0000);
- }
+ /* read mii phy registers */
+ if (lp->mii) {
+ int j;
+ for (j = 0; j < PCNET32_MAX_PHYS; j++) {
+ if (lp->phymask & (1 << j)) {
+ for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
+ lp->a.write_bcr(ioaddr, 33,
+ (j << 5) | i);
+ *buff++ = lp->a.read_bcr(ioaddr, 34);
+ }
+ }
+ }
+ }
- i = buff - (u16 *)ptr;
- for (; i < PCNET32_NUM_REGS; i++)
- *buff++ = 0;
+ if (!(csr0 & 0x0004)) { /* If not stopped */
+ /* clear SUSPEND (SPND) - CSR5 bit 0 */
+ a->write_csr(ioaddr, 5, 0x0000);
+ }
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
}
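
[editor's note] pcnet32_get_regs() suspends a running chip (SPND, CSR5 bit 0) before dumping the PROM, CSRs, BCRs and per-PHY MII registers, polling up to 200 ms for the suspend to latch, and clears SPND again afterwards only if the chip had been running. A rough sketch of that suspend/dump/resume shape against a fake register file; read_csr/write_csr here are stand-ins, not the driver's accessors, and the fake model latches SPND immediately.

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t csr[128];       /* fake register file for illustration */

    static uint16_t read_csr(int idx)            { return csr[idx]; }
    static void     write_csr(int idx, uint16_t v) { csr[idx] = v; } /* SPND latches at once here */

    int main(void)
    {
            int ticks = 0;
            uint16_t csr0;
            int was_running;

            csr[0] = 0x0010;                        /* pretend: STOP bit clear, chip running */
            csr0 = read_csr(0);
            was_running = !(csr0 & 0x0004);

            if (was_running) {
                    write_csr(5, 0x0001);           /* request SUSPEND (SPND) */
                    while (!(read_csr(5) & 0x0001) && ticks++ < 200)
                            ;                       /* driver mdelay(1)s per loop */
            }

            printf("CSR0 while suspended: %#06x\n", read_csr(0));  /* dump registers here */

            if (was_running)
                    write_csr(5, 0x0000);           /* clear SPND, resume */
            return 0;
    }
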
static struct ethtool_ops pcnet32_ethtool_ops = {
- .get_settings = pcnet32_get_settings,
- .set_settings = pcnet32_set_settings,
- .get_drvinfo = pcnet32_get_drvinfo,
- .get_msglevel = pcnet32_get_msglevel,
- .set_msglevel = pcnet32_set_msglevel,
- .nway_reset = pcnet32_nway_reset,
- .get_link = pcnet32_get_link,
- .get_ringparam = pcnet32_get_ringparam,
- .set_ringparam = pcnet32_set_ringparam,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .get_tso = ethtool_op_get_tso,
- .get_strings = pcnet32_get_strings,
- .self_test_count = pcnet32_self_test_count,
- .self_test = pcnet32_ethtool_test,
- .phys_id = pcnet32_phys_id,
- .get_regs_len = pcnet32_get_regs_len,
- .get_regs = pcnet32_get_regs,
- .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_settings = pcnet32_get_settings,
+ .set_settings = pcnet32_set_settings,
+ .get_drvinfo = pcnet32_get_drvinfo,
+ .get_msglevel = pcnet32_get_msglevel,
+ .set_msglevel = pcnet32_set_msglevel,
+ .nway_reset = pcnet32_nway_reset,
+ .get_link = pcnet32_get_link,
+ .get_ringparam = pcnet32_get_ringparam,
+ .set_ringparam = pcnet32_set_ringparam,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .get_tso = ethtool_op_get_tso,
+ .get_strings = pcnet32_get_strings,
+ .self_test_count = pcnet32_self_test_count,
+ .self_test = pcnet32_ethtool_test,
+ .phys_id = pcnet32_phys_id,
+ .get_regs_len = pcnet32_get_regs_len,
+ .get_regs = pcnet32_get_regs,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
/* only probes for non-PCI devices, the rest are handled by
* pci_register_driver via pcnet32_probe_pci */
-static void __devinit
-pcnet32_probe_vlbus(void)
+static void __devinit pcnet32_probe_vlbus(void)
{
- unsigned int *port, ioaddr;
-
- /* search for PCnet32 VLB cards at known addresses */
- for (port = pcnet32_portlist; (ioaddr = *port); port++) {
- if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
- /* check if there is really a pcnet chip on that ioaddr */
- if ((inb(ioaddr + 14) == 0x57) && (inb(ioaddr + 15) == 0x57)) {
- pcnet32_probe1(ioaddr, 0, NULL);
- } else {
- release_region(ioaddr, PCNET32_TOTAL_SIZE);
- }
- }
- }
+ unsigned int *port, ioaddr;
+
+ /* search for PCnet32 VLB cards at known addresses */
+ for (port = pcnet32_portlist; (ioaddr = *port); port++) {
+ if (request_region
+ (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
+ /* check if there is really a pcnet chip on that ioaddr */
+ if ((inb(ioaddr + 14) == 0x57)
+ && (inb(ioaddr + 15) == 0x57)) {
+ pcnet32_probe1(ioaddr, 0, NULL);
+ } else {
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ }
+ }
+ }
}
-
static int __devinit
pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- unsigned long ioaddr;
- int err;
-
- err = pci_enable_device(pdev);
- if (err < 0) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err);
- return err;
- }
- pci_set_master(pdev);
+ unsigned long ioaddr;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err < 0) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX
+ "failed to enable device -- err=%d\n", err);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ ioaddr = pci_resource_start(pdev, 0);
+ if (!ioaddr) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX
+ "card has no PCI IO resources, aborting\n");
+ return -ENODEV;
+ }
- ioaddr = pci_resource_start (pdev, 0);
- if (!ioaddr) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk (KERN_ERR PFX "card has no PCI IO resources, aborting\n");
- return -ENODEV;
- }
+ if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX
+ "architecture does not support 32bit PCI busmaster DMA\n");
+ return -ENODEV;
+ }
+ if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") ==
+ NULL) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX
+ "io address range already allocated\n");
+ return -EBUSY;
+ }
- if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "architecture does not support 32bit PCI busmaster DMA\n");
- return -ENODEV;
- }
- if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == NULL) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "io address range already allocated\n");
- return -EBUSY;
- }
-
- err = pcnet32_probe1(ioaddr, 1, pdev);
- if (err < 0) {
- pci_disable_device(pdev);
- }
- return err;
+ err = pcnet32_probe1(ioaddr, 1, pdev);
+ if (err < 0) {
+ pci_disable_device(pdev);
+ }
+ return err;
}
-
/* pcnet32_probe1
* Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
* pdev will be NULL when called from pcnet32_probe_vlbus.
@@ -1107,630 +1028,764 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __devinit
pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
- struct pcnet32_private *lp;
- dma_addr_t lp_dma_addr;
- int i, media;
- int fdx, mii, fset, dxsuflo;
- int chip_version;
- char *chipname;
- struct net_device *dev;
- struct pcnet32_access *a = NULL;
- u8 promaddr[6];
- int ret = -ENODEV;
-
- /* reset the chip */
- pcnet32_wio_reset(ioaddr);
-
- /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
- if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
- a = &pcnet32_wio;
- } else {
- pcnet32_dwio_reset(ioaddr);
- if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) {
- a = &pcnet32_dwio;
- } else
- goto err_release_region;
- }
-
- chip_version = a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr,89) << 16);
- if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
- printk(KERN_INFO " PCnet chip version is %#x.\n", chip_version);
- if ((chip_version & 0xfff) != 0x003) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "Unsupported chip version.\n");
- goto err_release_region;
- }
-
- /* initialize variables */
- fdx = mii = fset = dxsuflo = 0;
- chip_version = (chip_version >> 12) & 0xffff;
-
- switch (chip_version) {
- case 0x2420:
- chipname = "PCnet/PCI 79C970"; /* PCI */
- break;
- case 0x2430:
- if (shared)
- chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
- else
- chipname = "PCnet/32 79C965"; /* 486/VL bus */
- break;
- case 0x2621:
- chipname = "PCnet/PCI II 79C970A"; /* PCI */
- fdx = 1;
- break;
- case 0x2623:
- chipname = "PCnet/FAST 79C971"; /* PCI */
- fdx = 1; mii = 1; fset = 1;
- break;
- case 0x2624:
- chipname = "PCnet/FAST+ 79C972"; /* PCI */
- fdx = 1; mii = 1; fset = 1;
- break;
- case 0x2625:
- chipname = "PCnet/FAST III 79C973"; /* PCI */
- fdx = 1; mii = 1;
- break;
- case 0x2626:
- chipname = "PCnet/Home 79C978"; /* PCI */
- fdx = 1;
+ struct pcnet32_private *lp;
+ dma_addr_t lp_dma_addr;
+ int i, media;
+ int fdx, mii, fset, dxsuflo;
+ int chip_version;
+ char *chipname;
+ struct net_device *dev;
+ struct pcnet32_access *a = NULL;
+ u8 promaddr[6];
+ int ret = -ENODEV;
+
+ /* reset the chip */
+ pcnet32_wio_reset(ioaddr);
+
+ /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
+ if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
+ a = &pcnet32_wio;
+ } else {
+ pcnet32_dwio_reset(ioaddr);
+ if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
+ && pcnet32_dwio_check(ioaddr)) {
+ a = &pcnet32_dwio;
+ } else
+ goto err_release_region;
+ }
+
+ chip_version =
+ a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
+ if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
+ printk(KERN_INFO " PCnet chip version is %#x.\n",
+ chip_version);
+ if ((chip_version & 0xfff) != 0x003) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO PFX "Unsupported chip version.\n");
+ goto err_release_region;
+ }
+
+ /* initialize variables */
+ fdx = mii = fset = dxsuflo = 0;
+ chip_version = (chip_version >> 12) & 0xffff;
+
+ switch (chip_version) {
+ case 0x2420:
+ chipname = "PCnet/PCI 79C970"; /* PCI */
+ break;
+ case 0x2430:
+ if (shared)
+ chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
+ else
+ chipname = "PCnet/32 79C965"; /* 486/VL bus */
+ break;
+ case 0x2621:
+ chipname = "PCnet/PCI II 79C970A"; /* PCI */
+ fdx = 1;
+ break;
+ case 0x2623:
+ chipname = "PCnet/FAST 79C971"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ fset = 1;
+ break;
+ case 0x2624:
+ chipname = "PCnet/FAST+ 79C972"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ fset = 1;
+ break;
+ case 0x2625:
+ chipname = "PCnet/FAST III 79C973"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ break;
+ case 0x2626:
+ chipname = "PCnet/Home 79C978"; /* PCI */
+ fdx = 1;
+ /*
+ * This is based on specs published at www.amd.com. This section
+ * assumes that a card with a 79C978 wants to go into standard
+ * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
+ * and the module option homepna=1 can select this instead.
+ */
+ media = a->read_bcr(ioaddr, 49);
+ media &= ~3; /* default to 10Mb ethernet */
+ if (cards_found < MAX_UNITS && homepna[cards_found])
+ media |= 1; /* switch to home wiring mode */
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
+ (media & 1) ? "1" : "10");
+ a->write_bcr(ioaddr, 49, media);
+ break;
+ case 0x2627:
+ chipname = "PCnet/FAST III 79C975"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ break;
+ case 0x2628:
+ chipname = "PCnet/PRO 79C976";
+ fdx = 1;
+ mii = 1;
+ break;
+ default:
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO PFX
+ "PCnet version %#x, no PCnet32 chip.\n",
+ chip_version);
+ goto err_release_region;
+ }
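
[editor's note] The probe reads a 32-bit part identifier from CSR88/CSR89, rejects anything whose low 12 bits are not 0x003, and then switches on bits 12-27 (0x2420, 0x2621, 0x2625, ...). A small standalone decoder of that scheme; the table covers only a few of the IDs from the switch above and the sample value is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    static const char *decode_pcnet32(uint32_t ver)
    {
            if ((ver & 0xfff) != 0x003)
                    return NULL;                    /* not a PCnet32 part */

            switch ((ver >> 12) & 0xffff) {
            case 0x2420: return "PCnet/PCI 79C970";
            case 0x2621: return "PCnet/PCI II 79C970A";
            case 0x2625: return "PCnet/FAST III 79C973";
            case 0x2626: return "PCnet/Home 79C978";
            default:     return "unknown PCnet part";
            }
    }

    int main(void)
    {
            /* ver = CSR88 | (CSR89 << 16); example value for a 79C973 */
            uint32_t ver = 0x2625003;
            const char *name = decode_pcnet32(ver);

            printf("%#x -> %s\n", (unsigned)ver, name ? name : "rejected");
            return 0;
    }
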
+
/*
- * This is based on specs published at www.amd.com. This section
- * assumes that a card with a 79C978 wants to go into standard
- * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
- * and the module option homepna=1 can select this instead.
+ * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
+ * starting until the packet is loaded. Strike one for reliability, lose
+	 * one for latency - although on PCI this isn't a big loss. Older chips
+	 * have FIFOs smaller than a packet, so you can't do this.
+ * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
*/
- media = a->read_bcr(ioaddr, 49);
- media &= ~3; /* default to 10Mb ethernet */
- if (cards_found < MAX_UNITS && homepna[cards_found])
- media |= 1; /* switch to home wiring mode */
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
- (media & 1) ? "1" : "10");
- a->write_bcr(ioaddr, 49, media);
- break;
- case 0x2627:
- chipname = "PCnet/FAST III 79C975"; /* PCI */
- fdx = 1; mii = 1;
- break;
- case 0x2628:
- chipname = "PCnet/PRO 79C976";
- fdx = 1; mii = 1;
- break;
- default:
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n",
- chip_version);
- goto err_release_region;
- }
-
- /*
- * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
- * starting until the packet is loaded. Strike one for reliability, lose
- * one for latency - although on PCI this isnt a big loss. Older chips
- * have FIFO's smaller than a packet, so you can't do this.
- * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
- */
-
- if (fset) {
- a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
- a->write_csr(ioaddr, 80, (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
- dxsuflo = 1;
- }
-
- dev = alloc_etherdev(0);
- if (!dev) {
+
+ if (fset) {
+ a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
+ a->write_csr(ioaddr, 80,
+ (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
+ dxsuflo = 1;
+ }
+
+ dev = alloc_etherdev(0);
+ if (!dev) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "Memory allocation failed.\n");
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Memory allocation failed.\n");
- ret = -ENOMEM;
- goto err_release_region;
- }
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
-
- /* In most chips, after a chip reset, the ethernet address is read from the
- * station address PROM at the base address and programmed into the
- * "Physical Address Registers" CSR12-14.
- * As a precautionary measure, we read the PROM values and complain if
- * they disagree with the CSRs. Either way, we use the CSR values, and
- * double check that they are valid.
- */
- for (i = 0; i < 3; i++) {
- unsigned int val;
- val = a->read_csr(ioaddr, i+12) & 0x0ffff;
- /* There may be endianness issues here. */
- dev->dev_addr[2*i] = val & 0x0ff;
- dev->dev_addr[2*i+1] = (val >> 8) & 0x0ff;
- }
-
- /* read PROM address and compare with CSR address */
- for (i = 0; i < 6; i++)
- promaddr[i] = inb(ioaddr + i);
-
- if (memcmp(promaddr, dev->dev_addr, 6)
- || !is_valid_ether_addr(dev->dev_addr)) {
- if (is_valid_ether_addr(promaddr)) {
- if (pcnet32_debug & NETIF_MSG_PROBE) {
- printk(" warning: CSR address invalid,\n");
- printk(KERN_INFO " using instead PROM address of");
- }
- memcpy(dev->dev_addr, promaddr, 6);
- }
- }
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
- /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->perm_addr))
- memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
-
- if (pcnet32_debug & NETIF_MSG_PROBE) {
+ printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
+
+ /* In most chips, after a chip reset, the ethernet address is read from the
+ * station address PROM at the base address and programmed into the
+ * "Physical Address Registers" CSR12-14.
+ * As a precautionary measure, we read the PROM values and complain if
+ * they disagree with the CSRs. Either way, we use the CSR values, and
+ * double check that they are valid.
+ */
+ for (i = 0; i < 3; i++) {
+ unsigned int val;
+ val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
+ /* There may be endianness issues here. */
+ dev->dev_addr[2 * i] = val & 0x0ff;
+ dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+ }
+
+ /* read PROM address and compare with CSR address */
for (i = 0; i < 6; i++)
- printk(" %2.2x", dev->dev_addr[i]);
-
- /* Version 0x2623 and 0x2624 */
- if (((chip_version + 1) & 0xfffe) == 0x2624) {
- i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
- printk("\n" KERN_INFO " tx_start_pt(0x%04x):",i);
- switch(i>>10) {
- case 0: printk(" 20 bytes,"); break;
- case 1: printk(" 64 bytes,"); break;
- case 2: printk(" 128 bytes,"); break;
- case 3: printk("~220 bytes,"); break;
- }
- i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
- printk(" BCR18(%x):",i&0xffff);
- if (i & (1<<5)) printk("BurstWrEn ");
- if (i & (1<<6)) printk("BurstRdEn ");
- if (i & (1<<7)) printk("DWordIO ");
- if (i & (1<<11)) printk("NoUFlow ");
- i = a->read_bcr(ioaddr, 25);
- printk("\n" KERN_INFO " SRAMSIZE=0x%04x,",i<<8);
- i = a->read_bcr(ioaddr, 26);
- printk(" SRAM_BND=0x%04x,",i<<8);
- i = a->read_bcr(ioaddr, 27);
- if (i & (1<<14)) printk("LowLatRx");
- }
- }
-
- dev->base_addr = ioaddr;
- /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
- if ((lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
- ret = -ENOMEM;
- goto err_free_netdev;
- }
-
- memset(lp, 0, sizeof(*lp));
- lp->dma_addr = lp_dma_addr;
- lp->pci_dev = pdev;
-
- spin_lock_init(&lp->lock);
-
- SET_MODULE_OWNER(dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
- dev->priv = lp;
- lp->name = chipname;
- lp->shared_irq = shared;
- lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
- lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
- lp->tx_mod_mask = lp->tx_ring_size - 1;
- lp->rx_mod_mask = lp->rx_ring_size - 1;
- lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
- lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
- lp->mii_if.full_duplex = fdx;
- lp->mii_if.phy_id_mask = 0x1f;
- lp->mii_if.reg_num_mask = 0x1f;
- lp->dxsuflo = dxsuflo;
- lp->mii = mii;
- lp->msg_enable = pcnet32_debug;
- if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping)))
- lp->options = PCNET32_PORT_ASEL;
- else
- lp->options = options_mapping[options[cards_found]];
- lp->mii_if.dev = dev;
- lp->mii_if.mdio_read = mdio_read;
- lp->mii_if.mdio_write = mdio_write;
-
- if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
- ((cards_found>=MAX_UNITS) || full_duplex[cards_found]))
- lp->options |= PCNET32_PORT_FD;
-
- if (!a) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "No access methods\n");
- ret = -ENODEV;
- goto err_free_consistent;
- }
- lp->a = *a;
-
- /* prior to register_netdev, dev->name is not yet correct */
- if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
- ret = -ENOMEM;
- goto err_free_ring;
- }
- /* detect special T1/E1 WAN card by checking for MAC address */
- if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
+ promaddr[i] = inb(ioaddr + i);
+
+ if (memcmp(promaddr, dev->dev_addr, 6)
+ || !is_valid_ether_addr(dev->dev_addr)) {
+ if (is_valid_ether_addr(promaddr)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ printk(" warning: CSR address invalid,\n");
+ printk(KERN_INFO
+ " using instead PROM address of");
+ }
+ memcpy(dev->dev_addr, promaddr, 6);
+ }
+ }
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
+ if (!is_valid_ether_addr(dev->perm_addr))
+ memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+
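
[editor's note] The station address is assembled from CSR12-14, each CSR holding two address octets with the low byte first, and is then cross-checked against the six PROM bytes. An illustrative unpacking of three 16-bit words into a 6-byte address, matching the loop above; the CSR contents are made up.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Example contents of CSR12..CSR14 (low byte = earlier address octet). */
            uint16_t csr[3] = { 0x2000, 0x3412, 0x9a78 };
            uint8_t addr[6];
            int i;

            for (i = 0; i < 3; i++) {
                    addr[2 * i]     = csr[i] & 0xff;
                    addr[2 * i + 1] = (csr[i] >> 8) & 0xff;
            }

            for (i = 0; i < 6; i++)
                    printf("%02x%c", addr[i], i == 5 ? '\n' : ':');
            return 0;
    }
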
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ for (i = 0; i < 6; i++)
+ printk(" %2.2x", dev->dev_addr[i]);
+
+ /* Version 0x2623 and 0x2624 */
+ if (((chip_version + 1) & 0xfffe) == 0x2624) {
+ i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
+ printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i);
+ switch (i >> 10) {
+ case 0:
+ printk(" 20 bytes,");
+ break;
+ case 1:
+ printk(" 64 bytes,");
+ break;
+ case 2:
+ printk(" 128 bytes,");
+ break;
+ case 3:
+ printk("~220 bytes,");
+ break;
+ }
+ i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
+ printk(" BCR18(%x):", i & 0xffff);
+ if (i & (1 << 5))
+ printk("BurstWrEn ");
+ if (i & (1 << 6))
+ printk("BurstRdEn ");
+ if (i & (1 << 7))
+ printk("DWordIO ");
+ if (i & (1 << 11))
+ printk("NoUFlow ");
+ i = a->read_bcr(ioaddr, 25);
+ printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
+ i = a->read_bcr(ioaddr, 26);
+ printk(" SRAM_BND=0x%04x,", i << 8);
+ i = a->read_bcr(ioaddr, 27);
+ if (i & (1 << 14))
+ printk("LowLatRx");
+ }
+ }
+
+ dev->base_addr = ioaddr;
+ /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
+ if ((lp =
+ pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX
+ "Consistent memory allocation failed.\n");
+ ret = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ memset(lp, 0, sizeof(*lp));
+ lp->dma_addr = lp_dma_addr;
+ lp->pci_dev = pdev;
+
+ spin_lock_init(&lp->lock);
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->priv = lp;
+ lp->name = chipname;
+ lp->shared_irq = shared;
+ lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
+ lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
+ lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
+ lp->mii_if.full_duplex = fdx;
+ lp->mii_if.phy_id_mask = 0x1f;
+ lp->mii_if.reg_num_mask = 0x1f;
+ lp->dxsuflo = dxsuflo;
+ lp->mii = mii;
+ lp->msg_enable = pcnet32_debug;
+ if ((cards_found >= MAX_UNITS)
+ || (options[cards_found] > sizeof(options_mapping)))
+ lp->options = PCNET32_PORT_ASEL;
+ else
+ lp->options = options_mapping[options[cards_found]];
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = mdio_read;
+ lp->mii_if.mdio_write = mdio_write;
+
+ if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
+ ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
+ lp->options |= PCNET32_PORT_FD;
+
+ if (!a) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_ERR PFX "No access methods\n");
+ ret = -ENODEV;
+ goto err_free_consistent;
+ }
+ lp->a = *a;
+
+ /* prior to register_netdev, dev->name is not yet correct */
+ if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
+ ret = -ENOMEM;
+ goto err_free_ring;
+ }
+ /* detect special T1/E1 WAN card by checking for MAC address */
+ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
&& dev->dev_addr[2] == 0x75)
- lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
-
- lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
- lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
- for (i = 0; i < 6; i++)
- lp->init_block.phys_addr[i] = dev->dev_addr[i];
- lp->init_block.filter[0] = 0x00000000;
- lp->init_block.filter[1] = 0x00000000;
- lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
- lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
-
- /* switch pcnet32 to 32bit mode */
- a->write_bcr(ioaddr, 20, 2);
-
- a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
- init_block)) & 0xffff);
- a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
- init_block)) >> 16);
-
- if (pdev) { /* use the IRQ provided by PCI */
- dev->irq = pdev->irq;
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(" assigned IRQ %d.\n", dev->irq);
- } else {
- unsigned long irq_mask = probe_irq_on();
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
- /*
- * To auto-IRQ we enable the initialization-done and DMA error
- * interrupts. For ISA boards we get a DMA error, but VLB and PCI
- * boards will work.
- */
- /* Trigger an initialization just for the interrupt. */
- a->write_csr (ioaddr, 0, 0x41);
- mdelay (1);
+ lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
+ lp->init_block.tlen_rlen =
+ le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
+ lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
+
+ /* switch pcnet32 to 32bit mode */
+ a->write_bcr(ioaddr, 20, 2);
+
+ a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
+ init_block)) & 0xffff);
+ a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
+ init_block)) >> 16);
+
+ if (pdev) { /* use the IRQ provided by PCI */
+ dev->irq = pdev->irq;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(" assigned IRQ %d.\n", dev->irq);
+ } else {
+ unsigned long irq_mask = probe_irq_on();
+
+ /*
+ * To auto-IRQ we enable the initialization-done and DMA error
+ * interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ * boards will work.
+ */
+ /* Trigger an initialization just for the interrupt. */
+ a->write_csr(ioaddr, 0, 0x41);
+ mdelay(1);
+
+ dev->irq = probe_irq_off(irq_mask);
+ if (!dev->irq) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(", failed to detect IRQ line.\n");
+ ret = -ENODEV;
+ goto err_free_ring;
+ }
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(", probed IRQ %d.\n", dev->irq);
+ }
- dev->irq = probe_irq_off (irq_mask);
- if (!dev->irq) {
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(", failed to detect IRQ line.\n");
- ret = -ENODEV;
- goto err_free_ring;
+ /* Set the mii phy_id so that we can query the link state */
+ if (lp->mii) {
+ /* lp->phycount and lp->phymask are set to 0 by memset above */
+
+ lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
+ /* scan for PHYs */
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ unsigned short id1, id2;
+
+ id1 = mdio_read(dev, i, MII_PHYSID1);
+ if (id1 == 0xffff)
+ continue;
+ id2 = mdio_read(dev, i, MII_PHYSID2);
+ if (id2 == 0xffff)
+ continue;
+ if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
+ continue; /* 79C971 & 79C972 have phantom phy at id 31 */
+ lp->phycount++;
+ lp->phymask |= (1 << i);
+ lp->mii_if.phy_id = i;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO PFX
+ "Found PHY %04x:%04x at address %d.\n",
+ id1, id2, i);
+ }
+ lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
+ if (lp->phycount > 1) {
+ lp->options |= PCNET32_PORT_MII;
+ }
}
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(", probed IRQ %d.\n", dev->irq);
- }
-
- /* Set the mii phy_id so that we can query the link state */
- if (lp->mii)
- lp->mii_if.phy_id = ((lp->a.read_bcr (ioaddr, 33)) >> 5) & 0x1f;
-
- init_timer (&lp->watchdog_timer);
- lp->watchdog_timer.data = (unsigned long) dev;
- lp->watchdog_timer.function = (void *) &pcnet32_watchdog;
-
- /* The PCNET32-specific entries in the device structure. */
- dev->open = &pcnet32_open;
- dev->hard_start_xmit = &pcnet32_start_xmit;
- dev->stop = &pcnet32_close;
- dev->get_stats = &pcnet32_get_stats;
- dev->set_multicast_list = &pcnet32_set_multicast_list;
- dev->do_ioctl = &pcnet32_ioctl;
- dev->ethtool_ops = &pcnet32_ethtool_ops;
- dev->tx_timeout = pcnet32_tx_timeout;
- dev->watchdog_timeo = (5*HZ);
+
+ init_timer(&lp->watchdog_timer);
+ lp->watchdog_timer.data = (unsigned long)dev;
+ lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
+
+ /* The PCNET32-specific entries in the device structure. */
+ dev->open = &pcnet32_open;
+ dev->hard_start_xmit = &pcnet32_start_xmit;
+ dev->stop = &pcnet32_close;
+ dev->get_stats = &pcnet32_get_stats;
+ dev->set_multicast_list = &pcnet32_set_multicast_list;
+ dev->do_ioctl = &pcnet32_ioctl;
+ dev->ethtool_ops = &pcnet32_ethtool_ops;
+ dev->tx_timeout = pcnet32_tx_timeout;
+ dev->watchdog_timeo = (5 * HZ);
#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = pcnet32_poll_controller;
+ dev->poll_controller = pcnet32_poll_controller;
#endif
- /* Fill in the generic fields of the device structure. */
- if (register_netdev(dev))
- goto err_free_ring;
-
- if (pdev) {
- pci_set_drvdata(pdev, dev);
- } else {
- lp->next = pcnet32_dev;
- pcnet32_dev = dev;
- }
-
- if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
- cards_found++;
-
- /* enable LED writes */
- a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
-
- return 0;
-
-err_free_ring:
- pcnet32_free_ring(dev);
-err_free_consistent:
- pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
-err_free_netdev:
- free_netdev(dev);
-err_release_region:
- release_region(ioaddr, PCNET32_TOTAL_SIZE);
- return ret;
-}
+ /* Fill in the generic fields of the device structure. */
+ if (register_netdev(dev))
+ goto err_free_ring;
+
+ if (pdev) {
+ pci_set_drvdata(pdev, dev);
+ } else {
+ lp->next = pcnet32_dev;
+ pcnet32_dev = dev;
+ }
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
+ cards_found++;
+
+ /* enable LED writes */
+ a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
+
+ return 0;
+
+ err_free_ring:
+ pcnet32_free_ring(dev);
+ err_free_consistent:
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+ err_free_netdev:
+ free_netdev(dev);
+ err_release_region:
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return ret;
+}
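
[editor's note] The init block's tlen_rlen word packs the log2 of the Tx ring size into bits 15:12 and the log2 of the Rx ring size into bits 7:4, which is why the probe stores lp->tx_len_bits = PCNET32_LOG_TX_BUFFERS << 12 and lp->rx_len_bits = PCNET32_LOG_RX_BUFFERS << 4 and ORs them together later. A sketch of that packing; the log values 4 and 5 (16 Tx / 32 Rx entries) are assumed defaults for illustration, not taken from this patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Pack ring-size exponents into the init block's tlen_rlen field:
     * Tx log2 size in bits 15:12, Rx log2 size in bits 7:4. */
    static uint16_t tlen_rlen(unsigned log_tx, unsigned log_rx)
    {
            return (uint16_t)((log_tx << 12) | (log_rx << 4));
    }

    int main(void)
    {
            unsigned log_tx = 4, log_rx = 5;        /* assumed: 16 Tx / 32 Rx entries */

            printf("tlen_rlen = %#06x (tx ring %u, rx ring %u)\n",
                   tlen_rlen(log_tx, log_rx), 1u << log_tx, 1u << log_rx);
            return 0;
    }
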
/* if any allocation fails, caller must also call pcnet32_free_ring */
static int pcnet32_alloc_ring(struct net_device *dev, char *name)
{
- struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_private *lp = dev->priv;
- lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
- sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
- &lp->tx_ring_dma_addr);
- if (lp->tx_ring == NULL) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n",
- name);
- return -ENOMEM;
- }
-
- lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
- sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
- &lp->rx_ring_dma_addr);
- if (lp->rx_ring == NULL) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n",
- name);
- return -ENOMEM;
- }
-
- lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
- GFP_ATOMIC);
- if (!lp->tx_dma_addr) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
- return -ENOMEM;
- }
- memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
-
- lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
- GFP_ATOMIC);
- if (!lp->rx_dma_addr) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
- return -ENOMEM;
- }
- memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
-
- lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
- GFP_ATOMIC);
- if (!lp->tx_skbuff) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
- return -ENOMEM;
- }
- memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
-
- lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
- GFP_ATOMIC);
- if (!lp->rx_skbuff) {
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
- return -ENOMEM;
- }
- memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
+ lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size,
+ &lp->tx_ring_dma_addr);
+ if (lp->tx_ring == NULL) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Consistent memory allocation failed.\n",
+ name);
+ return -ENOMEM;
+ }
- return 0;
-}
+ lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size,
+ &lp->rx_ring_dma_addr);
+ if (lp->rx_ring == NULL) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Consistent memory allocation failed.\n",
+ name);
+ return -ENOMEM;
+ }
+ lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
+ GFP_ATOMIC);
+ if (!lp->tx_dma_addr) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Memory allocation failed.\n", name);
+ return -ENOMEM;
+ }
+ memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
+
+ lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
+ GFP_ATOMIC);
+ if (!lp->rx_dma_addr) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Memory allocation failed.\n", name);
+ return -ENOMEM;
+ }
+ memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
+
+ lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
+ GFP_ATOMIC);
+ if (!lp->tx_skbuff) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Memory allocation failed.\n", name);
+ return -ENOMEM;
+ }
+ memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
+
+ lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
+ GFP_ATOMIC);
+ if (!lp->rx_skbuff) {
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk("\n" KERN_ERR PFX
+ "%s: Memory allocation failed.\n", name);
+ return -ENOMEM;
+ }
+ memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
+
+ return 0;
+}
static void pcnet32_free_ring(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
+ struct pcnet32_private *lp = dev->priv;
- kfree(lp->tx_skbuff);
- lp->tx_skbuff = NULL;
+ kfree(lp->tx_skbuff);
+ lp->tx_skbuff = NULL;
- kfree(lp->rx_skbuff);
- lp->rx_skbuff = NULL;
+ kfree(lp->rx_skbuff);
+ lp->rx_skbuff = NULL;
- kfree(lp->tx_dma_addr);
- lp->tx_dma_addr = NULL;
+ kfree(lp->tx_dma_addr);
+ lp->tx_dma_addr = NULL;
- kfree(lp->rx_dma_addr);
- lp->rx_dma_addr = NULL;
+ kfree(lp->rx_dma_addr);
+ lp->rx_dma_addr = NULL;
- if (lp->tx_ring) {
- pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
- lp->tx_ring, lp->tx_ring_dma_addr);
- lp->tx_ring = NULL;
- }
+ if (lp->tx_ring) {
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size, lp->tx_ring,
+ lp->tx_ring_dma_addr);
+ lp->tx_ring = NULL;
+ }
- if (lp->rx_ring) {
- pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
- lp->rx_ring, lp->rx_ring_dma_addr);
- lp->rx_ring = NULL;
- }
+ if (lp->rx_ring) {
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size, lp->rx_ring,
+ lp->rx_ring_dma_addr);
+ lp->rx_ring = NULL;
+ }
}
-
-static int
-pcnet32_open(struct net_device *dev)
+static int pcnet32_open(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
- u16 val;
- int i;
- int rc;
- unsigned long flags;
-
- if (request_irq(dev->irq, &pcnet32_interrupt,
- lp->shared_irq ? SA_SHIRQ : 0, dev->name, (void *)dev)) {
- return -EAGAIN;
- }
-
- spin_lock_irqsave(&lp->lock, flags);
- /* Check for a valid station address */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- rc = -EINVAL;
- goto err_free_irq;
- }
-
- /* Reset the PCNET32 */
- lp->a.reset (ioaddr);
-
- /* switch pcnet32 to 32bit mode */
- lp->a.write_bcr (ioaddr, 20, 2);
-
- if (netif_msg_ifup(lp))
- printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
- dev->name, dev->irq,
- (u32) (lp->tx_ring_dma_addr),
- (u32) (lp->rx_ring_dma_addr),
- (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)));
-
- /* set/reset autoselect bit */
- val = lp->a.read_bcr (ioaddr, 2) & ~2;
- if (lp->options & PCNET32_PORT_ASEL)
- val |= 2;
- lp->a.write_bcr (ioaddr, 2, val);
-
- /* handle full duplex setting */
- if (lp->mii_if.full_duplex) {
- val = lp->a.read_bcr (ioaddr, 9) & ~3;
- if (lp->options & PCNET32_PORT_FD) {
- val |= 1;
- if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 val;
+ int i;
+ int rc;
+ unsigned long flags;
+
+ if (request_irq(dev->irq, &pcnet32_interrupt,
+ lp->shared_irq ? SA_SHIRQ : 0, dev->name,
+ (void *)dev)) {
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Check for a valid station address */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ rc = -EINVAL;
+ goto err_free_irq;
+ }
+
+ /* Reset the PCNET32 */
+ lp->a.reset(ioaddr);
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a.write_bcr(ioaddr, 20, 2);
+
+ if (netif_msg_ifup(lp))
+ printk(KERN_DEBUG
+ "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),
+ (u32) (lp->rx_ring_dma_addr),
+ (u32) (lp->dma_addr +
+ offsetof(struct pcnet32_private, init_block)));
+
+ /* set/reset autoselect bit */
+ val = lp->a.read_bcr(ioaddr, 2) & ~2;
+ if (lp->options & PCNET32_PORT_ASEL)
val |= 2;
- } else if (lp->options & PCNET32_PORT_ASEL) {
- /* workaround of xSeries250, turn on for 79C975 only */
- i = ((lp->a.read_csr(ioaddr, 88) |
- (lp->a.read_csr(ioaddr,89) << 16)) >> 12) & 0xffff;
- if (i == 0x2627)
- val |= 3;
- }
- lp->a.write_bcr (ioaddr, 9, val);
- }
-
- /* set/reset GPSI bit in test register */
- val = lp->a.read_csr (ioaddr, 124) & ~0x10;
- if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
- val |= 0x10;
- lp->a.write_csr (ioaddr, 124, val);
-
- /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
- if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
+ lp->a.write_bcr(ioaddr, 2, val);
+
+ /* handle full duplex setting */
+ if (lp->mii_if.full_duplex) {
+ val = lp->a.read_bcr(ioaddr, 9) & ~3;
+ if (lp->options & PCNET32_PORT_FD) {
+ val |= 1;
+ if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+ val |= 2;
+ } else if (lp->options & PCNET32_PORT_ASEL) {
+ /* workaround of xSeries250, turn on for 79C975 only */
+ i = ((lp->a.read_csr(ioaddr, 88) |
+ (lp->a.
+ read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff;
+ if (i == 0x2627)
+ val |= 3;
+ }
+ lp->a.write_bcr(ioaddr, 9, val);
+ }
+
+ /* set/reset GPSI bit in test register */
+ val = lp->a.read_csr(ioaddr, 124) & ~0x10;
+ if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
+ val |= 0x10;
+ lp->a.write_csr(ioaddr, 124, val);
+
+ /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
+ if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
(lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
- if (lp->options & PCNET32_PORT_ASEL) {
- lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
- if (netif_msg_link(lp))
- printk(KERN_DEBUG "%s: Setting 100Mb-Full Duplex.\n",
- dev->name);
- }
- }
- {
- /*
- * 24 Jun 2004 according AMD, in order to change the PHY,
- * DANAS (or DISPM for 79C976) must be set; then select the speed,
- * duplex, and/or enable auto negotiation, and clear DANAS
- */
- if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
- lp->a.write_bcr(ioaddr, 32,
- lp->a.read_bcr(ioaddr, 32) | 0x0080);
- /* disable Auto Negotiation, set 10Mpbs, HD */
- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
- if (lp->options & PCNET32_PORT_FD)
- val |= 0x10;
- if (lp->options & PCNET32_PORT_100)
- val |= 0x08;
- lp->a.write_bcr (ioaddr, 32, val);
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
+ if (netif_msg_link(lp))
+ printk(KERN_DEBUG
+ "%s: Setting 100Mb-Full Duplex.\n",
+ dev->name);
+ }
+ }
+ if (lp->phycount < 2) {
+ /*
+		 * 24 Jun 2004 according to AMD, in order to change the PHY,
+ * DANAS (or DISPM for 79C976) must be set; then select the speed,
+ * duplex, and/or enable auto negotiation, and clear DANAS
+ */
+ if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
+ lp->a.write_bcr(ioaddr, 32,
+ lp->a.read_bcr(ioaddr, 32) | 0x0080);
+			/* disable Auto Negotiation, set 10Mbps, HD */
+ val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
+ if (lp->options & PCNET32_PORT_FD)
+ val |= 0x10;
+ if (lp->options & PCNET32_PORT_100)
+ val |= 0x08;
+ lp->a.write_bcr(ioaddr, 32, val);
+ } else {
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->a.write_bcr(ioaddr, 32,
+ lp->a.read_bcr(ioaddr,
+ 32) | 0x0080);
+ /* enable auto negotiate, setup, disable fd */
+ val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
+ val |= 0x20;
+ lp->a.write_bcr(ioaddr, 32, val);
+ }
+ }
} else {
- if (lp->options & PCNET32_PORT_ASEL) {
- lp->a.write_bcr(ioaddr, 32,
- lp->a.read_bcr(ioaddr, 32) | 0x0080);
- /* enable auto negotiate, setup, disable fd */
- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
- val |= 0x20;
- lp->a.write_bcr(ioaddr, 32, val);
- }
+ int first_phy = -1;
+ u16 bmcr;
+ u32 bcr9;
+ struct ethtool_cmd ecmd;
+
+ /*
+ * There is really no good other way to handle multiple PHYs
+		 * There is really no good way to handle multiple PHYs
+ */
+ val = lp->a.read_bcr(ioaddr, 2);
+ lp->a.write_bcr(ioaddr, 2, val & ~2);
+ val = lp->a.read_bcr(ioaddr, 32);
+ lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
+
+ if (!(lp->options & PCNET32_PORT_ASEL)) {
+ /* setup ecmd */
+ ecmd.port = PORT_MII;
+ ecmd.transceiver = XCVR_INTERNAL;
+ ecmd.autoneg = AUTONEG_DISABLE;
+ ecmd.speed =
+ lp->
+ options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
+ bcr9 = lp->a.read_bcr(ioaddr, 9);
+
+ if (lp->options & PCNET32_PORT_FD) {
+ ecmd.duplex = DUPLEX_FULL;
+ bcr9 |= (1 << 0);
+ } else {
+ ecmd.duplex = DUPLEX_HALF;
+ bcr9 |= ~(1 << 0);
+ }
+ lp->a.write_bcr(ioaddr, 9, bcr9);
+ }
+
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (lp->phymask & (1 << i)) {
+ /* isolate all but the first PHY */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ if (first_phy == -1) {
+ first_phy = i;
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ } else {
+ mdio_write(dev, i, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+ }
+ /* use mii_ethtool_sset to setup PHY */
+ lp->mii_if.phy_id = i;
+ ecmd.phy_address = i;
+ if (lp->options & PCNET32_PORT_ASEL) {
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ ecmd.autoneg = AUTONEG_ENABLE;
+ }
+ mii_ethtool_sset(&lp->mii_if, &ecmd);
+ }
+ }
+ lp->mii_if.phy_id = first_phy;
+ if (netif_msg_link(lp))
+ printk(KERN_INFO "%s: Using PHY number %d.\n",
+ dev->name, first_phy);
}
- }
#ifdef DO_DXSUFLO
- if (lp->dxsuflo) { /* Disable transmit stop on underflow */
- val = lp->a.read_csr (ioaddr, 3);
- val |= 0x40;
- lp->a.write_csr (ioaddr, 3, val);
- }
+ if (lp->dxsuflo) { /* Disable transmit stop on underflow */
+ val = lp->a.read_csr(ioaddr, 3);
+ val |= 0x40;
+ lp->a.write_csr(ioaddr, 3, val);
+ }
#endif
- lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
- pcnet32_load_multicast(dev);
-
- if (pcnet32_init_ring(dev)) {
- rc = -ENOMEM;
- goto err_free_ring;
- }
-
- /* Re-initialize the PCNET32, and start it when done. */
- lp->a.write_csr (ioaddr, 1, (lp->dma_addr +
- offsetof(struct pcnet32_private, init_block)) & 0xffff);
- lp->a.write_csr (ioaddr, 2, (lp->dma_addr +
- offsetof(struct pcnet32_private, init_block)) >> 16);
-
- lp->a.write_csr (ioaddr, 4, 0x0915);
- lp->a.write_csr (ioaddr, 0, 0x0001);
-
- netif_start_queue(dev);
-
- /* If we have mii, print the link status and start the watchdog */
- if (lp->mii) {
- mii_check_media (&lp->mii_if, netif_msg_link(lp), 1);
- mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
- }
-
- i = 0;
- while (i++ < 100)
- if (lp->a.read_csr (ioaddr, 0) & 0x0100)
- break;
- /*
- * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
- * reports that doing so triggers a bug in the '974.
- */
- lp->a.write_csr (ioaddr, 0, 0x0042);
-
- if (netif_msg_ifup(lp))
- printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
- dev->name, i, (u32) (lp->dma_addr +
- offsetof(struct pcnet32_private, init_block)),
- lp->a.read_csr(ioaddr, 0));
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return 0; /* Always succeed */
-
-err_free_ring:
- /* free any allocated skbuffs */
- for (i = 0; i < lp->rx_ring_size; i++) {
- lp->rx_ring[i].status = 0;
- if (lp->rx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(lp->rx_skbuff[i]);
- }
- lp->rx_skbuff[i] = NULL;
- lp->rx_dma_addr[i] = 0;
- }
-
- pcnet32_free_ring(dev);
-
- /*
- * Switch back to 16bit mode to avoid problems with dumb
- * DOS packet driver after a warm reboot
- */
- lp->a.write_bcr (ioaddr, 20, 4);
-
-err_free_irq:
- spin_unlock_irqrestore(&lp->lock, flags);
- free_irq(dev->irq, dev);
- return rc;
+ lp->init_block.mode =
+ le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ pcnet32_load_multicast(dev);
+
+ if (pcnet32_init_ring(dev)) {
+ rc = -ENOMEM;
+ goto err_free_ring;
+ }
+
+ /* Re-initialize the PCNET32, and start it when done. */
+ lp->a.write_csr(ioaddr, 1, (lp->dma_addr +
+ offsetof(struct pcnet32_private,
+ init_block)) & 0xffff);
+ lp->a.write_csr(ioaddr, 2,
+ (lp->dma_addr +
+ offsetof(struct pcnet32_private, init_block)) >> 16);
+
+ lp->a.write_csr(ioaddr, 4, 0x0915);
+ lp->a.write_csr(ioaddr, 0, 0x0001);
+
+ netif_start_queue(dev);
+
+ /* Print the link status and start the watchdog */
+ pcnet32_check_media(dev, 1);
+ mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+
+ i = 0;
+ while (i++ < 100)
+ if (lp->a.read_csr(ioaddr, 0) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ lp->a.write_csr(ioaddr, 0, 0x0042);
+
+ if (netif_msg_ifup(lp))
+ printk(KERN_DEBUG
+ "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i,
+ (u32) (lp->dma_addr +
+ offsetof(struct pcnet32_private, init_block)),
+ lp->a.read_csr(ioaddr, 0));
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0; /* Always succeed */
+
+ err_free_ring:
+ /* free any allocated skbuffs */
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ lp->rx_ring[i].status = 0;
+ if (lp->rx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
+ PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+
+ pcnet32_free_ring(dev);
+
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a.write_bcr(ioaddr, 20, 4);
+
+ err_free_irq:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ free_irq(dev->irq, dev);
+ return rc;
}
/*
@@ -1746,727 +1801,893 @@ err_free_irq:
* restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
*/
-static void
-pcnet32_purge_tx_ring(struct net_device *dev)
+static void pcnet32_purge_tx_ring(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- int i;
-
- for (i = 0; i < lp->tx_ring_size; i++) {
- lp->tx_ring[i].status = 0; /* CPU owns buffer */
- wmb(); /* Make sure adapter sees owner change */
- if (lp->tx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
- lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_any(lp->tx_skbuff[i]);
- }
- lp->tx_skbuff[i] = NULL;
- lp->tx_dma_addr[i] = 0;
- }
-}
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->tx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ }
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+}
/* Initialize the PCNET32 Rx and Tx rings. */
-static int
-pcnet32_init_ring(struct net_device *dev)
+static int pcnet32_init_ring(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- int i;
-
- lp->tx_full = 0;
- lp->cur_rx = lp->cur_tx = 0;
- lp->dirty_rx = lp->dirty_tx = 0;
-
- for (i = 0; i < lp->rx_ring_size; i++) {
- struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff == NULL) {
- if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) {
- /* there is not much, we can do at this point */
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
- dev->name);
- return -1;
- }
- skb_reserve (rx_skbuff, 2);
- }
-
- rmb();
- if (lp->rx_dma_addr[i] == 0)
- lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data,
- PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
- lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
- lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->rx_ring[i].status = le16_to_cpu(0x8000);
- }
- /* The Tx buffer address is filled in as needed, but we do need to clear
- * the upper ownership bit. */
- for (i = 0; i < lp->tx_ring_size; i++) {
- lp->tx_ring[i].status = 0; /* CPU owns buffer */
- wmb(); /* Make sure adapter sees owner change */
- lp->tx_ring[i].base = 0;
- lp->tx_dma_addr[i] = 0;
- }
-
- lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
- for (i = 0; i < 6; i++)
- lp->init_block.phys_addr[i] = dev->dev_addr[i];
- lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
- lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
- wmb(); /* Make sure all changes are visible */
- return 0;
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
+ if (rx_skbuff == NULL) {
+ if (!
+ (rx_skbuff = lp->rx_skbuff[i] =
+ dev_alloc_skb(PKT_BUF_SZ))) {
+				/* there is not much we can do at this point */
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR
+ "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
+ dev->name);
+ return -1;
+ }
+ skb_reserve(rx_skbuff, 2);
+ }
+
+ rmb();
+ if (lp->rx_dma_addr[i] == 0)
+ lp->rx_dma_addr[i] =
+ pci_map_single(lp->pci_dev, rx_skbuff->data,
+ PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+ lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]);
+ lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->rx_ring[i].status = le16_to_cpu(0x8000);
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ * the upper ownership bit. */
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ lp->tx_ring[i].base = 0;
+ lp->tx_dma_addr[i] = 0;
+ }
+
+ lp->init_block.tlen_rlen =
+ le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
+ lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
+ wmb(); /* Make sure all changes are visible */
+ return 0;
}
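
[editor's note] pcnet32_init_ring() gives each Rx descriptor its DMA address, stores the buffer length as a two's-complement negative value (the hardware convention, hence the 2 - PKT_BUF_SZ above), and only then sets the OWN bit in the status word, with a write barrier in between so the chip never sees OWN before the other fields. A plain-C model of filling one such descriptor; the struct layout mirrors only the fields used above, byte-swapping and the barrier are omitted, and PKT_BUF_SZ = 1544 is an assumed example value.

    #include <stdint.h>
    #include <stdio.h>

    #define PKT_BUF_SZ 1544         /* assumed buffer size, for illustration */

    struct rx_head {
            uint32_t base;          /* DMA address of the buffer */
            int16_t  buf_length;    /* negative buffer size, two's complement */
            uint16_t status;        /* bit 15 = OWN */
    };

    static void fill_rx_desc(struct rx_head *d, uint32_t dma_addr)
    {
            d->base       = dma_addr;
            d->buf_length = (int16_t)(2 - PKT_BUF_SZ);      /* -(PKT_BUF_SZ - 2) */
            /* in the driver a wmb() sits here, then ownership goes to the chip */
            d->status     = 0x8000;
    }

    int main(void)
    {
            struct rx_head d;

            fill_rx_desc(&d, 0x12340000u);
            printf("base %#x, buf_length %d, status %#06x\n",
                   (unsigned)d.base, d.buf_length, d.status);
            return 0;
    }
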
/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
* then flush the pending transmit operations, re-initialize the ring,
* and tell the chip to initialize.
*/
-static void
-pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
+static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
- int i;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ int i;
- /* wait for stop */
- for (i=0; i<100; i++)
- if (lp->a.read_csr(ioaddr, 0) & 0x0004)
- break;
+ /* wait for stop */
+ for (i = 0; i < 100; i++)
+ if (lp->a.read_csr(ioaddr, 0) & 0x0004)
+ break;
- if (i >= 100 && netif_msg_drv(lp))
- printk(KERN_ERR "%s: pcnet32_restart timed out waiting for stop.\n",
- dev->name);
+ if (i >= 100 && netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: pcnet32_restart timed out waiting for stop.\n",
+ dev->name);
- pcnet32_purge_tx_ring(dev);
- if (pcnet32_init_ring(dev))
- return;
+ pcnet32_purge_tx_ring(dev);
+ if (pcnet32_init_ring(dev))
+ return;
- /* ReInit Ring */
- lp->a.write_csr (ioaddr, 0, 1);
- i = 0;
- while (i++ < 1000)
- if (lp->a.read_csr (ioaddr, 0) & 0x0100)
- break;
+ /* ReInit Ring */
+ lp->a.write_csr(ioaddr, 0, 1);
+ i = 0;
+ while (i++ < 1000)
+ if (lp->a.read_csr(ioaddr, 0) & 0x0100)
+ break;
- lp->a.write_csr (ioaddr, 0, csr0_bits);
+ lp->a.write_csr(ioaddr, 0, csr0_bits);
}
-
-static void
-pcnet32_tx_timeout (struct net_device *dev)
+static void pcnet32_tx_timeout(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr, flags;
-
- spin_lock_irqsave(&lp->lock, flags);
- /* Transmitter timeout, serious problems. */
- if (pcnet32_debug & NETIF_MSG_DRV)
- printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n",
- dev->name, lp->a.read_csr(ioaddr, 0));
- lp->a.write_csr (ioaddr, 0, 0x0004);
- lp->stats.tx_errors++;
- if (netif_msg_tx_err(lp)) {
- int i;
- printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
- lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
- lp->cur_rx);
- for (i = 0 ; i < lp->rx_ring_size; i++)
- printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
- le32_to_cpu(lp->rx_ring[i].base),
- (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff,
- le32_to_cpu(lp->rx_ring[i].msg_length),
- le16_to_cpu(lp->rx_ring[i].status));
- for (i = 0 ; i < lp->tx_ring_size; i++)
- printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
- le32_to_cpu(lp->tx_ring[i].base),
- (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
- le32_to_cpu(lp->tx_ring[i].misc),
- le16_to_cpu(lp->tx_ring[i].status));
- printk("\n");
- }
- pcnet32_restart(dev, 0x0042);
-
- dev->trans_start = jiffies;
- netif_wake_queue(dev);
-
- spin_unlock_irqrestore(&lp->lock, flags);
-}
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr, flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Transmitter timeout, serious problems. */
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ printk(KERN_ERR
+ "%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, lp->a.read_csr(ioaddr, 0));
+ lp->a.write_csr(ioaddr, 0, 0x0004);
+ lp->stats.tx_errors++;
+ if (netif_msg_tx_err(lp)) {
+ int i;
+ printk(KERN_DEBUG
+ " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0; i < lp->rx_ring_size; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->rx_ring[i].base),
+ (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
+ 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
+ le16_to_cpu(lp->rx_ring[i].status));
+ for (i = 0; i < lp->tx_ring_size; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->tx_ring[i].base),
+ (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
+ le32_to_cpu(lp->tx_ring[i].misc),
+ le16_to_cpu(lp->tx_ring[i].status));
+ printk("\n");
+ }
+ pcnet32_restart(dev, 0x0042);
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
-static int
-pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
- u16 status;
- int entry;
- unsigned long flags;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 status;
+ int entry;
+ unsigned long flags;
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
- if (netif_msg_tx_queued(lp)) {
- printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
- dev->name, lp->a.read_csr(ioaddr, 0));
- }
+ if (netif_msg_tx_queued(lp)) {
+ printk(KERN_DEBUG
+ "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, lp->a.read_csr(ioaddr, 0));
+ }
- /* Default status -- will not enable Successful-TxDone
- * interrupt when that option is available to us.
- */
- status = 0x8300;
+ /* Default status -- will not enable Successful-TxDone
+ * interrupt when that option is available to us.
+ */
+ status = 0x8300;
- /* Fill in a Tx ring entry */
+ /* Fill in a Tx ring entry */
- /* Mask to ring buffer boundary. */
- entry = lp->cur_tx & lp->tx_mod_mask;
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & lp->tx_mod_mask;
- /* Caution: the write order is important here, set the status
- * with the "ownership" bits last. */
+ /* Caution: the write order is important here, set the status
+ * with the "ownership" bits last. */
- lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
+ lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
- lp->tx_ring[entry].misc = 0x00000000;
+ lp->tx_ring[entry].misc = 0x00000000;
- lp->tx_skbuff[entry] = skb;
- lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->tx_ring[entry].status = le16_to_cpu(status);
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_dma_addr[entry] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[entry].status = le16_to_cpu(status);
- lp->cur_tx++;
- lp->stats.tx_bytes += skb->len;
+ lp->cur_tx++;
+ lp->stats.tx_bytes += skb->len;
- /* Trigger an immediate send poll. */
- lp->a.write_csr (ioaddr, 0, 0x0048);
+ /* Trigger an immediate send poll. */
+ lp->a.write_csr(ioaddr, 0, 0x0048);
- dev->trans_start = jiffies;
+ dev->trans_start = jiffies;
- if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) {
- lp->tx_full = 1;
- netif_stop_queue(dev);
- }
- spin_unlock_irqrestore(&lp->lock, flags);
- return 0;
+ if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
+ lp->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 0;
}
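
pcnet32_start_xmit() above indexes a power-of-two transmit ring: the free-running cur_tx counter is masked with tx_mod_mask to select a slot, the descriptor is filled, and the queue is stopped when the following slot is still in flight (the driver probes the next descriptor's base field). A self-contained sketch of the same indexing idea; the full-ring test below uses producer/consumer counters purely for illustration, not the driver's base-field probe:

#include <stdio.h>

#define DEMO_RING_SIZE	16u			/* must be a power of two        */
#define DEMO_RING_MASK	(DEMO_RING_SIZE - 1u)	/* plays the role of tx_mod_mask */

struct demo_tx_ring {
	unsigned int cur;	/* slots produced so far (free running)  */
	unsigned int dirty;	/* slots reclaimed so far (free running) */
};

static unsigned int demo_claim_slot(struct demo_tx_ring *r)
{
	return r->cur++ & DEMO_RING_MASK;	/* wraps naturally */
}

static int demo_ring_full(const struct demo_tx_ring *r)
{
	/* keep one slot free so "full" and "empty" stay distinguishable */
	return r->cur - r->dirty >= DEMO_RING_SIZE - 1;
}

int main(void)
{
	struct demo_tx_ring r = { 0, 0 };

	while (!demo_ring_full(&r))
		printf("queued frame in slot %u\n", demo_claim_slot(&r));
	return 0;
}
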
/* The PCNET32 interrupt handler. */
static irqreturn_t
-pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
- struct net_device *dev = dev_id;
- struct pcnet32_private *lp;
- unsigned long ioaddr;
- u16 csr0,rap;
- int boguscnt = max_interrupt_work;
- int must_restart;
-
- if (!dev) {
- if (pcnet32_debug & NETIF_MSG_INTR)
- printk (KERN_DEBUG "%s(): irq %d for unknown device\n",
- __FUNCTION__, irq);
- return IRQ_NONE;
- }
-
- ioaddr = dev->base_addr;
- lp = dev->priv;
-
- spin_lock(&lp->lock);
-
- rap = lp->a.read_rap(ioaddr);
- while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
- if (csr0 == 0xffff) {
- break; /* PCMCIA remove happened */
+ struct net_device *dev = dev_id;
+ struct pcnet32_private *lp;
+ unsigned long ioaddr;
+ u16 csr0, rap;
+ int boguscnt = max_interrupt_work;
+ int must_restart;
+
+ if (!dev) {
+ if (pcnet32_debug & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s(): irq %d for unknown device\n",
+ __FUNCTION__, irq);
+ return IRQ_NONE;
}
- /* Acknowledge all of the current interrupt sources ASAP. */
- lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f);
- must_restart = 0;
+ ioaddr = dev->base_addr;
+ lp = dev->priv;
- if (netif_msg_intr(lp))
- printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
- dev->name, csr0, lp->a.read_csr (ioaddr, 0));
-
- if (csr0 & 0x0400) /* Rx interrupt */
- pcnet32_rx(dev);
-
- if (csr0 & 0x0200) { /* Tx-done interrupt */
- unsigned int dirty_tx = lp->dirty_tx;
- int delta;
-
- while (dirty_tx != lp->cur_tx) {
- int entry = dirty_tx & lp->tx_mod_mask;
- int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
-
- if (status < 0)
- break; /* It still hasn't been Txed */
-
- lp->tx_ring[entry].base = 0;
-
- if (status & 0x4000) {
- /* There was an major error, log it. */
- int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
- lp->stats.tx_errors++;
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR "%s: Tx error status=%04x err_status=%08x\n",
- dev->name, status, err_status);
- if (err_status & 0x04000000) lp->stats.tx_aborted_errors++;
- if (err_status & 0x08000000) lp->stats.tx_carrier_errors++;
- if (err_status & 0x10000000) lp->stats.tx_window_errors++;
+ spin_lock(&lp->lock);
+
+ rap = lp->a.read_rap(ioaddr);
+ while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
+ if (csr0 == 0xffff) {
+ break; /* PCMCIA remove happened */
+ }
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);
+
+ must_restart = 0;
+
+ if (netif_msg_intr(lp))
+ printk(KERN_DEBUG
+ "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, lp->a.read_csr(ioaddr, 0));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ pcnet32_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ unsigned int dirty_tx = lp->dirty_tx;
+ int delta;
+
+ while (dirty_tx != lp->cur_tx) {
+ int entry = dirty_tx & lp->tx_mod_mask;
+				int status =
+				    (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x4000) {
+					/* There was a major error, log it. */
+					int err_status =
+					    le32_to_cpu(lp->tx_ring[entry].misc);
+ lp->stats.tx_errors++;
+ if (netif_msg_tx_err(lp))
+ printk(KERN_ERR
+ "%s: Tx error status=%04x err_status=%08x\n",
+ dev->name, status,
+ err_status);
+ if (err_status & 0x04000000)
+ lp->stats.tx_aborted_errors++;
+ if (err_status & 0x08000000)
+ lp->stats.tx_carrier_errors++;
+ if (err_status & 0x10000000)
+ lp->stats.tx_window_errors++;
#ifndef DO_DXSUFLO
- if (err_status & 0x40000000) {
- lp->stats.tx_fifo_errors++;
- /* Ackk! On FIFO errors the Tx unit is turned off! */
- /* Remove this verbosity later! */
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n",
- dev->name, csr0);
- must_restart = 1;
- }
+ if (err_status & 0x40000000) {
+ lp->stats.tx_fifo_errors++;
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+ if (netif_msg_tx_err(lp))
+ printk(KERN_ERR
+ "%s: Tx FIFO error! CSR0=%4.4x\n",
+ dev->name, csr0);
+ must_restart = 1;
+ }
#else
- if (err_status & 0x40000000) {
- lp->stats.tx_fifo_errors++;
- if (! lp->dxsuflo) { /* If controller doesn't recover ... */
- /* Ackk! On FIFO errors the Tx unit is turned off! */
- /* Remove this verbosity later! */
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n",
- dev->name, csr0);
- must_restart = 1;
- }
- }
+ if (err_status & 0x40000000) {
+ lp->stats.tx_fifo_errors++;
+ if (!lp->dxsuflo) { /* If controller doesn't recover ... */
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+						if (netif_msg_tx_err(lp))
+							printk(KERN_ERR
+							       "%s: Tx FIFO error! CSR0=%4.4x\n",
+							       dev->name, csr0);
+ must_restart = 1;
+ }
+ }
#endif
- } else {
- if (status & 0x1800)
- lp->stats.collisions++;
- lp->stats.tx_packets++;
+ } else {
+ if (status & 0x1800)
+ lp->stats.collisions++;
+ lp->stats.tx_packets++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[entry]) {
+ pci_unmap_single(lp->pci_dev,
+ lp->tx_dma_addr[entry],
+ lp->tx_skbuff[entry]->
+ len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ lp->tx_dma_addr[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
+ delta =
+ (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask +
+ lp->tx_ring_size);
+ if (delta > lp->tx_ring_size) {
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+ dev->name, dirty_tx, lp->cur_tx,
+ lp->tx_full);
+ dirty_tx += lp->tx_ring_size;
+ delta -= lp->tx_ring_size;
+ }
+
+ if (lp->tx_full &&
+ netif_queue_stopped(dev) &&
+ delta < lp->tx_ring_size - 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000)
+ lp->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) {
+ /*
+ * this happens when our receive ring is full. This shouldn't
+ * be a problem as we will see normal rx interrupts for the frames
+ * in the receive ring. But there are some PCI chipsets (I can
+ * reproduce this on SP3G with Intel saturn chipset) which have
+ * sometimes problems and will fill up the receive ring with
+ * error descriptors. In this situation we don't get a rx
+ * interrupt, but a missed frame interrupt sooner or later.
+ * So we try to clean up our receive ring here.
+ */
+ pcnet32_rx(dev);
+ lp->stats.rx_errors++; /* Missed a Rx frame. */
+ }
+ if (csr0 & 0x0800) {
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* unlike for the lance, there is no restart needed */
}
- /* We must free the original skb */
- if (lp->tx_skbuff[entry]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry],
- lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(lp->tx_skbuff[entry]);
- lp->tx_skbuff[entry] = NULL;
- lp->tx_dma_addr[entry] = 0;
+ if (must_restart) {
+ /* reset the chip to clear the error condition, then restart */
+ lp->a.reset(ioaddr);
+ lp->a.write_csr(ioaddr, 4, 0x0915);
+ pcnet32_restart(dev, 0x0002);
+ netif_wake_queue(dev);
}
- dirty_tx++;
- }
-
- delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
- if (delta > lp->tx_ring_size) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
- dirty_tx += lp->tx_ring_size;
- delta -= lp->tx_ring_size;
- }
-
- if (lp->tx_full &&
- netif_queue_stopped(dev) &&
- delta < lp->tx_ring_size - 2) {
- /* The ring is no longer full, clear tbusy. */
- lp->tx_full = 0;
- netif_wake_queue (dev);
- }
- lp->dirty_tx = dirty_tx;
- }
-
- /* Log misc errors. */
- if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
- if (csr0 & 0x1000) {
- /*
- * this happens when our receive ring is full. This shouldn't
- * be a problem as we will see normal rx interrupts for the frames
- * in the receive ring. But there are some PCI chipsets (I can
- * reproduce this on SP3G with Intel saturn chipset) which have
- * sometimes problems and will fill up the receive ring with
- * error descriptors. In this situation we don't get a rx
- * interrupt, but a missed frame interrupt sooner or later.
- * So we try to clean up our receive ring here.
- */
- pcnet32_rx(dev);
- lp->stats.rx_errors++; /* Missed a Rx frame. */
- }
- if (csr0 & 0x0800) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: Bus master arbitration failure, status %4.4x.\n",
- dev->name, csr0);
- /* unlike for the lance, there is no restart needed */
- }
-
- if (must_restart) {
- /* reset the chip to clear the error condition, then restart */
- lp->a.reset(ioaddr);
- lp->a.write_csr(ioaddr, 4, 0x0915);
- pcnet32_restart(dev, 0x0002);
- netif_wake_queue(dev);
- }
- }
-
- /* Set interrupt enable. */
- lp->a.write_csr (ioaddr, 0, 0x0040);
- lp->a.write_rap (ioaddr,rap);
-
- if (netif_msg_intr(lp))
- printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
- dev->name, lp->a.read_csr (ioaddr, 0));
-
- spin_unlock(&lp->lock);
-
- return IRQ_HANDLED;
+ }
+
+ /* Set interrupt enable. */
+ lp->a.write_csr(ioaddr, 0, 0x0040);
+ lp->a.write_rap(ioaddr, rap);
+
+ if (netif_msg_intr(lp))
+ printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
+ dev->name, lp->a.read_csr(ioaddr, 0));
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_HANDLED;
}
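
The interrupt handler above loops while any of the 0x8f00 event bits are set in CSR0, acknowledging and servicing each batch, but it bails out once a per-interrupt budget (the max_interrupt_work module parameter) is spent so a stuck or shared line cannot livelock the CPU; a CSR0 reading of 0xffff is treated as the card having been hot-removed. A compilable sketch of that loop shape with function-pointer stand-ins for the register accessors — the budget value here is arbitrary:

#include <stdint.h>
#include <stdbool.h>

#define DEMO_MAX_WORK	8	/* arbitrary; the driver takes this from max_interrupt_work */

typedef uint16_t (*demo_read_csr0_t)(void *ctx);
typedef void (*demo_handle_t)(void *ctx, uint16_t csr0);

/* Returns false when work was abandoned (budget spent or device gone). */
static bool demo_service_irq(void *ctx, demo_read_csr0_t read_csr0,
			     demo_handle_t handle_events)
{
	int budget = DEMO_MAX_WORK;
	uint16_t csr0;

	while (((csr0 = read_csr0(ctx)) & 0x8f00) && --budget >= 0) {
		if (csr0 == 0xffff)
			return false;		/* card removed under us    */
		handle_events(ctx, csr0);	/* ack + process this batch */
	}
	return budget >= 0;
}
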
-static int
-pcnet32_rx(struct net_device *dev)
+static int pcnet32_rx(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- int entry = lp->cur_rx & lp->rx_mod_mask;
- int boguscnt = lp->rx_ring_size / 2;
-
- /* If we own the next entry, it's a new packet. Send it up. */
- while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
- int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
-
- if (status != 0x03) { /* There was an error. */
- /*
- * There is a tricky error noted by John Murphy,
- * <murf@perftech.com> to Russ Nelson: Even with full-sized
- * buffers it's possible for a jabber packet to use two
- * buffers, with only the last correctly noting the error.
- */
- if (status & 0x01) /* Only count a general error at the */
- lp->stats.rx_errors++; /* end of a packet.*/
- if (status & 0x20) lp->stats.rx_frame_errors++;
- if (status & 0x10) lp->stats.rx_over_errors++;
- if (status & 0x08) lp->stats.rx_crc_errors++;
- if (status & 0x04) lp->stats.rx_fifo_errors++;
- lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
- } else {
- /* Malloc up new buffer, compatible with net-2e. */
- short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4;
- struct sk_buff *skb;
-
- /* Discard oversize frames. */
- if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: Impossible packet size %d!\n",
- dev->name, pkt_len);
- lp->stats.rx_errors++;
- } else if (pkt_len < 60) {
- if (netif_msg_rx_err(lp))
- printk(KERN_ERR "%s: Runt packet!\n", dev->name);
- lp->stats.rx_errors++;
- } else {
- int rx_in_place = 0;
-
- if (pkt_len > rx_copybreak) {
- struct sk_buff *newskb;
-
- if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) {
- skb_reserve (newskb, 2);
- skb = lp->rx_skbuff[entry];
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[entry],
- PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
- skb_put (skb, pkt_len);
- lp->rx_skbuff[entry] = newskb;
- newskb->dev = dev;
- lp->rx_dma_addr[entry] =
- pci_map_single(lp->pci_dev, newskb->data,
- PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
- lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
- rx_in_place = 1;
- } else
- skb = NULL;
+ struct pcnet32_private *lp = dev->priv;
+ int entry = lp->cur_rx & lp->rx_mod_mask;
+ int boguscnt = lp->rx_ring_size / 2;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
+ int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
+
+ if (status != 0x03) { /* There was an error. */
+ /*
+ * There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with full-sized
+ * buffers it's possible for a jabber packet to use two
+ * buffers, with only the last correctly noting the error.
+ */
+ if (status & 0x01) /* Only count a general error at the */
+ lp->stats.rx_errors++; /* end of a packet. */
+ if (status & 0x20)
+ lp->stats.rx_frame_errors++;
+ if (status & 0x10)
+ lp->stats.rx_over_errors++;
+ if (status & 0x08)
+ lp->stats.rx_crc_errors++;
+ if (status & 0x04)
+ lp->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
} else {
- skb = dev_alloc_skb(pkt_len+2);
- }
-
- if (skb == NULL) {
- int i;
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n",
- dev->name);
- for (i = 0; i < lp->rx_ring_size; i++)
- if ((short)le16_to_cpu(lp->rx_ring[(entry+i)
- & lp->rx_mod_mask].status) < 0)
- break;
-
- if (i > lp->rx_ring_size -2) {
- lp->stats.rx_dropped++;
- lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
- wmb(); /* Make sure adapter sees owner change */
- lp->cur_rx++;
- }
- break;
- }
- skb->dev = dev;
- if (!rx_in_place) {
- skb_reserve(skb,2); /* 16 byte align */
- skb_put(skb,pkt_len); /* Make room */
- pci_dma_sync_single_for_cpu(lp->pci_dev,
- lp->rx_dma_addr[entry],
- PKT_BUF_SZ-2,
- PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
- (unsigned char *)(lp->rx_skbuff[entry]->data),
- pkt_len,0);
- pci_dma_sync_single_for_device(lp->pci_dev,
- lp->rx_dma_addr[entry],
- PKT_BUF_SZ-2,
- PCI_DMA_FROMDEVICE);
+ /* Malloc up new buffer, compatible with net-2e. */
+ short pkt_len =
+ (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)
+ - 4;
+ struct sk_buff *skb;
+
+ /* Discard oversize frames. */
+ if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: Impossible packet size %d!\n",
+ dev->name, pkt_len);
+ lp->stats.rx_errors++;
+ } else if (pkt_len < 60) {
+ if (netif_msg_rx_err(lp))
+ printk(KERN_ERR "%s: Runt packet!\n",
+ dev->name);
+ lp->stats.rx_errors++;
+ } else {
+ int rx_in_place = 0;
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+
+ if ((newskb =
+ dev_alloc_skb(PKT_BUF_SZ))) {
+ skb_reserve(newskb, 2);
+ skb = lp->rx_skbuff[entry];
+					pci_unmap_single(lp->pci_dev,
+							 lp->rx_dma_addr[entry],
+							 PKT_BUF_SZ - 2,
+							 PCI_DMA_FROMDEVICE);
+					skb_put(skb, pkt_len);
+					lp->rx_skbuff[entry] = newskb;
+					newskb->dev = dev;
+					lp->rx_dma_addr[entry] =
+					    pci_map_single(lp->pci_dev,
+							   newskb->data,
+							   PKT_BUF_SZ - 2,
+							   PCI_DMA_FROMDEVICE);
+					lp->rx_ring[entry].base =
+					    le32_to_cpu(lp->rx_dma_addr[entry]);
+ rx_in_place = 1;
+ } else
+ skb = NULL;
+ } else {
+ skb = dev_alloc_skb(pkt_len + 2);
+ }
+
+ if (skb == NULL) {
+ int i;
+ if (netif_msg_drv(lp))
+ printk(KERN_ERR
+ "%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+ for (i = 0; i < lp->rx_ring_size; i++)
+					if ((short)
+					    le16_to_cpu(lp->rx_ring[(entry + i)
+							& lp->rx_mod_mask].status) < 0)
+ break;
+
+ if (i > lp->rx_ring_size - 2) {
+ lp->stats.rx_dropped++;
+ lp->rx_ring[entry].status |=
+ le16_to_cpu(0x8000);
+ wmb(); /* Make sure adapter sees owner change */
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb->dev = dev;
+ if (!rx_in_place) {
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, pkt_len); /* Make room */
+				pci_dma_sync_single_for_cpu(lp->pci_dev,
+							    lp->rx_dma_addr[entry],
+							    PKT_BUF_SZ - 2,
+							    PCI_DMA_FROMDEVICE);
+				eth_copy_and_sum(skb,
+						 (unsigned char *)(lp->rx_skbuff[entry]->data),
+						 pkt_len, 0);
+				pci_dma_sync_single_for_device(lp->pci_dev,
+							       lp->rx_dma_addr[entry],
+							       PKT_BUF_SZ - 2,
+							       PCI_DMA_FROMDEVICE);
+ }
+ lp->stats.rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ }
}
- lp->stats.rx_bytes += skb->len;
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->last_rx = jiffies;
- lp->stats.rx_packets++;
- }
+ /*
+ * The docs say that the buffer length isn't touched, but Andrew Boyd
+ * of QNX reports that some revs of the 79C965 clear it.
+ */
+ lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+ entry = (++lp->cur_rx) & lp->rx_mod_mask;
+ if (--boguscnt <= 0)
+ break; /* don't stay in loop forever */
}
- /*
- * The docs say that the buffer length isn't touched, but Andrew Boyd
- * of QNX reports that some revs of the 79C965 clear it.
- */
- lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
- entry = (++lp->cur_rx) & lp->rx_mod_mask;
- if (--boguscnt <= 0) break; /* don't stay in loop forever */
- }
-
- return 0;
+
+ return 0;
}
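
pcnet32_rx() above applies the usual copybreak trade-off: frames longer than rx_copybreak are "flipped" (the filled, DMA-mapped buffer is handed up and a freshly allocated one takes its place in the ring), while short frames are copied into a right-sized buffer so the large ring buffer stays mapped. A userspace model of that decision, with malloc/memcpy standing in for skb allocation and DMA handling (all names illustrative):

#include <stdlib.h>
#include <string.h>

struct demo_rx_slot {
	unsigned char *ring_buf;	/* stands in for the DMA-mapped skb data */
	size_t buf_size;		/* full buffer size, like PKT_BUF_SZ - 2 */
};

/* Returns a buffer the caller owns, or NULL if the frame had to be dropped. */
static unsigned char *demo_receive(struct demo_rx_slot *slot, size_t pkt_len,
				   size_t rx_copybreak)
{
	unsigned char *out;

	if (pkt_len > rx_copybreak) {
		unsigned char *fresh = malloc(slot->buf_size);

		if (!fresh)
			return NULL;	/* drop: keep the ring buffer intact */
		out = slot->ring_buf;	/* flip: pass the filled buffer up   */
		slot->ring_buf = fresh;
	} else {
		out = malloc(pkt_len);	/* copy: small frame, reuse ring buffer */
		if (out)
			memcpy(out, slot->ring_buf, pkt_len);
	}
	return out;
}
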
-static int
-pcnet32_close(struct net_device *dev)
+static int pcnet32_close(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr;
- struct pcnet32_private *lp = dev->priv;
- int i;
- unsigned long flags;
+ unsigned long ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = dev->priv;
+ int i;
+ unsigned long flags;
- del_timer_sync(&lp->watchdog_timer);
+ del_timer_sync(&lp->watchdog_timer);
- netif_stop_queue(dev);
+ netif_stop_queue(dev);
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
- lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112);
+ lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
- if (netif_msg_ifdown(lp))
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, lp->a.read_csr (ioaddr, 0));
+ if (netif_msg_ifdown(lp))
+ printk(KERN_DEBUG
+ "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, lp->a.read_csr(ioaddr, 0));
- /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
- lp->a.write_csr (ioaddr, 0, 0x0004);
+ /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
+ lp->a.write_csr(ioaddr, 0, 0x0004);
- /*
- * Switch back to 16bit mode to avoid problems with dumb
- * DOS packet driver after a warm reboot
- */
- lp->a.write_bcr (ioaddr, 20, 4);
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a.write_bcr(ioaddr, 20, 4);
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
- free_irq(dev->irq, dev);
+ free_irq(dev->irq, dev);
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_irqsave(&lp->lock, flags);
- /* free all allocated skbuffs */
- for (i = 0; i < lp->rx_ring_size; i++) {
- lp->rx_ring[i].status = 0;
- wmb(); /* Make sure adapter sees owner change */
- if (lp->rx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(lp->rx_skbuff[i]);
+ /* free all allocated skbuffs */
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ lp->rx_ring[i].status = 0;
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->rx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
+ PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
}
- lp->rx_skbuff[i] = NULL;
- lp->rx_dma_addr[i] = 0;
- }
- for (i = 0; i < lp->tx_ring_size; i++) {
- lp->tx_ring[i].status = 0; /* CPU owns buffer */
- wmb(); /* Make sure adapter sees owner change */
- if (lp->tx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
- lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
- dev_kfree_skb(lp->tx_skbuff[i]);
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->tx_skbuff[i]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(lp->tx_skbuff[i]);
+ }
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
}
- lp->tx_skbuff[i] = NULL;
- lp->tx_dma_addr[i] = 0;
- }
- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_irqrestore(&lp->lock, flags);
- return 0;
+ return 0;
}
-static struct net_device_stats *
-pcnet32_get_stats(struct net_device *dev)
+static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
- u16 saved_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->lock, flags);
- saved_addr = lp->a.read_rap(ioaddr);
- lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112);
- lp->a.write_rap(ioaddr, saved_addr);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return &lp->stats;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 saved_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ saved_addr = lp->a.read_rap(ioaddr);
+ lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
+ lp->a.write_rap(ioaddr, saved_addr);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return &lp->stats;
}
/* taken from the sunlance driver, which it took from the depca driver */
-static void pcnet32_load_multicast (struct net_device *dev)
+static void pcnet32_load_multicast(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- volatile struct pcnet32_init_block *ib = &lp->init_block;
- volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi=dev->mc_list;
- char *addrs;
- int i;
- u32 crc;
-
- /* set all multicast bits */
- if (dev->flags & IFF_ALLMULTI) {
- ib->filter[0] = 0xffffffff;
- ib->filter[1] = 0xffffffff;
+ struct pcnet32_private *lp = dev->priv;
+ volatile struct pcnet32_init_block *ib = &lp->init_block;
+ volatile u16 *mcast_table = (u16 *) & ib->filter;
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ /* multicast address? */
+ if (!(*addrs & 1))
+ continue;
+
+ crc = ether_crc_le(6, addrs);
+ crc = crc >> 26;
+ mcast_table[crc >> 4] =
+ le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) |
+ (1 << (crc & 0xf)));
+ }
return;
- }
- /* clear the multicast filter */
- ib->filter[0] = 0;
- ib->filter[1] = 0;
-
- /* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
- addrs = dmi->dmi_addr;
- dmi = dmi->next;
-
- /* multicast address? */
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
- crc = crc >> 26;
- mcast_table [crc >> 4] = le16_to_cpu(
- le16_to_cpu(mcast_table [crc >> 4]) | (1 << (crc & 0xf)));
- }
- return;
}
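
pcnet32_load_multicast() above hashes each multicast address with the little-endian Ethernet CRC and keeps only the top six CRC bits (crc >> 26); those six bits select one of the 64 bits in the logical-address filter, with crc >> 4 picking the 16-bit filter word and crc & 0xf the bit inside it. The standalone program below reproduces that index computation; the bitwise routine is the standard reflected CRC-32, which matches what ether_crc_le() computes for this purpose:

#include <stdint.h>
#include <stdio.h>

/* Bitwise reflected CRC-32 (poly 0xEDB88320), init 0xFFFFFFFF, no final xor. */
static uint32_t demo_ether_crc_le(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (len-- > 0) {
		uint8_t byte = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, byte >>= 1)
			crc = ((crc ^ byte) & 1) ? (crc >> 1) ^ 0xedb88320
						 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }; /* 224.0.0.1 */
	uint32_t crc = demo_ether_crc_le(6, mc) >> 26;	/* top 6 CRC bits */

	printf("filter word %u, bit %u\n", crc >> 4, crc & 0xf);
	return 0;
}
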
-
/*
* Set or clear the multicast filter for this adaptor.
*/
static void pcnet32_set_multicast_list(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr, flags;
- struct pcnet32_private *lp = dev->priv;
-
- spin_lock_irqsave(&lp->lock, flags);
- if (dev->flags&IFF_PROMISC) {
- /* Log any net taps. */
- if (netif_msg_hw(lp))
- printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
- lp->init_block.mode = le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7);
- } else {
- lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
- pcnet32_load_multicast (dev);
- }
-
- lp->a.write_csr (ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
- pcnet32_restart(dev, 0x0042); /* Resume normal operation */
- netif_wake_queue(dev);
-
- spin_unlock_irqrestore(&lp->lock, flags);
+ unsigned long ioaddr = dev->base_addr, flags;
+ struct pcnet32_private *lp = dev->priv;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ if (netif_msg_hw(lp))
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
+ dev->name);
+ lp->init_block.mode =
+ le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
+ 7);
+ } else {
+ lp->init_block.mode =
+ le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ pcnet32_load_multicast(dev);
+ }
+
+ lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
+ pcnet32_restart(dev, 0x0042); /* Resume normal operation */
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
}
/* This routine assumes that the lp->lock is held */
static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
- u16 val_out;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
+ u16 val_out;
- if (!lp->mii)
- return 0;
+ if (!lp->mii)
+ return 0;
- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
- val_out = lp->a.read_bcr(ioaddr, 34);
+ lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ val_out = lp->a.read_bcr(ioaddr, 34);
- return val_out;
+ return val_out;
}
/* This routine assumes that the lp->lock is held */
static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long ioaddr = dev->base_addr;
- if (!lp->mii)
- return;
+ if (!lp->mii)
+ return;
- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
- lp->a.write_bcr(ioaddr, 34, val);
+ lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ lp->a.write_bcr(ioaddr, 34, val);
}
static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct pcnet32_private *lp = dev->priv;
- int rc;
- unsigned long flags;
+ struct pcnet32_private *lp = dev->priv;
+ int rc;
+ unsigned long flags;
+
+ /* SIOC[GS]MIIxxx ioctls */
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int pcnet32_check_otherphy(struct net_device *dev)
+{
+ struct pcnet32_private *lp = dev->priv;
+ struct mii_if_info mii = lp->mii_if;
+ u16 bmcr;
+ int i;
- /* SIOC[GS]MIIxxx ioctls */
- if (lp->mii) {
- spin_lock_irqsave(&lp->lock, flags);
- rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
- spin_unlock_irqrestore(&lp->lock, flags);
- } else {
- rc = -EOPNOTSUPP;
- }
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (i == lp->mii_if.phy_id)
+ continue; /* skip active phy */
+ if (lp->phymask & (1 << i)) {
+ mii.phy_id = i;
+ if (mii_link_ok(&mii)) {
+ /* found PHY with active link */
+ if (netif_msg_link(lp))
+ printk(KERN_INFO
+ "%s: Using PHY number %d.\n",
+ dev->name, i);
+
+ /* isolate inactive phy */
+ bmcr =
+ mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
+ mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+
+ /* de-isolate new phy */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+
+ /* set new phy address */
+ lp->mii_if.phy_id = i;
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
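
The new pcnet32_check_otherphy() above walks the inactive PHYs recorded in phymask and, as soon as one of them reports link, parks the current PHY by setting the isolate bit in its BMCR and clears that bit on the replacement. A compilable sketch of just that register dance; the MDIO accessors are function-pointer stand-ins for the driver's mdio_read()/mdio_write(), and the register/bit values are the standard MII ones:

#include <stdint.h>

#define DEMO_MII_BMCR		0x00	/* basic mode control register  */
#define DEMO_BMCR_ISOLATE	0x0400	/* electrically isolate the PHY */

typedef uint16_t (*demo_mdio_read_t)(void *bus, int phy, int reg);
typedef void (*demo_mdio_write_t)(void *bus, int phy, int reg, uint16_t val);

/* Isolate the PHY that is being retired, un-isolate the one taking over. */
static void demo_switch_phy(void *bus, demo_mdio_read_t rd,
			    demo_mdio_write_t wr, int old_phy, int new_phy)
{
	uint16_t bmcr;

	bmcr = rd(bus, old_phy, DEMO_MII_BMCR);
	wr(bus, old_phy, DEMO_MII_BMCR, (uint16_t)(bmcr | DEMO_BMCR_ISOLATE));

	bmcr = rd(bus, new_phy, DEMO_MII_BMCR);
	wr(bus, new_phy, DEMO_MII_BMCR, (uint16_t)(bmcr & ~DEMO_BMCR_ISOLATE));
}
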
+
+/*
+ * Show the status of the media. Similar to mii_check_media however it
+ * correctly shows the link speed for all (tested) pcnet32 variants.
+ * Devices with no mii just report link state without speed.
+ *
+ * Caller is assumed to hold and release the lp->lock.
+ */
- return rc;
+static void pcnet32_check_media(struct net_device *dev, int verbose)
+{
+ struct pcnet32_private *lp = dev->priv;
+ int curr_link;
+ int prev_link = netif_carrier_ok(dev) ? 1 : 0;
+ u32 bcr9;
+
+ if (lp->mii) {
+ curr_link = mii_link_ok(&lp->mii_if);
+ } else {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+ }
+ if (!curr_link) {
+ if (prev_link || verbose) {
+ netif_carrier_off(dev);
+ if (netif_msg_link(lp))
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ }
+ if (lp->phycount > 1) {
+ curr_link = pcnet32_check_otherphy(dev);
+ prev_link = 0;
+ }
+ } else if (verbose || !prev_link) {
+ netif_carrier_on(dev);
+ if (lp->mii) {
+ if (netif_msg_link(lp)) {
+ struct ethtool_cmd ecmd;
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ printk(KERN_INFO
+ "%s: link up, %sMbps, %s-duplex\n",
+ dev->name,
+ (ecmd.speed == SPEED_100) ? "100" : "10",
+ (ecmd.duplex ==
+ DUPLEX_FULL) ? "full" : "half");
+ }
+ bcr9 = lp->a.read_bcr(dev->base_addr, 9);
+ if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
+ if (lp->mii_if.full_duplex)
+ bcr9 |= (1 << 0);
+ else
+ bcr9 &= ~(1 << 0);
+ lp->a.write_bcr(dev->base_addr, 9, bcr9);
+ }
+ } else {
+ if (netif_msg_link(lp))
+ printk(KERN_INFO "%s: link up\n", dev->name);
+ }
+ }
}
+/*
+ * Check for loss of link and link establishment.
+ * Can not use mii_check_media because it does nothing if mode is forced.
+ */
+
static void pcnet32_watchdog(struct net_device *dev)
{
- struct pcnet32_private *lp = dev->priv;
- unsigned long flags;
+ struct pcnet32_private *lp = dev->priv;
+ unsigned long flags;
- /* Print the link status if it has changed */
- if (lp->mii) {
+ /* Print the link status if it has changed */
spin_lock_irqsave(&lp->lock, flags);
- mii_check_media (&lp->mii_if, netif_msg_link(lp), 0);
+ pcnet32_check_media(dev, 0);
spin_unlock_irqrestore(&lp->lock, flags);
- }
- mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+ mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
}
static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- if (dev) {
- struct pcnet32_private *lp = dev->priv;
-
- unregister_netdev(dev);
- pcnet32_free_ring(dev);
- release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
- pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
- free_netdev(dev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
- }
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct pcnet32_private *lp = dev->priv;
+
+ unregister_netdev(dev);
+ pcnet32_free_ring(dev);
+ release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ }
}
static struct pci_driver pcnet32_driver = {
- .name = DRV_NAME,
- .probe = pcnet32_probe_pci,
- .remove = __devexit_p(pcnet32_remove_one),
- .id_table = pcnet32_pci_tbl,
+ .name = DRV_NAME,
+ .probe = pcnet32_probe_pci,
+ .remove = __devexit_p(pcnet32_remove_one),
+ .id_table = pcnet32_pci_tbl,
};
/* An additional parameter that may be passed in... */
@@ -2477,9 +2698,11 @@ static int pcnet32_have_pci;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, DRV_NAME " debug level");
module_param(max_interrupt_work, int, 0);
-MODULE_PARM_DESC(max_interrupt_work, DRV_NAME " maximum events handled per interrupt");
+MODULE_PARM_DESC(max_interrupt_work,
+ DRV_NAME " maximum events handled per interrupt");
module_param(rx_copybreak, int, 0);
-MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(rx_copybreak,
+ DRV_NAME " copy breakpoint for copy-only-tiny-frames");
module_param(tx_start_pt, int, 0);
MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
module_param(pcnet32vlb, int, 0);
@@ -2490,7 +2713,9 @@ module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
module_param_array(homepna, int, NULL, 0);
-MODULE_PARM_DESC(homepna, DRV_NAME " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");
+MODULE_PARM_DESC(homepna,
+ DRV_NAME
+		 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)");
MODULE_AUTHOR("Thomas Bogendoerfer");
MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
@@ -2500,44 +2725,44 @@ MODULE_LICENSE("GPL");
static int __init pcnet32_init_module(void)
{
- printk(KERN_INFO "%s", version);
+ printk(KERN_INFO "%s", version);
- pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
+ pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
- if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
- tx_start = tx_start_pt;
+ if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
+ tx_start = tx_start_pt;
- /* find the PCI devices */
- if (!pci_module_init(&pcnet32_driver))
- pcnet32_have_pci = 1;
+ /* find the PCI devices */
+ if (!pci_module_init(&pcnet32_driver))
+ pcnet32_have_pci = 1;
- /* should we find any remaining VLbus devices ? */
- if (pcnet32vlb)
- pcnet32_probe_vlbus();
+ /* should we find any remaining VLbus devices ? */
+ if (pcnet32vlb)
+ pcnet32_probe_vlbus();
- if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
- printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
+ if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
+ printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
- return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
+ return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
}
static void __exit pcnet32_cleanup_module(void)
{
- struct net_device *next_dev;
-
- while (pcnet32_dev) {
- struct pcnet32_private *lp = pcnet32_dev->priv;
- next_dev = lp->next;
- unregister_netdev(pcnet32_dev);
- pcnet32_free_ring(pcnet32_dev);
- release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
- pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
- free_netdev(pcnet32_dev);
- pcnet32_dev = next_dev;
- }
+ struct net_device *next_dev;
+
+ while (pcnet32_dev) {
+ struct pcnet32_private *lp = pcnet32_dev->priv;
+ next_dev = lp->next;
+ unregister_netdev(pcnet32_dev);
+ pcnet32_free_ring(pcnet32_dev);
+ release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
+ free_netdev(pcnet32_dev);
+ pcnet32_dev = next_dev;
+ }
- if (pcnet32_have_pci)
- pci_unregister_driver(&pcnet32_driver);
+ if (pcnet32_have_pci)
+ pci_unregister_driver(&pcnet32_driver);
}
module_init(pcnet32_init_module);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0245e40b51a..f608c12e3e8 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1691,8 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
|| ppp->npmode[npi] != NPMODE_PASS) {
kfree_skb(skb);
} else {
- skb_pull(skb, 2); /* chop off protocol */
- skb_postpull_rcsum(skb, skb->data - 2, 2);
+ /* chop off protocol */
+ skb_pull_rcsum(skb, 2);
skb->dev = ppp->dev;
skb->protocol = htons(npindex_to_ethertype[npi]);
skb->mac.raw = skb->data;
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 9369f811075..475dc930380 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -337,8 +337,7 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state & PPPOX_BOUND) {
struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw;
int len = ntohs(ph->length);
- skb_pull(skb, sizeof(struct pppoe_hdr));
- skb_postpull_rcsum(skb, ph, sizeof(*ph));
+ skb_pull_rcsum(skb, sizeof(struct pppoe_hdr));
if (pskb_trim_rcsum(skb, len))
goto abort_kfree;
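
Both the ppp_generic and pppoe hunks above fold a skb_pull() plus skb_postpull_rcsum() pair into a single skb_pull_rcsum() call, which strips header bytes while keeping a hardware-computed (CHECKSUM_HW) skb->csum consistent. The adjustment itself is ones-complement arithmetic: subtract the checksum of the pulled bytes from the stored total. A small model of that arithmetic — simplified, byte-order and odd-length corner cases glossed over, and not the kernel implementation:

#include <stdint.h>
#include <stddef.h>

/* Fold a 32-bit ones-complement accumulator down to 16 bits. */
static uint16_t demo_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Simplified csum_partial(): ones-complement sum over a byte range
 * (assumes an even length, as in the 2- and 6-byte pulls above). */
static uint32_t demo_csum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)data[i] << 8) | data[i + 1];
	return sum;
}

/* Remove the contribution of the first pull_len bytes from a checksum
 * that covered the whole buffer (ones-complement subtraction). */
static uint16_t demo_postpull(uint16_t whole_csum, const uint8_t *buf,
			      size_t pull_len)
{
	uint32_t sum = whole_csum;

	sum += (uint16_t)~demo_fold(demo_csum(buf, pull_len));
	return demo_fold(sum);
}
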
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index a4b2b6975d6..0784f558ca9 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -549,12 +549,12 @@ void formac_tx_restart(struct s_smc *smc)
static void enable_formac(struct s_smc *smc)
{
/* set formac IMSK : 0 enables irq */
- outpw(FM_A(FM_IMSK1U),~mac_imsk1u) ;
- outpw(FM_A(FM_IMSK1L),~mac_imsk1l) ;
- outpw(FM_A(FM_IMSK2U),~mac_imsk2u) ;
- outpw(FM_A(FM_IMSK2L),~mac_imsk2l) ;
- outpw(FM_A(FM_IMSK3U),~mac_imsk3u) ;
- outpw(FM_A(FM_IMSK3L),~mac_imsk3l) ;
+ outpw(FM_A(FM_IMSK1U),(unsigned short)~mac_imsk1u);
+ outpw(FM_A(FM_IMSK1L),(unsigned short)~mac_imsk1l);
+ outpw(FM_A(FM_IMSK2U),(unsigned short)~mac_imsk2u);
+ outpw(FM_A(FM_IMSK2L),(unsigned short)~mac_imsk2l);
+ outpw(FM_A(FM_IMSK3U),(unsigned short)~mac_imsk3u);
+ outpw(FM_A(FM_IMSK3L),(unsigned short)~mac_imsk3l);
}
#if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 25e028b7ce4..4eda81d41b1 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -44,7 +44,7 @@
#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "1.3"
+#define DRV_VERSION "1.4"
#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
@@ -104,7 +104,6 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
-static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
static int skge_get_regs_len(struct net_device *dev)
{
@@ -728,19 +727,18 @@ static struct ethtool_ops skge_ethtool_ops = {
* Allocate ring elements and chain them together
* One-to-one association of board descriptors with ring elements
*/
-static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
+static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
{
struct skge_tx_desc *d;
struct skge_element *e;
int i;
- ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL);
+ ring->start = kcalloc(sizeof(*e), ring->count, GFP_KERNEL);
if (!ring->start)
return -ENOMEM;
for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
e->desc = d;
- e->skb = NULL;
if (i == ring->count - 1) {
e->next = ring->start;
d->next_offset = base;
@@ -2169,27 +2167,31 @@ static int skge_up(struct net_device *dev)
if (!skge->mem)
return -ENOMEM;
+ BUG_ON(skge->dma & 7);
+
+ if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+ printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
+ err = -EINVAL;
+ goto free_pci_mem;
+ }
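
The new check in skge_up() above refuses a descriptor memory block whose first and last byte live on different sides of a 4 GiB boundary — apparently because the descriptor chain carries only 32-bit link addresses, so the upper half of the bus address has to stay constant across the whole region. Comparing the upper 32 bits of the start and end addresses catches exactly that case; a tiny standalone version of the test:

#include <stdint.h>
#include <stdio.h>

/* True if [addr, addr + size) keeps the same upper 32 address bits,
 * i.e. it does not cross a 4 GiB boundary. */
static int demo_fits_in_4g_window(uint64_t addr, uint64_t size)
{
	return (addr >> 32) == ((addr + size) >> 32);
}

int main(void)
{
	printf("%d\n", demo_fits_in_4g_window(0xfffff000ULL, 0x2000));	/* 0: crosses */
	printf("%d\n", demo_fits_in_4g_window(0x100000000ULL, 0x2000));	/* 1: fits    */
	return 0;
}
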
+
memset(skge->mem, 0, skge->mem_size);
- if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
+ err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
+ if (err)
goto free_pci_mem;
err = skge_rx_fill(skge);
if (err)
goto free_rx_ring;
- if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
- skge->dma + rx_size)))
+ err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
+ skge->dma + rx_size);
+ if (err)
goto free_rx_ring;
skge->tx_avail = skge->tx_ring.count - 1;
- /* Enable IRQ from port */
- spin_lock_irq(&hw->hw_lock);
- hw->intr_mask |= portirqmask[port];
- skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
-
/* Initialize MAC */
spin_lock_bh(&hw->phy_lock);
if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2246,11 +2248,6 @@ static int skge_down(struct net_device *dev)
else
yukon_stop(skge);
- spin_lock_irq(&hw->hw_lock);
- hw->intr_mask &= ~portirqmask[skge->port];
- skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
-
/* Stop transmitter */
skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
@@ -2307,18 +2304,15 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
int i;
u32 control, len;
u64 map;
- unsigned long flags;
skb = skb_padto(skb, ETH_ZLEN);
if (!skb)
return NETDEV_TX_OK;
- local_irq_save(flags);
if (!spin_trylock(&skge->tx_lock)) {
- /* Collision - tell upper layer to requeue */
- local_irq_restore(flags);
- return NETDEV_TX_LOCKED;
- }
+ /* Collision - tell upper layer to requeue */
+ return NETDEV_TX_LOCKED;
+ }
if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
if (!netif_queue_stopped(dev)) {
@@ -2327,7 +2321,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
dev->name);
}
- spin_unlock_irqrestore(&skge->tx_lock, flags);
+ spin_unlock(&skge->tx_lock);
return NETDEV_TX_BUSY;
}
@@ -2402,8 +2396,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
+ mmiowb();
+ spin_unlock(&skge->tx_lock);
+
dev->trans_start = jiffies;
- spin_unlock_irqrestore(&skge->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -2416,7 +2412,7 @@ static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
pci_unmap_addr(e, mapaddr),
pci_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
- dev_kfree_skb_any(e->skb);
+ dev_kfree_skb(e->skb);
e->skb = NULL;
} else {
pci_unmap_page(hw->pdev,
@@ -2430,15 +2426,14 @@ static void skge_tx_clean(struct skge_port *skge)
{
struct skge_ring *ring = &skge->tx_ring;
struct skge_element *e;
- unsigned long flags;
- spin_lock_irqsave(&skge->tx_lock, flags);
+ spin_lock_bh(&skge->tx_lock);
for (e = ring->to_clean; e != ring->to_use; e = e->next) {
++skge->tx_avail;
skge_tx_free(skge->hw, e);
}
ring->to_clean = e;
- spin_unlock_irqrestore(&skge->tx_lock, flags);
+ spin_unlock_bh(&skge->tx_lock);
}
static void skge_tx_timeout(struct net_device *dev)
@@ -2663,6 +2658,37 @@ resubmit:
return NULL;
}
+static void skge_tx_done(struct skge_port *skge)
+{
+ struct skge_ring *ring = &skge->tx_ring;
+ struct skge_element *e;
+
+ spin_lock(&skge->tx_lock);
+ for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
+ struct skge_tx_desc *td = e->desc;
+ u32 control;
+
+ rmb();
+ control = td->control;
+ if (control & BMU_OWN)
+ break;
+
+ if (unlikely(netif_msg_tx_done(skge)))
+ printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
+ skge->netdev->name, e - ring->start, td->status);
+
+ skge_tx_free(skge->hw, e);
+ e->skb = NULL;
+ ++skge->tx_avail;
+ }
+ ring->to_clean = e;
+ skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+ if (skge->tx_avail > MAX_SKB_FRAGS + 1)
+ netif_wake_queue(skge->netdev);
+
+ spin_unlock(&skge->tx_lock);
+}
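
skge_tx_done() above is the former skge_tx_intr() relocated so that transmit completions are reclaimed from the NAPI poll path rather than from the hard interrupt handler. The loop walks from the oldest outstanding descriptor and stops at the first one the hardware still owns. A schematic of that reclaim loop in plain C — the descriptor type and the ownership flag value are demo placeholders, and the DMA unmapping/skb freeing the real driver does is only noted in a comment:

#include <stdint.h>

#define DEMO_OWN	0x8000u	/* demo flag: "hardware still owns this slot" */

struct demo_txd {
	volatile uint16_t control;	/* written back by the (imaginary) NIC */
};

/* Advance *to_clean past every completed descriptor; return slots freed. */
static unsigned int demo_tx_reclaim(struct demo_txd *ring, unsigned int count,
				    unsigned int *to_clean, unsigned int to_use)
{
	unsigned int freed = 0;
	unsigned int i = *to_clean;

	while (i != to_use && !(ring[i].control & DEMO_OWN)) {
		/* real driver: pci_unmap, dev_kfree_skb, ++tx_avail */
		i = (i + 1) % count;
		freed++;
	}
	*to_clean = i;
	return freed;
}
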
static int skge_poll(struct net_device *dev, int *budget)
{
@@ -2670,8 +2696,10 @@ static int skge_poll(struct net_device *dev, int *budget)
struct skge_hw *hw = skge->hw;
struct skge_ring *ring = &skge->rx_ring;
struct skge_element *e;
- unsigned int to_do = min(dev->quota, *budget);
- unsigned int work_done = 0;
+ int to_do = min(dev->quota, *budget);
+ int work_done = 0;
+
+ skge_tx_done(skge);
for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
struct skge_rx_desc *rd = e->desc;
@@ -2683,8 +2711,8 @@ static int skge_poll(struct net_device *dev, int *budget)
if (control & BMU_OWN)
break;
- skb = skge_rx_get(skge, e, control, rd->status,
- le16_to_cpu(rd->csum2));
+ skb = skge_rx_get(skge, e, control, rd->status,
+ le16_to_cpu(rd->csum2));
if (likely(skb)) {
dev->last_rx = jiffies;
netif_receive_skb(skb);
@@ -2705,49 +2733,15 @@ static int skge_poll(struct net_device *dev, int *budget)
if (work_done >= to_do)
return 1; /* not done */
- spin_lock_irq(&hw->hw_lock);
- __netif_rx_complete(dev);
- hw->intr_mask |= portirqmask[skge->port];
+ netif_rx_complete(dev);
+ mmiowb();
+
+ hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
return 0;
}
-static inline void skge_tx_intr(struct net_device *dev)
-{
- struct skge_port *skge = netdev_priv(dev);
- struct skge_hw *hw = skge->hw;
- struct skge_ring *ring = &skge->tx_ring;
- struct skge_element *e;
-
- spin_lock(&skge->tx_lock);
- for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
- struct skge_tx_desc *td = e->desc;
- u32 control;
-
- rmb();
- control = td->control;
- if (control & BMU_OWN)
- break;
-
- if (unlikely(netif_msg_tx_done(skge)))
- printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
- dev->name, e - ring->start, td->status);
-
- skge_tx_free(hw, e);
- e->skb = NULL;
- ++skge->tx_avail;
- }
- ring->to_clean = e;
- skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
- if (skge->tx_avail > MAX_SKB_FRAGS + 1)
- netif_wake_queue(dev);
-
- spin_unlock(&skge->tx_lock);
-}
-
/* Parity errors seem to happen when Genesis is connected to a switch
* with no other ports present. Heartbeat error??
*/
@@ -2770,17 +2764,6 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
}
-static void skge_pci_clear(struct skge_hw *hw)
-{
- u16 status;
-
- pci_read_config_word(hw->pdev, PCI_STATUS, &status);
- skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- pci_write_config_word(hw->pdev, PCI_STATUS,
- status | PCI_STATUS_ERROR_BITS);
- skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
-}
-
static void skge_mac_intr(struct skge_hw *hw, int port)
{
if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2822,23 +2805,39 @@ static void skge_error_irq(struct skge_hw *hw)
if (hwstatus & IS_M2_PAR_ERR)
skge_mac_parity(hw, 1);
- if (hwstatus & IS_R1_PAR_ERR)
+ if (hwstatus & IS_R1_PAR_ERR) {
+ printk(KERN_ERR PFX "%s: receive queue parity error\n",
+ hw->dev[0]->name);
skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
+ }
- if (hwstatus & IS_R2_PAR_ERR)
+ if (hwstatus & IS_R2_PAR_ERR) {
+ printk(KERN_ERR PFX "%s: receive queue parity error\n",
+ hw->dev[1]->name);
skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
+ }
if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
- printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n",
- hwstatus);
+ u16 pci_status, pci_cmd;
+
+ pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
+ pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
- skge_pci_clear(hw);
+ printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
+ pci_name(hw->pdev), pci_cmd, pci_status);
+
+ /* Write the error bits back to clear them. */
+ pci_status &= PCI_STATUS_ERROR_BITS;
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ pci_write_config_word(hw->pdev, PCI_COMMAND,
+ pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+ pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* if error still set then just ignore it */
hwstatus = skge_read32(hw, B0_HWE_ISRC);
if (hwstatus & IS_IRQ_STAT) {
- pr_debug("IRQ status %x: still set ignoring hardware errors\n",
- hwstatus);
+ printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n");
hw->intr_mask &= ~IS_HW_ERR;
}
}
@@ -2855,12 +2854,11 @@ static void skge_extirq(unsigned long data)
int port;
spin_lock(&hw->phy_lock);
- for (port = 0; port < 2; port++) {
+ for (port = 0; port < hw->ports; port++) {
struct net_device *dev = hw->dev[port];
+ struct skge_port *skge = netdev_priv(dev);
- if (dev && netif_running(dev)) {
- struct skge_port *skge = netdev_priv(dev);
-
+ if (netif_running(dev)) {
if (hw->chip_id != CHIP_ID_GENESIS)
yukon_phy_intr(skge);
else
@@ -2869,38 +2867,39 @@ static void skge_extirq(unsigned long data)
}
spin_unlock(&hw->phy_lock);
- spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
{
struct skge_hw *hw = dev_id;
- u32 status = skge_read32(hw, B0_SP_ISRC);
+ u32 status;
- if (status == 0 || status == ~0) /* hotplug or shared irq */
+ /* Reading this register masks IRQ */
+ status = skge_read32(hw, B0_SP_ISRC);
+ if (status == 0)
return IRQ_NONE;
- spin_lock(&hw->hw_lock);
- if (status & IS_R1_F) {
+ if (status & IS_EXT_REG) {
+ hw->intr_mask &= ~IS_EXT_REG;
+ tasklet_schedule(&hw->ext_tasklet);
+ }
+
+ if (status & (IS_R1_F|IS_XA1_F)) {
skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~IS_R1_F;
+ hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
netif_rx_schedule(hw->dev[0]);
}
- if (status & IS_R2_F) {
+ if (status & (IS_R2_F|IS_XA2_F)) {
skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
- hw->intr_mask &= ~IS_R2_F;
+ hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
netif_rx_schedule(hw->dev[1]);
}
- if (status & IS_XA1_F)
- skge_tx_intr(hw->dev[0]);
-
- if (status & IS_XA2_F)
- skge_tx_intr(hw->dev[1]);
+ if (likely((status & hw->intr_mask) == 0))
+ return IRQ_HANDLED;
if (status & IS_PA_TO_RX1) {
struct skge_port *skge = netdev_priv(hw->dev[0]);
@@ -2929,13 +2928,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
if (status & IS_HW_ERR)
skge_error_irq(hw);
- if (status & IS_EXT_REG) {
- hw->intr_mask &= ~IS_EXT_REG;
- tasklet_schedule(&hw->ext_tasklet);
- }
-
skge_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
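
The reworked skge_intr() above follows the usual NAPI contract: the receive/transmit source bits for a port are cleared from intr_mask, netif_rx_schedule() queues the poll routine, and skge_poll() later re-enables those bits once the ring has been drained. A compilable sketch of that mask/schedule/re-enable flow, with a plain bitmask and callbacks standing in for the B0_IMSK register and the NAPI core (every name here is illustrative):

#include <stdint.h>

#define DEMO_IRQ_RX	0x1u
#define DEMO_IRQ_TX	0x2u

struct demo_nic {
	uint32_t intr_mask;			/* software copy of the IRQ mask    */
	void (*write_imsk)(uint32_t mask);	/* stand-in for writing B0_IMSK     */
	void (*schedule_poll)(void);		/* stand-in for netif_rx_schedule() */
};

/* Hard-IRQ half: mask the sources that the poll loop will service. */
static void demo_irq(struct demo_nic *nic, uint32_t status)
{
	if (status & (DEMO_IRQ_RX | DEMO_IRQ_TX)) {
		nic->intr_mask &= ~(DEMO_IRQ_RX | DEMO_IRQ_TX);
		nic->write_imsk(nic->intr_mask);
		nic->schedule_poll();
	}
}

/* Poll half: once all work is done, unmask so the next frame interrupts again. */
static void demo_poll_done(struct demo_nic *nic)
{
	nic->intr_mask |= DEMO_IRQ_RX | DEMO_IRQ_TX;
	nic->write_imsk(nic->intr_mask);
}
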
@@ -3010,7 +3003,7 @@ static const char *skge_board_name(const struct skge_hw *hw)
static int skge_reset(struct skge_hw *hw)
{
u32 reg;
- u16 ctst;
+ u16 ctst, pci_status;
u8 t8, mac_cfg, pmd_type, phy_type;
int i;
@@ -3021,8 +3014,13 @@ static int skge_reset(struct skge_hw *hw)
skge_write8(hw, B0_CTST, CS_RST_CLR);
/* clear PCI errors, if any */
- skge_pci_clear(hw);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+ skge_write8(hw, B2_TST_CTRL2, 0);
+ pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
+ pci_write_config_word(hw->pdev, PCI_STATUS,
+ pci_status | PCI_STATUS_ERROR_BITS);
+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
skge_write8(hw, B0_CTST, CS_MRST_CLR);
/* restore CLK_RUN bits (for Yukon-Lite) */
@@ -3081,7 +3079,10 @@ static int skge_reset(struct skge_hw *hw)
else
hw->ram_size = t8 * 4096;
- hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
+ hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
+ if (hw->ports > 1)
+ hw->intr_mask |= IS_PORT_2;
+
if (hw->chip_id == CHIP_ID_GENESIS)
genesis_init(hw);
else {
@@ -3251,13 +3252,15 @@ static int __devinit skge_probe(struct pci_dev *pdev,
struct skge_hw *hw;
int err, using_dac = 0;
- if ((err = pci_enable_device(pdev))) {
+ err = pci_enable_device(pdev);
+ if (err) {
printk(KERN_ERR PFX "%s cannot enable PCI device\n",
pci_name(pdev));
goto err_out;
}
- if ((err = pci_request_regions(pdev, DRV_NAME))) {
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
pci_name(pdev));
goto err_out_disable_pdev;
@@ -3265,22 +3268,18 @@ static int __devinit skge_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (sizeof(dma_addr_t) > sizeof(u32) &&
- !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
- if (err < 0) {
- printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
- "for consistent allocations\n", pci_name(pdev));
- goto err_out_free_regions;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
- if (err) {
- printk(KERN_ERR PFX "%s no usable DMA configuration\n",
- pci_name(pdev));
- goto err_out_free_regions;
- }
+ } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+ using_dac = 0;
+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ }
+
+ if (err) {
+ printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+ pci_name(pdev));
+ goto err_out_free_regions;
}
#ifdef __BIG_ENDIAN
@@ -3304,7 +3303,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
hw->pdev = pdev;
spin_lock_init(&hw->phy_lock);
- spin_lock_init(&hw->hw_lock);
tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
@@ -3314,7 +3312,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
- if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) {
+ err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw);
+ if (err) {
printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
pci_name(pdev), pdev->irq);
goto err_out_iounmap;
@@ -3332,7 +3331,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
goto err_out_led_off;
- if ((err = register_netdev(dev))) {
+ err = register_netdev(dev);
+ if (err) {
printk(KERN_ERR PFX "%s: cannot register net device\n",
pci_name(pdev));
goto err_out_free_netdev;
@@ -3387,7 +3387,6 @@ static void __devexit skge_remove(struct pci_dev *pdev)
skge_write32(hw, B0_IMSK, 0);
skge_write16(hw, B0_LED, LED_STAT_OFF);
- skge_pci_clear(hw);
skge_write8(hw, B0_CTST, CS_RST_SET);
tasklet_kill(&hw->ext_tasklet);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 941f12a333b..2efdacc290e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2402,7 +2402,6 @@ struct skge_hw {
struct tasklet_struct ext_tasklet;
spinlock_t phy_lock;
- spinlock_t hw_lock;
};
enum {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 73260364cba..f08fe6c884b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "0.15"
+#define DRV_VERSION "1.1"
#define PFX DRV_NAME " "
/*
@@ -61,10 +61,6 @@
* a receive requires one (or two if using 64 bit dma).
*/
-#define is_ec_a1(hw) \
- unlikely((hw)->chip_id == CHIP_ID_YUKON_EC && \
- (hw)->chip_rev == CHIP_REV_YU_EC_A1)
-
#define RX_LE_SIZE 512
#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
@@ -96,6 +92,10 @@ static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+static int disable_msi = 0;
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -504,9 +504,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
/* Force a renegotiation */
static void sky2_phy_reinit(struct sky2_port *sky2)
{
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
sky2_phy_init(sky2->hw, sky2->port);
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
}
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
@@ -571,9 +571,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
sky2_phy_init(hw, port);
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
/* MIB clear */
reg = gma_read16(hw, port, GM_PHY_ADDR);
@@ -725,37 +725,11 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
return le;
}
-/*
- * This is a workaround code taken from SysKonnect sk98lin driver
- * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
- */
-static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
- u16 idx, u16 *last, u16 size)
+/* Update chip's next pointer */
+static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
{
wmb();
- if (is_ec_a1(hw) && idx < *last) {
- u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
-
- if (hwget == 0) {
- /* Start prefetching again */
- sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0);
- goto setnew;
- }
-
- if (hwget == size - 1) {
- /* set watermark to one list element */
- sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);
-
- /* set put index to first list element */
- sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
- } else /* have hardware go to end of list */
- sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX),
- size - 1);
- } else {
-setnew:
- sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
- }
- *last = idx;
+ sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
mmiowb();
}
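
The simplified sky2_put_idx() above drops the old EC-A1 workaround and keeps only the barrier-ordered index update. A minimal sketch of that general pattern, assuming a hypothetical ring whose put index lives in a 16-bit MMIO register (names are illustrative, not this driver's):

#include <asm/io.h>
#include <asm/system.h>

/* Publish new descriptors, then advance the hardware put index.
 * wmb() makes the descriptor stores visible before the index write;
 * mmiowb() orders the MMIO store before the caller releases its lock,
 * so another CPU taking the lock cannot reorder ahead of it. */
static inline void ring_put_idx(void __iomem *put_reg, u16 idx)
{
	wmb();
	writew(idx, put_reg);
	mmiowb();
}
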
@@ -878,7 +852,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!netif_running(dev))
return -ENODEV; /* Phy still in reset */
- switch(cmd) {
+ switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = PHY_ADDR_MARV;
@@ -886,9 +860,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG: {
u16 val = 0;
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
data->val_out = val;
break;
@@ -898,10 +872,10 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
data->val_in);
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
break;
}
return err;
@@ -1001,7 +975,6 @@ static int sky2_rx_start(struct sky2_port *sky2)
/* Tell chip about available buffers */
sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
- sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
return 0;
nomem:
sky2_rx_clean(sky2);
@@ -1014,7 +987,7 @@ static int sky2_up(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- u32 ramsize, rxspace;
+ u32 ramsize, rxspace, imask;
int err = -ENOMEM;
if (netif_msg_ifup(sky2))
@@ -1079,10 +1052,10 @@ static int sky2_up(struct net_device *dev)
goto err_out;
/* Enable interrupts from phy/mac for port */
- spin_lock_irq(&hw->hw_lock);
- hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
+ imask = sky2_read32(hw, B0_IMSK);
+ imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
+ sky2_write32(hw, B0_IMSK, imask);
+
return 0;
err_out:
@@ -1299,8 +1272,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
- &sky2->tx_last_put, TX_RING_SIZE);
+ sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
out_unlock:
spin_unlock(&sky2->tx_lock);
@@ -1332,7 +1304,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
struct tx_ring_info *re = sky2->tx_ring + put;
struct sk_buff *skb = re->skb;
- nxt = re->idx;
+ nxt = re->idx;
BUG_ON(nxt >= TX_RING_SIZE);
prefetch(sky2->tx_ring + nxt);
@@ -1348,7 +1320,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
struct tx_ring_info *fre;
fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE;
pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
- skb_shinfo(skb)->frags[i].size,
+ skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
}
@@ -1356,7 +1328,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
}
sky2->tx_cons = put;
- if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
+ if (tx_avail(sky2) > MAX_SKB_TX_LE)
netif_wake_queue(dev);
}
@@ -1375,6 +1347,7 @@ static int sky2_down(struct net_device *dev)
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u16 ctrl;
+ u32 imask;
/* Never really got started! */
if (!sky2->tx_le)
@@ -1386,14 +1359,6 @@ static int sky2_down(struct net_device *dev)
/* Stop more packets from being queued */
netif_stop_queue(dev);
- /* Disable port IRQ */
- spin_lock_irq(&hw->hw_lock);
- hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
-
- flush_scheduled_work();
-
sky2_phy_reset(hw, port);
/* Stop transmitter */
@@ -1437,6 +1402,11 @@ static int sky2_down(struct net_device *dev)
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+ /* Disable port IRQ */
+ imask = sky2_read32(hw, B0_IMSK);
+	imask &= ~((sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2);
+ sky2_write32(hw, B0_IMSK, imask);
+
/* turn off LED's */
sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
@@ -1631,20 +1601,19 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
return 0;
}
-/*
- * Interrupt from PHY are handled outside of interrupt context
- * because accessing phy registers requires spin wait which might
- * cause excess interrupt latency.
- */
-static void sky2_phy_task(void *arg)
+/* Interrupt from PHY */
+static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
{
- struct sky2_port *sky2 = arg;
- struct sky2_hw *hw = sky2->hw;
+ struct net_device *dev = hw->dev[port];
+ struct sky2_port *sky2 = netdev_priv(dev);
u16 istatus, phystat;
- down(&sky2->phy_sema);
- istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT);
- phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT);
+ spin_lock(&sky2->phy_lock);
+ istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
+ phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
+
+ if (!netif_running(dev))
+ goto out;
if (netif_msg_intr(sky2))
printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
@@ -1670,12 +1639,7 @@ static void sky2_phy_task(void *arg)
sky2_link_down(sky2);
}
out:
- up(&sky2->phy_sema);
-
- spin_lock_irq(&hw->hw_lock);
- hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
+ spin_unlock(&sky2->phy_lock);
}
@@ -1687,31 +1651,40 @@ static void sky2_tx_timeout(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned txq = txqaddr[sky2->port];
- u16 ridx;
-
- /* Maybe we just missed an status interrupt */
- spin_lock(&sky2->tx_lock);
- ridx = sky2_read16(hw,
- sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
- sky2_tx_complete(sky2, ridx);
- spin_unlock(&sky2->tx_lock);
-
- if (!netif_queue_stopped(dev)) {
- if (net_ratelimit())
- pr_info(PFX "transmit interrupt missed? recovered\n");
- return;
- }
+ u16 report, done;
if (netif_msg_timer(sky2))
printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
- sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
- sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+ report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
+ done = sky2_read16(hw, Q_ADDR(txq, Q_DONE));
- sky2_tx_clean(sky2);
+ printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
+ dev->name,
+ sky2->tx_cons, sky2->tx_prod, report, done);
- sky2_qset(hw, txq);
- sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
+ if (report != done) {
+ printk(KERN_INFO PFX "status burst pending (irq moderation?)\n");
+
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+ } else if (report != sky2->tx_cons) {
+ printk(KERN_INFO PFX "status report lost?\n");
+
+ spin_lock_bh(&sky2->tx_lock);
+ sky2_tx_complete(sky2, report);
+ spin_unlock_bh(&sky2->tx_lock);
+ } else {
+ printk(KERN_INFO PFX "hardware hung? flushing\n");
+
+ sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
+ sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+
+ sky2_tx_clean(sky2);
+
+ sky2_qset(hw, txq);
+ sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
+ }
}
@@ -1730,6 +1703,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
struct sky2_hw *hw = sky2->hw;
int err;
u16 ctl, mode;
+ u32 imask;
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
@@ -1742,12 +1716,15 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+ imask = sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
dev->trans_start = jiffies; /* prevent tx timeout */
netif_stop_queue(dev);
netif_poll_disable(hw->dev[0]);
+ synchronize_irq(hw->pdev->irq);
+
ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
sky2_rx_stop(sky2);
@@ -1766,7 +1743,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
err = sky2_rx_start(sky2);
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ sky2_write32(hw, B0_IMSK, imask);
if (err)
dev_close(dev);
@@ -1843,8 +1820,7 @@ resubmit:
sky2_rx_add(sky2, re->mapaddr);
/* Tell receiver about new buffers. */
- sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put,
- &sky2->rx_last_put, RX_LE_SIZE);
+ sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put);
return skb;
@@ -1871,76 +1847,51 @@ error:
goto resubmit;
}
-/*
- * Check for transmit complete
- */
-#define TX_NO_STATUS 0xffff
-
-static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
+/* Transmit complete */
+static inline void sky2_tx_done(struct net_device *dev, u16 last)
{
- if (last != TX_NO_STATUS) {
- struct net_device *dev = hw->dev[port];
- if (dev && netif_running(dev)) {
- struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_port *sky2 = netdev_priv(dev);
- spin_lock(&sky2->tx_lock);
- sky2_tx_complete(sky2, last);
- spin_unlock(&sky2->tx_lock);
- }
+ if (netif_running(dev)) {
+ spin_lock(&sky2->tx_lock);
+ sky2_tx_complete(sky2, last);
+ spin_unlock(&sky2->tx_lock);
}
}
-/*
- * Both ports share the same status interrupt, therefore there is only
- * one poll routine.
- */
-static int sky2_poll(struct net_device *dev0, int *budget)
+/* Process status response ring */
+static int sky2_status_intr(struct sky2_hw *hw, int to_do)
{
- struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
- unsigned int to_do = min(dev0->quota, *budget);
- unsigned int work_done = 0;
- u16 hwidx;
- u16 tx_done[2] = { TX_NO_STATUS, TX_NO_STATUS };
-
- sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
- /*
- * Kick the STAT_LEV_TIMER_CTRL timer.
- * This fixes my hangs on Yukon-EC (0xb6) rev 1.
- * The if clause is there to start the timer only if it has been
- * configured correctly and not been disabled via ethtool.
- */
- if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) {
- sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
- sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
- }
+ int work_done = 0;
- hwidx = sky2_read16(hw, STAT_PUT_IDX);
- BUG_ON(hwidx >= STATUS_RING_SIZE);
rmb();
- while (hwidx != hw->st_idx) {
+ for(;;) {
struct sky2_status_le *le = hw->st_le + hw->st_idx;
struct net_device *dev;
struct sky2_port *sky2;
struct sk_buff *skb;
u32 status;
u16 length;
+ u8 link, opcode;
+
+ opcode = le->opcode;
+ if (!opcode)
+ break;
+ opcode &= ~HW_OWNER;
- le = hw->st_le + hw->st_idx;
hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
- prefetch(hw->st_le + hw->st_idx);
+ le->opcode = 0;
- BUG_ON(le->link >= 2);
- dev = hw->dev[le->link];
- if (dev == NULL || !netif_running(dev))
- continue;
+ link = le->link;
+ BUG_ON(link >= 2);
+ dev = hw->dev[link];
sky2 = netdev_priv(dev);
- status = le32_to_cpu(le->status);
- length = le16_to_cpu(le->length);
+ length = le->length;
+ status = le->status;
- switch (le->opcode & ~HW_OWNER) {
+ switch (opcode) {
case OP_RXSTAT:
skb = sky2_receive(sky2, length, status);
if (!skb)
@@ -1980,42 +1931,23 @@ static int sky2_poll(struct net_device *dev0, int *budget)
case OP_TXINDEXLE:
/* TX index reports status for both ports */
- tx_done[0] = status & 0xffff;
- tx_done[1] = ((status >> 24) & 0xff)
- | (u16)(length & 0xf) << 8;
+ sky2_tx_done(hw->dev[0], status & 0xffff);
+ if (hw->dev[1])
+ sky2_tx_done(hw->dev[1],
+ ((status >> 24) & 0xff)
+ | (u16)(length & 0xf) << 8);
break;
default:
if (net_ratelimit())
printk(KERN_WARNING PFX
- "unknown status opcode 0x%x\n", le->opcode);
+ "unknown status opcode 0x%x\n", opcode);
break;
}
}
exit_loop:
- sky2_tx_check(hw, 0, tx_done[0]);
- sky2_tx_check(hw, 1, tx_done[1]);
-
- if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
- sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
- sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
- }
-
- if (likely(work_done < to_do)) {
- spin_lock_irq(&hw->hw_lock);
- __netif_rx_complete(dev0);
-
- hw->intr_mask |= Y2_IS_STAT_BMU;
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
- spin_unlock_irq(&hw->hw_lock);
-
- return 0;
- } else {
- *budget -= work_done;
- dev0->quota -= work_done;
- return 1;
- }
+ return work_done;
}
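
The rewritten status loop above no longer reads STAT_PUT_IDX; it consumes ring entries until it reaches one whose opcode the hardware has not filled in yet, clearing the opcode as it goes. A rough sketch of that consumer pattern with hypothetical types (not the driver's own structures):

#include <linux/types.h>
#include <asm/system.h>

struct status_elem {
	u8 opcode;		/* 0 means the chip has not written this slot yet */
	/* ... rest of the hardware-defined element ... */
};

/* Walk the ring until an unwritten element or the budget is hit,
 * returning how many elements were processed. */
static int status_ring_consume(struct status_elem *ring, unsigned int size,
			       unsigned int *idx, int budget)
{
	int done = 0;

	rmb();				/* see the DMA writes before testing opcodes */
	while (done < budget) {
		struct status_elem *elem = ring + *idx;

		if (!elem->opcode)
			break;		/* still owned by the hardware */

		/* handle_element(elem) would go here */

		elem->opcode = 0;	/* hand the slot back to the chip */
		*idx = (*idx + 1) % size;
		done++;
	}
	return done;
}
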
static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
@@ -2134,57 +2066,97 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
}
}
-static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
+/* This should never happen; it is a fatal situation */
+static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
+ const char *rxtx, u32 mask)
{
struct net_device *dev = hw->dev[port];
struct sky2_port *sky2 = netdev_priv(dev);
+ u32 imask;
+
+ printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n",
+ dev ? dev->name : "<not registered>", rxtx);
- hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ imask = sky2_read32(hw, B0_IMSK);
+ imask &= ~mask;
+ sky2_write32(hw, B0_IMSK, imask);
- schedule_work(&sky2->phy_task);
+ if (dev) {
+ spin_lock(&sky2->phy_lock);
+ sky2_link_down(sky2);
+ spin_unlock(&sky2->phy_lock);
+ }
}
-static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
+static int sky2_poll(struct net_device *dev0, int *budget)
{
- struct sky2_hw *hw = dev_id;
- struct net_device *dev0 = hw->dev[0];
- u32 status;
+ struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
+ int work_limit = min(dev0->quota, *budget);
+ int work_done = 0;
+ u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
- status = sky2_read32(hw, B0_Y2_SP_ISRC2);
- if (status == 0 || status == ~0)
- return IRQ_NONE;
+ if (unlikely(status & ~Y2_IS_STAT_BMU)) {
+ if (status & Y2_IS_HW_ERR)
+ sky2_hw_intr(hw);
- spin_lock(&hw->hw_lock);
- if (status & Y2_IS_HW_ERR)
- sky2_hw_intr(hw);
+ if (status & Y2_IS_IRQ_PHY1)
+ sky2_phy_intr(hw, 0);
- /* Do NAPI for Rx and Tx status */
- if (status & Y2_IS_STAT_BMU) {
- hw->intr_mask &= ~Y2_IS_STAT_BMU;
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ if (status & Y2_IS_IRQ_PHY2)
+ sky2_phy_intr(hw, 1);
- if (likely(__netif_rx_schedule_prep(dev0))) {
- prefetch(&hw->st_le[hw->st_idx]);
- __netif_rx_schedule(dev0);
- }
+ if (status & Y2_IS_IRQ_MAC1)
+ sky2_mac_intr(hw, 0);
+
+ if (status & Y2_IS_IRQ_MAC2)
+ sky2_mac_intr(hw, 1);
+
+ if (status & Y2_IS_CHK_RX1)
+ sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
+
+ if (status & Y2_IS_CHK_RX2)
+ sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
+
+ if (status & Y2_IS_CHK_TXA1)
+ sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
+
+ if (status & Y2_IS_CHK_TXA2)
+ sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
}
- if (status & Y2_IS_IRQ_PHY1)
- sky2_phy_intr(hw, 0);
+ if (status & Y2_IS_STAT_BMU) {
+ work_done = sky2_status_intr(hw, work_limit);
+ *budget -= work_done;
+ dev0->quota -= work_done;
+
+ if (work_done >= work_limit)
+ return 1;
- if (status & Y2_IS_IRQ_PHY2)
- sky2_phy_intr(hw, 1);
+ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+ }
- if (status & Y2_IS_IRQ_MAC1)
- sky2_mac_intr(hw, 0);
+ netif_rx_complete(dev0);
- if (status & Y2_IS_IRQ_MAC2)
- sky2_mac_intr(hw, 1);
+ status = sky2_read32(hw, B0_Y2_SP_LISR);
+ return 0;
+}
- sky2_write32(hw, B0_Y2_SP_ICR, 2);
+static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct sky2_hw *hw = dev_id;
+ struct net_device *dev0 = hw->dev[0];
+ u32 status;
- spin_unlock(&hw->hw_lock);
+	/* Reading this register masks interrupts as a side effect */
+ status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+ if (status == 0 || status == ~0)
+ return IRQ_NONE;
+
+ prefetch(&hw->st_le[hw->st_idx]);
+ if (likely(__netif_rx_schedule_prep(dev0)))
+ __netif_rx_schedule(dev0);
+ else
+ printk(KERN_DEBUG PFX "irq race detected\n");
return IRQ_HANDLED;
}
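
With this change sky2_intr() only schedules NAPI and all real work moves into sky2_poll(). As a reminder of the two-argument poll contract this kernel version uses, here is a hedged, minimal sketch; process_ring() is a stand-in, not a real function:

#include <linux/netdevice.h>

/* Pre-2.6.24 NAPI: do at most min(quota, *budget) units of work,
 * charge both counters, return nonzero while more work remains, and
 * call netif_rx_complete() exactly once when the ring is drained so
 * the driver can then re-enable its interrupt source. */
static int example_poll(struct net_device *dev, int *budget)
{
	int work_limit = min(dev->quota, *budget);
	int work_done = 0;

	/* work_done = process_ring(dev, work_limit); */

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= work_limit)
		return 1;		/* keep polling */

	netif_rx_complete(dev);		/* done for now */
	return 0;
}
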
@@ -2238,6 +2210,23 @@ static int sky2_reset(struct sky2_hw *hw)
return -EOPNOTSUPP;
}
+ hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
+
+ /* This rev is really old, and requires untested workarounds */
+ if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
+ printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n",
+ pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
+ hw->chip_id, hw->chip_rev);
+ return -EOPNOTSUPP;
+ }
+
+ /* This chip is new and not tested yet */
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ pr_info(PFX "%s: is a version of Yukon 2 chipset that has not been tested yet.\n",
+ pci_name(hw->pdev));
+ pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n");
+ }
+
/* disable ASF */
if (hw->chip_id <= CHIP_ID_YUKON_EC) {
sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
@@ -2258,7 +2247,7 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, B0_CTST, CS_MRST_CLR);
/* clear any PEX errors */
- if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
+ if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
@@ -2271,7 +2260,6 @@ static int sky2_reset(struct sky2_hw *hw)
if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
++hw->ports;
}
- hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
sky2_set_power_state(hw, PCI_D0);
@@ -2337,30 +2325,18 @@ static int sky2_reset(struct sky2_hw *hw)
/* Set the list last index */
sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
- /* These status setup values are copied from SysKonnect's driver */
- if (is_ec_a1(hw)) {
- /* WA for dev. #4.3 */
- sky2_write16(hw, STAT_TX_IDX_TH, 0xfff); /* Tx Threshold */
-
- /* set Status-FIFO watermark */
- sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */
+ sky2_write16(hw, STAT_TX_IDX_TH, 10);
+ sky2_write8(hw, STAT_FIFO_WM, 16);
- /* set Status-FIFO ISR watermark */
- sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */
- sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 10000));
- } else {
- sky2_write16(hw, STAT_TX_IDX_TH, 10);
- sky2_write8(hw, STAT_FIFO_WM, 16);
-
- /* set Status-FIFO ISR watermark */
- if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
- sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
- else
- sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
+ /* set Status-FIFO ISR watermark */
+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
+ sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
+ else
+ sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
- sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
- sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7));
- }
+ sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
+ sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
+ sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
/* enable status unit */
sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
@@ -2743,7 +2719,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
ms = data * 1000;
/* save initial values */
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
if (hw->chip_id == CHIP_ID_YUKON_XL) {
u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
@@ -2759,9 +2735,9 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
sky2_led(hw, port, onoff);
onoff = !onoff;
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
interrupted = msleep_interruptible(250);
- down(&sky2->phy_sema);
+ spin_lock_bh(&sky2->phy_lock);
ms -= 250;
}
@@ -2776,7 +2752,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data)
gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
}
- up(&sky2->phy_sema);
+ spin_unlock_bh(&sky2->phy_lock);
return 0;
}
@@ -2806,38 +2782,6 @@ static int sky2_set_pauseparam(struct net_device *dev,
return err;
}
-#ifdef CONFIG_PM
-static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- struct sky2_port *sky2 = netdev_priv(dev);
-
- wol->supported = WAKE_MAGIC;
- wol->wolopts = sky2->wol ? WAKE_MAGIC : 0;
-}
-
-static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- struct sky2_port *sky2 = netdev_priv(dev);
- struct sky2_hw *hw = sky2->hw;
-
- if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
- return -EOPNOTSUPP;
-
- sky2->wol = wol->wolopts == WAKE_MAGIC;
-
- if (sky2->wol) {
- memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
-
- sky2_write16(hw, WOL_CTRL_STAT,
- WOL_CTL_ENA_PME_ON_MAGIC_PKT |
- WOL_CTL_ENA_MAGIC_PKT_UNIT);
- } else
- sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
-
- return 0;
-}
-#endif
-
static int sky2_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *ecmd)
{
@@ -2878,19 +2822,11 @@ static int sky2_set_coalesce(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
- const u32 tmin = sky2_clk2us(hw, 1);
- const u32 tmax = 5000;
-
- if (ecmd->tx_coalesce_usecs != 0 &&
- (ecmd->tx_coalesce_usecs < tmin || ecmd->tx_coalesce_usecs > tmax))
- return -EINVAL;
-
- if (ecmd->rx_coalesce_usecs != 0 &&
- (ecmd->rx_coalesce_usecs < tmin || ecmd->rx_coalesce_usecs > tmax))
- return -EINVAL;
+ const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
- if (ecmd->rx_coalesce_usecs_irq != 0 &&
- (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax))
+ if (ecmd->tx_coalesce_usecs > tmax ||
+ ecmd->rx_coalesce_usecs > tmax ||
+ ecmd->rx_coalesce_usecs_irq > tmax)
return -EINVAL;
if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
@@ -3025,10 +2961,6 @@ static struct ethtool_ops sky2_ethtool_ops = {
.set_ringparam = sky2_set_ringparam,
.get_pauseparam = sky2_get_pauseparam,
.set_pauseparam = sky2_set_pauseparam,
-#ifdef CONFIG_PM
- .get_wol = sky2_get_wol,
- .set_wol = sky2_set_wol,
-#endif
.phys_id = sky2_phys_id,
.get_stats_count = sky2_get_stats_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
@@ -3082,16 +3014,15 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->speed = -1;
sky2->advertising = sky2_supported_modes(hw);
- /* Receive checksum disabled for Yukon XL
+ /* Receive checksum disabled for Yukon XL
* because of observed problems with incorrect
* values when multiple packets are received in one interrupt
*/
sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
- INIT_WORK(&sky2->phy_task, sky2_phy_task, sky2);
- init_MUTEX(&sky2->phy_sema);
+ spin_lock_init(&sky2->phy_lock);
sky2->tx_pending = TX_DEF_PENDING;
- sky2->rx_pending = is_ec_a1(hw) ? 8 : RX_DEF_PENDING;
+ sky2->rx_pending = RX_DEF_PENDING;
sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN);
hw->dev[port] = dev;
@@ -3133,6 +3064,66 @@ static void __devinit sky2_show_addr(struct net_device *dev)
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}
+/* Handle software interrupt used during MSI test */
+static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ struct sky2_hw *hw = dev_id;
+ u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ if (status & Y2_IS_IRQ_SW) {
+ hw->msi_detected = 1;
+ wake_up(&hw->msi_wait);
+ sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+ }
+ sky2_write32(hw, B0_Y2_SP_ICR, 2);
+
+ return IRQ_HANDLED;
+}
+
+/* Test interrupt path by forcing a software IRQ */
+static int __devinit sky2_test_msi(struct sky2_hw *hw)
+{
+ struct pci_dev *pdev = hw->pdev;
+ int err;
+
+ sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
+
+ err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw);
+ if (err) {
+ printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+ pci_name(pdev), pdev->irq);
+ return err;
+ }
+
+ init_waitqueue_head (&hw->msi_wait);
+
+ sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
+ wmb();
+
+ wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
+
+ if (!hw->msi_detected) {
+ /* MSI test failed, go back to INTx mode */
+ printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
+ "switching to INTx mode. Please report this failure to "
+ "the PCI maintainer and include system chipset information.\n",
+ pci_name(pdev));
+
+ err = -EOPNOTSUPP;
+ sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
+ }
+
+ sky2_write32(hw, B0_IMSK, 0);
+
+ free_irq(pdev->irq, hw);
+
+ return err;
+}
+
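
The MSI self-test added above boils down to a small handshake: install a throwaway handler, ask the chip for a software interrupt, and sleep until the handler proves the message arrived. A generic sketch of that handshake follows; the structure and names are hypothetical and the device-specific trigger is left out:

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>

struct msi_probe {
	wait_queue_head_t	wait;
	int			detected;
};

/* Throwaway handler: note that the interrupt arrived and wake the prober. */
static irqreturn_t msi_probe_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct msi_probe *p = dev_id;

	p->detected = 1;
	wake_up(&p->wait);
	return IRQ_HANDLED;
}

/* Returns 0 if the interrupt was seen, -EOPNOTSUPP so the caller can fall
 * back to INTx otherwise.  The caller is assumed to have enabled MSI,
 * called init_waitqueue_head(&p->wait), done request_irq() with the
 * handler above, and poked the device's software-interrupt bit. */
static int msi_probe_wait(struct msi_probe *p)
{
	wait_event_timeout(p->wait, p->detected, HZ / 10);
	return p->detected ? 0 : -EOPNOTSUPP;
}
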
static int __devinit sky2_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -3201,7 +3192,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
hw->pm_cap = pm_cap;
- spin_lock_init(&hw->hw_lock);
#ifdef __BIG_ENDIAN
/* byte swap descriptors in hardware */
@@ -3254,21 +3244,29 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
}
}
- err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
+ if (!disable_msi && pci_enable_msi(pdev) == 0) {
+ err = sky2_test_msi(hw);
+ if (err == -EOPNOTSUPP)
+ pci_disable_msi(pdev);
+ else if (err)
+ goto err_out_unregister;
+ }
+
+ err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
if (err) {
printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
pci_name(pdev), pdev->irq);
goto err_out_unregister;
}
- hw->intr_mask = Y2_IS_BASE;
- sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
pci_set_drvdata(pdev, hw);
return 0;
err_out_unregister:
+ pci_disable_msi(pdev);
if (dev1) {
unregister_netdev(dev1);
free_netdev(dev1);
@@ -3311,6 +3309,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
sky2_read8(hw, B0_CTST);
free_irq(pdev->irq, hw);
+ pci_disable_msi(pdev);
pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index dce955c76f3..d63cd5a1b71 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -278,13 +278,11 @@ enum {
Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */
Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */
- Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU |
- Y2_IS_POLL_CHK | Y2_IS_TWSI_RDY |
- Y2_IS_IRQ_SW | Y2_IS_TIMINT,
- Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 |
- Y2_IS_CHK_RX1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXS1,
- Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
- Y2_IS_CHK_RX2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_TXS2,
+ Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU,
+ Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1
+ | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1,
+ Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2
+ | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2,
};
/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
@@ -1832,6 +1830,7 @@ struct sky2_port {
struct net_device *netdev;
unsigned port;
u32 msg_enable;
+ spinlock_t phy_lock;
spinlock_t tx_lock ____cacheline_aligned_in_smp;
struct tx_ring_info *tx_ring;
@@ -1840,7 +1839,6 @@ struct sky2_port {
u16 tx_prod; /* next le to use */
u32 tx_addr64;
u16 tx_pending;
- u16 tx_last_put;
u16 tx_last_mss;
struct ring_info *rx_ring ____cacheline_aligned_in_smp;
@@ -1849,7 +1847,6 @@ struct sky2_port {
u16 rx_next; /* next re to check */
u16 rx_put; /* next le index to use */
u16 rx_pending;
- u16 rx_last_put;
u16 rx_bufsize;
#ifdef SKY2_VLAN_TAG_USED
u16 rx_tag;
@@ -1865,20 +1862,15 @@ struct sky2_port {
u8 rx_pause;
u8 tx_pause;
u8 rx_csum;
- u8 wol;
struct net_device_stats net_stats;
- struct work_struct phy_task;
- struct semaphore phy_sema;
};
struct sky2_hw {
void __iomem *regs;
struct pci_dev *pdev;
struct net_device *dev[2];
- spinlock_t hw_lock;
- u32 intr_mask;
int pm_cap;
u8 chip_id;
@@ -1889,6 +1881,8 @@ struct sky2_hw {
struct sky2_status_le *st_le;
u32 st_idx;
dma_addr_t st_dma;
+ int msi_detected;
+ wait_queue_head_t msi_wait;
};
/* Register accessor for memory mapped device */
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 7ec08127c9d..0e9833adf9f 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -215,15 +215,12 @@ struct smc_local {
spinlock_t lock;
-#ifdef SMC_CAN_USE_DATACS
- u32 __iomem *datacs;
-#endif
-
#ifdef SMC_USE_PXA_DMA
/* DMA needs the physical address of the chip */
u_long physaddr;
#endif
void __iomem *base;
+ void __iomem *datacs;
};
#if SMC_DEBUG > 0
@@ -2104,9 +2101,8 @@ static int smc_enable_device(struct platform_device *pdev)
* Set the appropriate byte/word mode.
*/
ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
-#ifndef SMC_CAN_USE_16BIT
- ecsr |= ECSR_IOIS8;
-#endif
+ if (!SMC_CAN_USE_16BIT)
+ ecsr |= ECSR_IOIS8;
writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
local_irq_restore(flags);
@@ -2143,40 +2139,39 @@ static void smc_release_attrib(struct platform_device *pdev)
release_mem_region(res->start, ATTRIB_SIZE);
}
-#ifdef SMC_CAN_USE_DATACS
-static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
+static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
{
- struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
- struct smc_local *lp = netdev_priv(ndev);
+ if (SMC_CAN_USE_DATACS) {
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
+ struct smc_local *lp = netdev_priv(ndev);
- if (!res)
- return;
+ if (!res)
+ return;
- if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
- printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
- return;
- }
+ if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
+ printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
+ return;
+ }
- lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
+ lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
+ }
}
static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev)
{
- struct smc_local *lp = netdev_priv(ndev);
- struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
+ if (SMC_CAN_USE_DATACS) {
+ struct smc_local *lp = netdev_priv(ndev);
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
- if (lp->datacs)
- iounmap(lp->datacs);
+ if (lp->datacs)
+ iounmap(lp->datacs);
- lp->datacs = NULL;
+ lp->datacs = NULL;
- if (res)
- release_mem_region(res->start, SMC_DATA_EXTENT);
+ if (res)
+ release_mem_region(res->start, SMC_DATA_EXTENT);
+ }
}
-#else
-static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) {}
-static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) {}
-#endif
/*
* smc_init(void)
@@ -2221,6 +2216,10 @@ static int smc_drv_probe(struct platform_device *pdev)
ndev->dma = (unsigned char)-1;
ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
ret = smc_request_attrib(pdev);
if (ret)
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index e0efd1964e7..e1be1af5120 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -275,7 +275,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_insw(a,r,p,l) readsw ((void*) ((a) + (r)), p, l)
#define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7A40X_IOBARRIER; })
-static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
+#define SMC_outsw LPD7A40X_SMC_outsw
+
+static inline void LPD7A40X_SMC_outsw(unsigned long a, int r,
+ unsigned char* p, int l)
{
unsigned short* ps = (unsigned short*) p;
while (l-- > 0) {
@@ -342,10 +345,6 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
#endif
-#ifndef SMC_IRQ_FLAGS
-#define SMC_IRQ_FLAGS SA_TRIGGER_RISING
-#endif
-
#ifdef SMC_USE_PXA_DMA
/*
* Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
@@ -441,10 +440,85 @@ smc_pxa_dma_irq(int dma, void *dummy, struct pt_regs *regs)
#endif /* SMC_USE_PXA_DMA */
-/* Because of bank switching, the LAN91x uses only 16 I/O ports */
+/*
+ * Everything a particular hardware setup needs should have been defined
+ * at this point. Add stubs for the undefined cases, mainly to avoid
+ * compilation warnings since they'll be optimized away, or to prevent buggy
+ * use of them.
+ */
+
+#if ! SMC_CAN_USE_32BIT
+#define SMC_inl(ioaddr, reg) ({ BUG(); 0; })
+#define SMC_outl(x, ioaddr, reg) BUG()
+#define SMC_insl(a, r, p, l) BUG()
+#define SMC_outsl(a, r, p, l) BUG()
+#endif
+
+#if !defined(SMC_insl) || !defined(SMC_outsl)
+#define SMC_insl(a, r, p, l) BUG()
+#define SMC_outsl(a, r, p, l) BUG()
+#endif
+
+#if ! SMC_CAN_USE_16BIT
+
+/*
+ * Any 16-bit access is performed with two 8-bit accesses if the hardware
+ * can't do it directly. Most registers are 16-bit so those are mandatory.
+ */
+#define SMC_outw(x, ioaddr, reg) \
+ do { \
+ unsigned int __val16 = (x); \
+ SMC_outb( __val16, ioaddr, reg ); \
+ SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
+ } while (0)
+#define SMC_inw(ioaddr, reg) \
+ ({ \
+ unsigned int __val16; \
+ __val16 = SMC_inb( ioaddr, reg ); \
+ __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
+ __val16; \
+ })
+
+#define SMC_insw(a, r, p, l) BUG()
+#define SMC_outsw(a, r, p, l) BUG()
+
+#endif
+
+#if !defined(SMC_insw) || !defined(SMC_outsw)
+#define SMC_insw(a, r, p, l) BUG()
+#define SMC_outsw(a, r, p, l) BUG()
+#endif
+
+#if ! SMC_CAN_USE_8BIT
+#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
+#define SMC_outb(x, ioaddr, reg) BUG()
+#define SMC_insb(a, r, p, l) BUG()
+#define SMC_outsb(a, r, p, l) BUG()
+#endif
+
+#if !defined(SMC_insb) || !defined(SMC_outsb)
+#define SMC_insb(a, r, p, l) BUG()
+#define SMC_outsb(a, r, p, l) BUG()
+#endif
+
+#ifndef SMC_CAN_USE_DATACS
+#define SMC_CAN_USE_DATACS 0
+#endif
+
#ifndef SMC_IO_SHIFT
#define SMC_IO_SHIFT 0
#endif
+
+#ifndef SMC_IRQ_FLAGS
+#define SMC_IRQ_FLAGS SA_TRIGGER_RISING
+#endif
+
+#ifndef SMC_INTERRUPT_PREAMBLE
+#define SMC_INTERRUPT_PREAMBLE
+#endif
+
+
+/* Because of bank switching, the LAN91x uses only 16 I/O ports */
#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT)
#define SMC_DATA_EXTENT (4)
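
The stubs above replace #ifdef blocks with ordinary if (SMC_CAN_USE_xxx) tests: because the flags are compile-time constants, the compiler discards the dead branch, and the BUG() stubs only exist so the discarded branch still parses. A tiny sketch of the idiom with made-up names:

#include <linux/kernel.h>
#include <asm/io.h>

#define WIDGET_CAN_USE_32BIT	0	/* platform-specific 0/1 constant */

#if WIDGET_CAN_USE_32BIT
#define widget_outl(v, a)	writel(v, a)
#else
#define widget_outl(v, a)	BUG()	/* stub: this branch is unreachable */
#endif

static inline void widget_put(u32 val, void __iomem *addr)
{
	if (WIDGET_CAN_USE_32BIT) {	/* folded away at compile time */
		widget_outl(val, addr);
	} else {
		writew(val & 0xffff, addr);
		writew(val >> 16, addr + 2);
	}
}
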
@@ -817,6 +891,11 @@ static const char * chip_ids[ 16 ] = {
* Note: the following macros do *not* select the bank -- this must
* be done separately as needed in the main code. The SMC_REG() macro
* only uses the bank argument for debugging purposes (when enabled).
+ *
+ * Note: despite inline functions being safer, everything leading to this
+ * should preferably be macros to let BUG() display the line number in
+ * the core source code since we're interested in the top call site
+ * not in any inline function location.
*/
#if SMC_DEBUG > 0
@@ -834,62 +913,142 @@ static const char * chip_ids[ 16 ] = {
#define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT)
#endif
-#if SMC_CAN_USE_8BIT
-#define SMC_GET_PN() SMC_inb( ioaddr, PN_REG )
-#define SMC_SET_PN(x) SMC_outb( x, ioaddr, PN_REG )
-#define SMC_GET_AR() SMC_inb( ioaddr, AR_REG )
-#define SMC_GET_TXFIFO() SMC_inb( ioaddr, TXFIFO_REG )
-#define SMC_GET_RXFIFO() SMC_inb( ioaddr, RXFIFO_REG )
-#define SMC_GET_INT() SMC_inb( ioaddr, INT_REG )
-#define SMC_ACK_INT(x) SMC_outb( x, ioaddr, INT_REG )
-#define SMC_GET_INT_MASK() SMC_inb( ioaddr, IM_REG )
-#define SMC_SET_INT_MASK(x) SMC_outb( x, ioaddr, IM_REG )
-#else
-#define SMC_GET_PN() (SMC_inw( ioaddr, PN_REG ) & 0xFF)
-#define SMC_SET_PN(x) SMC_outw( x, ioaddr, PN_REG )
-#define SMC_GET_AR() (SMC_inw( ioaddr, PN_REG ) >> 8)
-#define SMC_GET_TXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) & 0xFF)
-#define SMC_GET_RXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) >> 8)
-#define SMC_GET_INT() (SMC_inw( ioaddr, INT_REG ) & 0xFF)
+/*
+ * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
+ * aligned to a 32 bit boundary. I tell you that does exist!
+ * Fortunately the affected register accesses can be easily worked around
+ * since we can write zeroes to the preceding 16 bits without adverse
+ * effects and use a 32-bit access.
+ *
+ * Enforce it on any 32-bit capable setup for now.
+ */
+#define SMC_MUST_ALIGN_WRITE SMC_CAN_USE_32BIT
+
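
A concrete illustration of the workaround described in the comment above, using a hypothetical register pair and assuming a typical little-endian byte-lane mapping: the 16-bit register of interest sits at offset 2, so a single aligned 32-bit store at offset 0 writes harmless zeroes over the low half and lands the value where a misaligned 16-bit cycle would fail.

#include <linux/types.h>
#include <asm/io.h>

/* Hypothetical layout: offset 0 tolerates being overwritten with zeroes,
 * offset 2 is the register we actually want to set. */
static inline void set_upper_reg16(void __iomem *ioaddr, u16 val)
{
	writel((u32)val << 16, ioaddr);	/* one aligned 32-bit cycle */
}
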
+#define SMC_GET_PN() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, PN_REG)) \
+ : (SMC_inw(ioaddr, PN_REG) & 0xFF) )
+
+#define SMC_SET_PN(x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(0, 2)); \
+ else if (SMC_CAN_USE_8BIT) \
+ SMC_outb(x, ioaddr, PN_REG); \
+ else \
+ SMC_outw(x, ioaddr, PN_REG); \
+ } while (0)
+
+#define SMC_GET_AR() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, AR_REG)) \
+ : (SMC_inw(ioaddr, PN_REG) >> 8) )
+
+#define SMC_GET_TXFIFO() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, TXFIFO_REG)) \
+ : (SMC_inw(ioaddr, TXFIFO_REG) & 0xFF) )
+
+#define SMC_GET_RXFIFO() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, RXFIFO_REG)) \
+ : (SMC_inw(ioaddr, TXFIFO_REG) >> 8) )
+
+#define SMC_GET_INT() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, INT_REG)) \
+ : (SMC_inw(ioaddr, INT_REG) & 0xFF) )
+
#define SMC_ACK_INT(x) \
do { \
- unsigned long __flags; \
- int __mask; \
- local_irq_save(__flags); \
- __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \
- SMC_outw( __mask | (x), ioaddr, INT_REG ); \
- local_irq_restore(__flags); \
+ if (SMC_CAN_USE_8BIT) \
+ SMC_outb(x, ioaddr, INT_REG); \
+ else { \
+ unsigned long __flags; \
+ int __mask; \
+ local_irq_save(__flags); \
+ __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \
+ SMC_outw( __mask | (x), ioaddr, INT_REG ); \
+ local_irq_restore(__flags); \
+ } \
+ } while (0)
+
+#define SMC_GET_INT_MASK() \
+ ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, IM_REG)) \
+ : (SMC_inw( ioaddr, INT_REG ) >> 8) )
+
+#define SMC_SET_INT_MASK(x) \
+ do { \
+ if (SMC_CAN_USE_8BIT) \
+ SMC_outb(x, ioaddr, IM_REG); \
+ else \
+ SMC_outw((x) << 8, ioaddr, INT_REG); \
+ } while (0)
+
+#define SMC_CURRENT_BANK() SMC_inw(ioaddr, BANK_SELECT)
+
+#define SMC_SELECT_BANK(x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE) \
+ SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \
+ else \
+ SMC_outw(x, ioaddr, BANK_SELECT); \
+ } while (0)
+
+#define SMC_GET_BASE() SMC_inw(ioaddr, BASE_REG)
+
+#define SMC_SET_BASE(x) SMC_outw(x, ioaddr, BASE_REG)
+
+#define SMC_GET_CONFIG() SMC_inw(ioaddr, CONFIG_REG)
+
+#define SMC_SET_CONFIG(x) SMC_outw(x, ioaddr, CONFIG_REG)
+
+#define SMC_GET_COUNTER() SMC_inw(ioaddr, COUNTER_REG)
+
+#define SMC_GET_CTL() SMC_inw(ioaddr, CTL_REG)
+
+#define SMC_SET_CTL(x) SMC_outw(x, ioaddr, CTL_REG)
+
+#define SMC_GET_MII() SMC_inw(ioaddr, MII_REG)
+
+#define SMC_SET_MII(x) SMC_outw(x, ioaddr, MII_REG)
+
+#define SMC_GET_MIR() SMC_inw(ioaddr, MIR_REG)
+
+#define SMC_SET_MIR(x) SMC_outw(x, ioaddr, MIR_REG)
+
+#define SMC_GET_MMU_CMD() SMC_inw(ioaddr, MMU_CMD_REG)
+
+#define SMC_SET_MMU_CMD(x) SMC_outw(x, ioaddr, MMU_CMD_REG)
+
+#define SMC_GET_FIFO() SMC_inw(ioaddr, FIFO_REG)
+
+#define SMC_GET_PTR() SMC_inw(ioaddr, PTR_REG)
+
+#define SMC_SET_PTR(x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(4, 2)); \
+ else \
+ SMC_outw(x, ioaddr, PTR_REG); \
} while (0)
-#define SMC_GET_INT_MASK() (SMC_inw( ioaddr, INT_REG ) >> 8)
-#define SMC_SET_INT_MASK(x) SMC_outw( (x) << 8, ioaddr, INT_REG )
-#endif
-#define SMC_CURRENT_BANK() SMC_inw( ioaddr, BANK_SELECT )
-#define SMC_SELECT_BANK(x) SMC_outw( x, ioaddr, BANK_SELECT )
-#define SMC_GET_BASE() SMC_inw( ioaddr, BASE_REG )
-#define SMC_SET_BASE(x) SMC_outw( x, ioaddr, BASE_REG )
-#define SMC_GET_CONFIG() SMC_inw( ioaddr, CONFIG_REG )
-#define SMC_SET_CONFIG(x) SMC_outw( x, ioaddr, CONFIG_REG )
-#define SMC_GET_COUNTER() SMC_inw( ioaddr, COUNTER_REG )
-#define SMC_GET_CTL() SMC_inw( ioaddr, CTL_REG )
-#define SMC_SET_CTL(x) SMC_outw( x, ioaddr, CTL_REG )
-#define SMC_GET_MII() SMC_inw( ioaddr, MII_REG )
-#define SMC_SET_MII(x) SMC_outw( x, ioaddr, MII_REG )
-#define SMC_GET_MIR() SMC_inw( ioaddr, MIR_REG )
-#define SMC_SET_MIR(x) SMC_outw( x, ioaddr, MIR_REG )
-#define SMC_GET_MMU_CMD() SMC_inw( ioaddr, MMU_CMD_REG )
-#define SMC_SET_MMU_CMD(x) SMC_outw( x, ioaddr, MMU_CMD_REG )
-#define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG )
-#define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG )
-#define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG )
-#define SMC_GET_EPH_STATUS() SMC_inw( ioaddr, EPH_STATUS_REG )
-#define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG )
-#define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG )
-#define SMC_GET_REV() SMC_inw( ioaddr, REV_REG )
-#define SMC_GET_RPC() SMC_inw( ioaddr, RPC_REG )
-#define SMC_SET_RPC(x) SMC_outw( x, ioaddr, RPC_REG )
-#define SMC_GET_TCR() SMC_inw( ioaddr, TCR_REG )
-#define SMC_SET_TCR(x) SMC_outw( x, ioaddr, TCR_REG )
+#define SMC_GET_EPH_STATUS() SMC_inw(ioaddr, EPH_STATUS_REG)
+
+#define SMC_GET_RCR() SMC_inw(ioaddr, RCR_REG)
+
+#define SMC_SET_RCR(x) SMC_outw(x, ioaddr, RCR_REG)
+
+#define SMC_GET_REV() SMC_inw(ioaddr, REV_REG)
+
+#define SMC_GET_RPC() SMC_inw(ioaddr, RPC_REG)
+
+#define SMC_SET_RPC(x) \
+ do { \
+ if (SMC_MUST_ALIGN_WRITE) \
+ SMC_outl((x)<<16, ioaddr, SMC_REG(8, 0)); \
+ else \
+ SMC_outw(x, ioaddr, RPC_REG); \
+ } while (0)
+
+#define SMC_GET_TCR() SMC_inw(ioaddr, TCR_REG)
+
+#define SMC_SET_TCR(x) SMC_outw(x, ioaddr, TCR_REG)
#ifndef SMC_GET_MAC_ADDR
#define SMC_GET_MAC_ADDR(addr) \
@@ -920,151 +1079,84 @@ static const char * chip_ids[ 16 ] = {
SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \
} while (0)
-#if SMC_CAN_USE_32BIT
-/*
- * Some setups just can't write 8 or 16 bits reliably when not aligned
- * to a 32 bit boundary. I tell you that exists!
- * We re-do the ones here that can be easily worked around if they can have
- * their low parts written to 0 without adverse effects.
- */
-#undef SMC_SELECT_BANK
-#define SMC_SELECT_BANK(x) SMC_outl( (x)<<16, ioaddr, 12<<SMC_IO_SHIFT )
-#undef SMC_SET_RPC
-#define SMC_SET_RPC(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(8, 0) )
-#undef SMC_SET_PN
-#define SMC_SET_PN(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(0, 2) )
-#undef SMC_SET_PTR
-#define SMC_SET_PTR(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(4, 2) )
-#endif
-
-#if SMC_CAN_USE_32BIT
-#define SMC_PUT_PKT_HDR(status, length) \
- SMC_outl( (status) | (length) << 16, ioaddr, DATA_REG )
-#define SMC_GET_PKT_HDR(status, length) \
- do { \
- unsigned int __val = SMC_inl( ioaddr, DATA_REG ); \
- (status) = __val & 0xffff; \
- (length) = __val >> 16; \
- } while (0)
-#else
#define SMC_PUT_PKT_HDR(status, length) \
do { \
- SMC_outw( status, ioaddr, DATA_REG ); \
- SMC_outw( length, ioaddr, DATA_REG ); \
- } while (0)
-#define SMC_GET_PKT_HDR(status, length) \
- do { \
- (status) = SMC_inw( ioaddr, DATA_REG ); \
- (length) = SMC_inw( ioaddr, DATA_REG ); \
+ if (SMC_CAN_USE_32BIT) \
+ SMC_outl((status) | (length)<<16, ioaddr, DATA_REG); \
+ else { \
+ SMC_outw(status, ioaddr, DATA_REG); \
+ SMC_outw(length, ioaddr, DATA_REG); \
+ } \
} while (0)
-#endif
-#if SMC_CAN_USE_32BIT
-#define _SMC_PUSH_DATA(p, l) \
+#define SMC_GET_PKT_HDR(status, length) \
do { \
- char *__ptr = (p); \
- int __len = (l); \
- if (__len >= 2 && (unsigned long)__ptr & 2) { \
- __len -= 2; \
- SMC_outw( *(u16 *)__ptr, ioaddr, DATA_REG ); \
- __ptr += 2; \
- } \
- SMC_outsl( ioaddr, DATA_REG, __ptr, __len >> 2); \
- if (__len & 2) { \
- __ptr += (__len & ~3); \
- SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
+ if (SMC_CAN_USE_32BIT) { \
+ unsigned int __val = SMC_inl(ioaddr, DATA_REG); \
+ (status) = __val & 0xffff; \
+ (length) = __val >> 16; \
+ } else { \
+ (status) = SMC_inw(ioaddr, DATA_REG); \
+ (length) = SMC_inw(ioaddr, DATA_REG); \
} \
} while (0)
-#define _SMC_PULL_DATA(p, l) \
- do { \
- char *__ptr = (p); \
- int __len = (l); \
- if ((unsigned long)__ptr & 2) { \
- /* \
- * We want 32bit alignment here. \
- * Since some buses perform a full 32bit \
- * fetch even for 16bit data we can't use \
- * SMC_inw() here. Back both source (on chip \
- * and destination) pointers of 2 bytes. \
- */ \
- __ptr -= 2; \
- __len += 2; \
- SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \
- } \
- __len += 2; \
- SMC_insl( ioaddr, DATA_REG, __ptr, __len >> 2); \
- } while (0)
-#elif SMC_CAN_USE_16BIT
-#define _SMC_PUSH_DATA(p, l) SMC_outsw( ioaddr, DATA_REG, p, (l) >> 1 )
-#define _SMC_PULL_DATA(p, l) SMC_insw ( ioaddr, DATA_REG, p, (l) >> 1 )
-#elif SMC_CAN_USE_8BIT
-#define _SMC_PUSH_DATA(p, l) SMC_outsb( ioaddr, DATA_REG, p, l )
-#define _SMC_PULL_DATA(p, l) SMC_insb ( ioaddr, DATA_REG, p, l )
-#endif
-#if ! SMC_CAN_USE_16BIT
-#define SMC_outw(x, ioaddr, reg) \
+#define SMC_PUSH_DATA(p, l) \
do { \
- unsigned int __val16 = (x); \
- SMC_outb( __val16, ioaddr, reg ); \
- SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
+ if (SMC_CAN_USE_32BIT) { \
+ void *__ptr = (p); \
+ int __len = (l); \
+ void *__ioaddr = ioaddr; \
+ if (__len >= 2 && (unsigned long)__ptr & 2) { \
+ __len -= 2; \
+ SMC_outw(*(u16 *)__ptr, ioaddr, DATA_REG); \
+ __ptr += 2; \
+ } \
+ if (SMC_CAN_USE_DATACS && lp->datacs) \
+ __ioaddr = lp->datacs; \
+ SMC_outsl(__ioaddr, DATA_REG, __ptr, __len>>2); \
+ if (__len & 2) { \
+ __ptr += (__len & ~3); \
+ SMC_outw(*((u16 *)__ptr), ioaddr, DATA_REG); \
+ } \
+ } else if (SMC_CAN_USE_16BIT) \
+ SMC_outsw(ioaddr, DATA_REG, p, (l) >> 1); \
+ else if (SMC_CAN_USE_8BIT) \
+ SMC_outsb(ioaddr, DATA_REG, p, l); \
} while (0)
-#define SMC_inw(ioaddr, reg) \
- ({ \
- unsigned int __val16; \
- __val16 = SMC_inb( ioaddr, reg ); \
- __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
- __val16; \
- })
-#endif
-
-#ifdef SMC_CAN_USE_DATACS
-#define SMC_PUSH_DATA(p, l) \
- if ( lp->datacs ) { \
- unsigned char *__ptr = (p); \
- int __len = (l); \
- if (__len >= 2 && (unsigned long)__ptr & 2) { \
- __len -= 2; \
- SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
- __ptr += 2; \
- } \
- outsl(lp->datacs, __ptr, __len >> 2); \
- if (__len & 2) { \
- __ptr += (__len & ~3); \
- SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \
- } \
- } else { \
- _SMC_PUSH_DATA(p, l); \
- }
#define SMC_PULL_DATA(p, l) \
- if ( lp->datacs ) { \
- unsigned char *__ptr = (p); \
- int __len = (l); \
- if ((unsigned long)__ptr & 2) { \
- /* \
- * We want 32bit alignment here. \
- * Since some buses perform a full 32bit \
- * fetch even for 16bit data we can't use \
- * SMC_inw() here. Back both source (on chip \
- * and destination) pointers of 2 bytes. \
- */ \
- __ptr -= 2; \
+ do { \
+ if (SMC_CAN_USE_32BIT) { \
+ void *__ptr = (p); \
+ int __len = (l); \
+ void *__ioaddr = ioaddr; \
+ if ((unsigned long)__ptr & 2) { \
+ /* \
+ * We want 32bit alignment here. \
+ * Since some buses perform a full \
+ * 32bit fetch even for 16bit data \
+ * we can't use SMC_inw() here. \
+			 * Back both the source (on-chip) and \
+			 * destination pointers by 2 bytes. \
+			 * This is possible since the call to \
+			 * SMC_GET_PKT_HDR() already advanced \
+			 * the source pointer by 4 bytes, and \
+			 * the skb_reserve(skb, 2) advanced \
+			 * the destination pointer by 2 bytes. \
+ */ \
+ __ptr -= 2; \
+ __len += 2; \
+ SMC_SET_PTR(2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
+ } \
+ if (SMC_CAN_USE_DATACS && lp->datacs) \
+ __ioaddr = lp->datacs; \
__len += 2; \
- SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \
- } \
- __len += 2; \
- insl( lp->datacs, __ptr, __len >> 2); \
- } else { \
- _SMC_PULL_DATA(p, l); \
- }
-#else
-#define SMC_PUSH_DATA(p, l) _SMC_PUSH_DATA(p, l)
-#define SMC_PULL_DATA(p, l) _SMC_PULL_DATA(p, l)
-#endif
-
-#if !defined (SMC_INTERRUPT_PREAMBLE)
-# define SMC_INTERRUPT_PREAMBLE
-#endif
+ SMC_insl(__ioaddr, DATA_REG, __ptr, __len>>2); \
+ } else if (SMC_CAN_USE_16BIT) \
+ SMC_insw(ioaddr, DATA_REG, p, (l) >> 1); \
+ else if (SMC_CAN_USE_8BIT) \
+ SMC_insb(ioaddr, DATA_REG, p, l); \
+ } while (0)
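
The 32-bit branch of SMC_PUSH_DATA above peels a leading halfword off an unaligned buffer, bursts the aligned middle, then writes the odd trailing halfword. A standalone sketch of that shape, assuming a halfword-aligned buffer and that writesl()/writew() are available on the platform (names here are illustrative, fifo stands for the data register):

#include <linux/types.h>
#include <asm/io.h>

static void fifo_push(void __iomem *fifo, void *buf, int len)
{
	u8 *p = buf;

	/* Align the buffer pointer to 4 bytes with one 16-bit write. */
	if (len >= 2 && ((unsigned long)p & 2)) {
		writew(*(u16 *)p, fifo);
		p += 2;
		len -= 2;
	}

	writesl(fifo, p, len >> 2);	/* aligned 32-bit burst */

	/* Odd trailing halfword, if any. */
	if (len & 2)
		writew(*(u16 *)(p + (len & ~3)), fifo);
}
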
#endif /* _SMC91X_H_ */
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 28ce47a0240..38cd30cb7c7 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -55,6 +55,7 @@
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
+#include <linux/mutex.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -2284,7 +2285,7 @@ static void gem_reset_task(void *data)
{
struct gem *gp = (struct gem *) data;
- down(&gp->pm_sem);
+ mutex_lock(&gp->pm_mutex);
netif_poll_disable(gp->dev);
@@ -2311,7 +2312,7 @@ static void gem_reset_task(void *data)
netif_poll_enable(gp->dev);
- up(&gp->pm_sem);
+ mutex_unlock(&gp->pm_mutex);
}
@@ -2320,14 +2321,14 @@ static int gem_open(struct net_device *dev)
struct gem *gp = dev->priv;
int rc = 0;
- down(&gp->pm_sem);
+ mutex_lock(&gp->pm_mutex);
/* We need the cell enabled */
if (!gp->asleep)
rc = gem_do_start(dev);
gp->opened = (rc == 0);
- up(&gp->pm_sem);
+ mutex_unlock(&gp->pm_mutex);
return rc;
}
@@ -2340,13 +2341,13 @@ static int gem_close(struct net_device *dev)
* our caller (dev_close) already did it for us
*/
- down(&gp->pm_sem);
+ mutex_lock(&gp->pm_mutex);
gp->opened = 0;
if (!gp->asleep)
gem_do_stop(dev, 0);
- up(&gp->pm_sem);
+ mutex_unlock(&gp->pm_mutex);
return 0;
}
@@ -2358,7 +2359,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
struct gem *gp = dev->priv;
unsigned long flags;
- down(&gp->pm_sem);
+ mutex_lock(&gp->pm_mutex);
netif_poll_disable(dev);
@@ -2391,11 +2392,11 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
/* Stop the link timer */
del_timer_sync(&gp->link_timer);
- /* Now we release the semaphore to not block the reset task who
+ /* Now we release the mutex to not block the reset task who
* can take it too. We are marked asleep, so there will be no
* conflict here
*/
- up(&gp->pm_sem);
+ mutex_unlock(&gp->pm_mutex);
/* Wait for a pending reset task to complete */
while (gp->reset_task_pending)
@@ -2424,7 +2425,7 @@ static int gem_resume(struct pci_dev *pdev)
printk(KERN_INFO "%s: resuming\n", dev->name);
- down(&gp->pm_sem);
+ mutex_lock(&gp->pm_mutex);
/* Keep the cell enabled during the entire operation, no need to
* take a lock here tho since nothing else can happen while we are
@@ -2440,7 +2441,7 @@ static int gem_resume(struct pci_dev *pdev)
* still asleep, a new sleep cycle may bring it back
*/
gem_put_cell(gp);
- up(&gp->pm_sem);
+ mutex_unlock(&gp->pm_mutex);
return 0;
}
pci_set_master(gp->pdev);
@@ -2486,7 +2487,7 @@ static int gem_resume(struct pci_dev *pdev)
netif_poll_enable(dev);
- up(&gp->pm_sem);
+ mutex_unlock(&gp->pm_mutex);
return 0;
}
@@ -2591,7 +2592,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
- down(&gp->pm_sem);
+ mutex_lock(&gp->pm_mutex);
spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock);
dev->mtu = new_mtu;
@@ -2602,7 +2603,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
}
spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
- up(&gp->pm_sem);
+ mutex_unlock(&gp->pm_mutex);
return 0;
}
@@ -2771,10 +2772,10 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
int rc = -EOPNOTSUPP;
unsigned long flags;
- /* Hold the PM semaphore while doing ioctl's or we may collide
+ /* Hold the PM mutex while doing ioctl's or we may collide
* with power management.
*/
- down(&gp->pm_sem);
+ mutex_lock(&gp->pm_mutex);
spin_lock_irqsave(&gp->lock, flags);
gem_get_cell(gp);
@@ -2812,7 +2813,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
gem_put_cell(gp);
spin_unlock_irqrestore(&gp->lock, flags);
- up(&gp->pm_sem);
+ mutex_unlock(&gp->pm_mutex);
return rc;
}
@@ -3033,7 +3034,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
spin_lock_init(&gp->lock);
spin_lock_init(&gp->tx_lock);
- init_MUTEX(&gp->pm_sem);
+ mutex_init(&gp->pm_mutex);
init_timer(&gp->link_timer);
gp->link_timer.function = gem_link_timer;
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 13006d759ad..89847215d00 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -980,15 +980,15 @@ struct gem {
int tx_new, tx_old;
unsigned int has_wol : 1; /* chip supports wake-on-lan */
- unsigned int asleep : 1; /* chip asleep, protected by pm_sem */
+ unsigned int asleep : 1; /* chip asleep, protected by pm_mutex */
unsigned int asleep_wol : 1; /* was asleep with WOL enabled */
- unsigned int opened : 1; /* driver opened, protected by pm_sem */
+ unsigned int opened : 1; /* driver opened, protected by pm_mutex */
unsigned int running : 1; /* chip running, protected by lock */
/* cell enable count, protected by lock */
int cell_enabled;
- struct semaphore pm_sem;
+ struct mutex pm_mutex;
u32 msg_enable;
u32 status;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6c6c5498899..e03d1ae50c3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.49"
-#define DRV_MODULE_RELDATE "Feb 2, 2006"
+#define DRV_MODULE_VERSION "3.52"
+#define DRV_MODULE_RELDATE "Mar 06, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -221,10 +221,22 @@ static struct pci_device_id tg3_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
@@ -534,6 +546,9 @@ static void tg3_enable_ints(struct tg3 *tp)
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
(tp->last_tag << 24));
+ if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
+ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+ (tp->last_tag << 24));
tg3_cond_int(tp);
}
@@ -1038,9 +1053,11 @@ static void tg3_frob_aux_power(struct tg3 *tp)
struct net_device *dev_peer;
dev_peer = pci_get_drvdata(tp->pdev_peer);
+ /* remove_one() may have been run on the peer. */
if (!dev_peer)
- BUG();
- tp_peer = netdev_priv(dev_peer);
+ tp_peer = tp;
+ else
+ tp_peer = netdev_priv(dev_peer);
}
if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
@@ -1131,7 +1148,7 @@ static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);
-static int tg3_set_power_state(struct tg3 *tp, int state)
+static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
u32 misc_host_ctrl;
u16 power_control, power_caps;
@@ -1150,7 +1167,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
power_control |= PCI_PM_CTRL_PME_STATUS;
power_control &= ~(PCI_PM_CTRL_STATE_MASK);
switch (state) {
- case 0:
+ case PCI_D0:
power_control |= 0;
pci_write_config_word(tp->pdev,
pm + PCI_PM_CTRL,
@@ -1163,15 +1180,15 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
return 0;
- case 1:
+ case PCI_D1:
power_control |= 1;
break;
- case 2:
+ case PCI_D2:
power_control |= 2;
break;
- case 3:
+ case PCI_D3hot:
power_control |= 3;
break;
@@ -2680,6 +2697,12 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
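+ /* On 5714, take the link-up indication from the MAC's TX status
+ * rather than the PHY's BMSR.
+ */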
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ bmsr |= BMSR_LSTATUS;
+ else
+ bmsr &= ~BMSR_LSTATUS;
+ }
err |= tg3_readphy(tp, MII_BMCR, &bmcr);
@@ -2748,6 +2771,13 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
bmcr = new_bmcr;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5714) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ bmsr |= BMSR_LSTATUS;
+ else
+ bmsr &= ~BMSR_LSTATUS;
+ }
tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
}
}
@@ -3338,6 +3368,23 @@ static inline void tg3_full_unlock(struct tg3 *tp)
spin_unlock_bh(&tp->lock);
}
+/* One-shot MSI handler - Chip automatically disables interrupt
+ * after sending MSI so driver doesn't have to do it.
+ */
+static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct tg3 *tp = netdev_priv(dev);
+
+ prefetch(tp->hw_status);
+ prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+
+ if (likely(!tg3_irq_sync(tp)))
+ netif_rx_schedule(dev); /* schedule NAPI poll */
+
+ return IRQ_HANDLED;
+}
+
/* MSI ISR - No need to check for interrupt sharing and no need to
* flush status block and interrupt mailbox. PCI ordering rules
* guarantee that MSI will arrive after the status block.
@@ -3628,11 +3675,139 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
+/* hard_start_xmit for devices that don't have any bugs and
+ * support TG3_FLG2_HW_TSO_2 only.
+ */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
dma_addr_t mapping;
u32 len, entry, base_flags, mss;
+
+ len = skb_headlen(skb);
+
+ /* No BH disabling for tx_lock here. We are running in BH disabled
+ * context and TX reclaim runs via tp->poll inside of a software
+ * interrupt. Furthermore, IRQ processing runs lockless so we have
+ * no IRQ context deadlocks to worry about either. Rejoice!
+ */
+ if (!spin_trylock(&tp->tx_lock))
+ return NETDEV_TX_LOCKED;
+
+ if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+
+ /* This is a hard error, log it. */
+ printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
+ "queue awake!\n", dev->name);
+ }
+ spin_unlock(&tp->tx_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = tp->tx_prod;
+ base_flags = 0;
+#if TG3_TSO_SUPPORT != 0
+ mss = 0;
+ if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
+ (mss = skb_shinfo(skb)->tso_size) != 0) {
+ int tcp_opt_len, ip_tcp_len;
+
+ if (skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
+ dev_kfree_skb(skb);
+ goto out_unlock;
+ }
+
+ tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+ ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
+
+ base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+ TXD_FLAG_CPU_POST_DMA);
+
+ skb->nh.iph->check = 0;
+ skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
+
+ skb->h.th->check = 0;
+
+ mss |= (ip_tcp_len + tcp_opt_len) << 9;
+ }
+ else if (skb->ip_summed == CHECKSUM_HW)
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#else
+ mss = 0;
+ if (skb->ip_summed == CHECKSUM_HW)
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+#endif
+#if TG3_VLAN_TAG_USED
+ if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
+ base_flags |= (TXD_FLAG_VLAN |
+ (vlan_tx_tag_get(skb) << 16));
+#endif
+
+ /* Queue skb data, a.k.a. the main skb fragment. */
+ mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+ tp->tx_buffers[entry].skb = skb;
+ pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
+
+ tg3_set_txd(tp, entry, mapping, len, base_flags,
+ (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
+
+ entry = NEXT_TX(entry);
+
+ /* Now loop through additional data fragments, and queue them. */
+ if (skb_shinfo(skb)->nr_frags > 0) {
+ unsigned int i, last;
+
+ last = skb_shinfo(skb)->nr_frags - 1;
+ for (i = 0; i <= last; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+ mapping = pci_map_page(tp->pdev,
+ frag->page,
+ frag->page_offset,
+ len, PCI_DMA_TODEVICE);
+
+ tp->tx_buffers[entry].skb = NULL;
+ pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
+
+ tg3_set_txd(tp, entry, mapping, len,
+ base_flags, (i == last) | (mss << 1));
+
+ entry = NEXT_TX(entry);
+ }
+ }
+
+ /* Packets are ready, update Tx producer idx local and on card. */
+ tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
+
+ tp->tx_prod = entry;
+ if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
+ netif_stop_queue(dev);
+ if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
+ netif_wake_queue(tp->dev);
+ }
+
+out_unlock:
+ mmiowb();
+ spin_unlock(&tp->tx_lock);
+
+ dev->trans_start = jiffies;
+
+ return NETDEV_TX_OK;
+}
+
+/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
+ * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
+ */
+static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ dma_addr_t mapping;
+ u32 len, entry, base_flags, mss;
int would_hit_hwbug;
len = skb_headlen(skb);
@@ -4369,6 +4544,10 @@ static int tg3_chip_reset(struct tg3 *tp)
tp->nvram_lock_cnt = 0;
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ tw32(GRC_FASTBOOT_PC, 0);
+
/*
* We must avoid the readl() that normally takes place.
* It locks machines, causes machine checks, and other
@@ -5518,6 +5697,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ if (!netif_running(dev))
+ return 0;
+
spin_lock_bh(&tp->lock);
__tg3_set_mac_addr(tp);
spin_unlock_bh(&tp->lock);
@@ -5585,6 +5767,9 @@ static int tg3_reset_hw(struct tg3 *tp)
tg3_abort_hw(tp, 1);
}
+ if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+ tg3_phy_reset(tp);
+
err = tg3_chip_reset(tp);
if (err)
return err;
@@ -5993,6 +6178,10 @@ static int tg3_reset_hw(struct tg3 *tp)
}
}
+ /* Enable host coalescing bug fix */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ val |= (1 << 29);
+
tw32_f(WDMAC_MODE, val);
udelay(40);
@@ -6097,6 +6286,17 @@ static int tg3_reset_hw(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
}
+ if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
+ u32 tmp;
+
+ tmp = tr32(SERDES_RX_CTRL);
+ tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
+ tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
+ tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
+ tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+ }
+
err = tg3_setup_phy(tp, 1);
if (err)
return err;
@@ -6175,7 +6375,7 @@ static int tg3_init_hw(struct tg3 *tp)
int err;
/* Force the chip into D0. */
- err = tg3_set_power_state(tp, 0);
+ err = tg3_set_power_state(tp, PCI_D0);
if (err)
goto out;
@@ -6331,6 +6531,26 @@ static void tg3_timer(unsigned long __opaque)
add_timer(&tp->timer);
}
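+/* Pick the interrupt handler variant (1-shot MSI, plain MSI, tagged-status
+ * INTx or legacy INTx) and the matching SA_* flags, then request the IRQ.
+ */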
+static int tg3_request_irq(struct tg3 *tp)
+{
+ irqreturn_t (*fn)(int, void *, struct pt_regs *);
+ unsigned long flags;
+ struct net_device *dev = tp->dev;
+
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+ fn = tg3_msi;
+ if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
+ fn = tg3_msi_1shot;
+ flags = SA_SAMPLE_RANDOM;
+ } else {
+ fn = tg3_interrupt;
+ if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+ fn = tg3_interrupt_tagged;
+ flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
+ }
+ return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
+}
+
static int tg3_test_interrupt(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
@@ -6367,16 +6587,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
free_irq(tp->pdev->irq, dev);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
- err = request_irq(tp->pdev->irq, tg3_msi,
- SA_SAMPLE_RANDOM, dev->name, dev);
- else {
- irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
- if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
- fn = tg3_interrupt_tagged;
- err = request_irq(tp->pdev->irq, fn,
- SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
- }
+ err = tg3_request_irq(tp);
if (err)
return err;
@@ -6428,14 +6639,7 @@ static int tg3_test_msi(struct tg3 *tp)
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
- {
- irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
- if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
- fn = tg3_interrupt_tagged;
-
- err = request_irq(tp->pdev->irq, fn,
- SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
- }
+ err = tg3_request_irq(tp);
if (err)
return err;
@@ -6462,6 +6666,10 @@ static int tg3_open(struct net_device *dev)
tg3_full_lock(tp, 0);
+ err = tg3_set_power_state(tp, PCI_D0);
+ if (err)
+ return err;
+
tg3_disable_ints(tp);
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
@@ -6476,7 +6684,9 @@ static int tg3_open(struct net_device *dev)
if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
(GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
- (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
+ (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
+ !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
+ (tp->pdev_peer == tp->pdev))) {
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
@@ -6491,17 +6701,7 @@ static int tg3_open(struct net_device *dev)
tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
}
}
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
- err = request_irq(tp->pdev->irq, tg3_msi,
- SA_SAMPLE_RANDOM, dev->name, dev);
- else {
- irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
- if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
- fn = tg3_interrupt_tagged;
-
- err = request_irq(tp->pdev->irq, fn,
- SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
- }
+ err = tg3_request_irq(tp);
if (err) {
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -6566,6 +6766,14 @@ static int tg3_open(struct net_device *dev)
return err;
}
+
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+ if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
+ u32 val = tr32(0x7c04);
+
+ tw32(0x7c04, val | (1 << 29));
+ }
+ }
}
tg3_full_lock(tp, 0);
@@ -6839,7 +7047,6 @@ static int tg3_close(struct net_device *dev)
tp->tg3_flags &=
~(TG3_FLAG_INIT_COMPLETE |
TG3_FLAG_GOT_SERDES_FLOWCTL);
- netif_carrier_off(tp->dev);
tg3_full_unlock(tp);
@@ -6856,6 +7063,10 @@ static int tg3_close(struct net_device *dev)
tg3_free_consistent(tp);
+ tg3_set_power_state(tp, PCI_D3hot);
+
+ netif_carrier_off(tp->dev);
+
return 0;
}
@@ -7150,6 +7361,9 @@ static void tg3_set_rx_mode(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
+ if (!netif_running(dev))
+ return;
+
tg3_full_lock(tp, 0);
__tg3_set_rx_mode(dev);
tg3_full_unlock(tp);
@@ -7174,6 +7388,9 @@ static void tg3_get_regs(struct net_device *dev,
memset(p, 0, TG3_REGDUMP_LEN);
+ if (tp->link_config.phy_is_low_power)
+ return;
+
tg3_full_lock(tp, 0);
#define __GET_REG32(reg) (*(p)++ = tr32(reg))
@@ -7240,6 +7457,7 @@ static int tg3_get_eeprom_len(struct net_device *dev)
}
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
+static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
@@ -7248,6 +7466,9 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *pd;
u32 i, offset, len, val, b_offset, b_count;
+ if (tp->link_config.phy_is_low_power)
+ return -EAGAIN;
+
offset = eeprom->offset;
len = eeprom->len;
eeprom->len = 0;
@@ -7309,6 +7530,9 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u32 offset, len, b_offset, odd_len, start, end;
u8 *buf;
+ if (tp->link_config.phy_is_low_power)
+ return -EAGAIN;
+
if (eeprom->magic != TG3_EEPROM_MAGIC)
return -EINVAL;
@@ -7442,6 +7666,7 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
strcpy(info->driver, DRV_MODULE_NAME);
strcpy(info->version, DRV_MODULE_VERSION);
+ strcpy(info->fw_version, tp->fw_ver);
strcpy(info->bus_info, pci_name(tp->pdev));
}
@@ -7536,11 +7761,20 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
ering->rx_mini_max_pending = 0;
- ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
+ if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
+ ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
+ else
+ ering->rx_jumbo_max_pending = 0;
+
+ ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
ering->rx_pending = tp->rx_pending;
ering->rx_mini_pending = 0;
- ering->rx_jumbo_pending = tp->rx_jumbo_pending;
+ if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
+ ering->rx_jumbo_pending = tp->rx_jumbo_pending;
+ else
+ ering->rx_jumbo_pending = 0;
+
ering->tx_pending = tp->tx_pending;
}
@@ -7661,10 +7895,10 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
return 0;
}
- if (data)
- dev->features |= NETIF_F_IP_CSUM;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ ethtool_op_set_tx_hw_csum(dev, data);
else
- dev->features &= ~NETIF_F_IP_CSUM;
+ ethtool_op_set_tx_csum(dev, data);
return 0;
}
@@ -7734,29 +7968,52 @@ static void tg3_get_ethtool_stats (struct net_device *dev,
}
#define NVRAM_TEST_SIZE 0x100
+#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
static int tg3_test_nvram(struct tg3 *tp)
{
- u32 *buf, csum;
- int i, j, err = 0;
+ u32 *buf, csum, magic;
+ int i, j, err = 0, size;
- buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
+ if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
+ return -EIO;
+
+ if (magic == TG3_EEPROM_MAGIC)
+ size = NVRAM_TEST_SIZE;
+ else if ((magic & 0xff000000) == 0xa5000000) {
+ if ((magic & 0xe00000) == 0x200000)
+ size = NVRAM_SELFBOOT_FORMAT1_SIZE;
+ else
+ return 0;
+ } else
+ return -EIO;
+
+ buf = kmalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
+ err = -EIO;
+ for (i = 0, j = 0; i < size; i += 4, j++) {
u32 val;
if ((err = tg3_nvram_read(tp, i, &val)) != 0)
break;
buf[j] = cpu_to_le32(val);
}
- if (i < NVRAM_TEST_SIZE)
+ if (i < size)
goto out;
- err = -EIO;
- if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
- goto out;
+ /* Selfboot format */
+ if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
+ u8 *buf8 = (u8 *) buf, csum8 = 0;
+
+ for (i = 0; i < size; i++)
+ csum8 += buf8[i];
+
+ if (csum8 == 0) {
+ err = 0;
+ goto out;
+ }
+
+ err = -EIO;
+ goto out;
+ }
/* Bootstrap checksum at offset 0x10 */
csum = calc_crc((unsigned char *) buf, 0x10);
@@ -7802,7 +8059,7 @@ static int tg3_test_link(struct tg3 *tp)
}
/* Only test the commonly used registers */
-static const int tg3_test_registers(struct tg3 *tp)
+static int tg3_test_registers(struct tg3 *tp)
{
int i, is_5705;
u32 offset, read_mask, write_mask, val, save_val, read_val;
@@ -8050,14 +8307,24 @@ static int tg3_test_memory(struct tg3 *tp)
{ 0x00008000, 0x02000},
{ 0x00010000, 0x0e000},
{ 0xffffffff, 0x00000}
+ }, mem_tbl_5755[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x00800},
+ { 0x00008000, 0x02000},
+ { 0x00010000, 0x0c000},
+ { 0xffffffff, 0x00000}
};
struct mem_entry *mem_tbl;
int err = 0;
int i;
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
- mem_tbl = mem_tbl_5705;
- else
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ mem_tbl = mem_tbl_5755;
+ else
+ mem_tbl = mem_tbl_5705;
+ } else
mem_tbl = mem_tbl_570x;
for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
@@ -8229,6 +8496,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
{
struct tg3 *tp = netdev_priv(dev);
+ if (tp->link_config.phy_is_low_power)
+ tg3_set_power_state(tp, PCI_D0);
+
memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
if (tg3_test_nvram(tp) != 0) {
@@ -8257,6 +8527,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!err)
tg3_nvram_unlock(tp);
+ if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+ tg3_phy_reset(tp);
+
if (tg3_test_registers(tp) != 0) {
etest->flags |= ETH_TEST_FL_FAILED;
data[2] = 1;
@@ -8286,6 +8559,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_full_unlock(tp);
}
+ if (tp->link_config.phy_is_low_power)
+ tg3_set_power_state(tp, PCI_D3hot);
+
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -8305,6 +8581,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
break; /* We have no PHY */
+ if (tp->link_config.phy_is_low_power)
+ return -EAGAIN;
+
spin_lock_bh(&tp->lock);
err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
spin_unlock_bh(&tp->lock);
@@ -8321,6 +8600,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ if (tp->link_config.phy_is_low_power)
+ return -EAGAIN;
+
spin_lock_bh(&tp->lock);
err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
spin_unlock_bh(&tp->lock);
@@ -8464,14 +8746,14 @@ static struct ethtool_ops tg3_ethtool_ops = {
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
- u32 cursize, val;
+ u32 cursize, val, magic;
tp->nvram_size = EEPROM_CHIP_SIZE;
- if (tg3_nvram_read(tp, 0, &val) != 0)
+ if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
return;
- if (swab32(val) != TG3_EEPROM_MAGIC)
+ if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
return;
/*
@@ -8479,13 +8761,13 @@ static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
* When we encounter our validation signature, we know the addressing
* has wrapped around, and thus have our chip size.
*/
- cursize = 0x800;
+ cursize = 0x10;
while (cursize < tp->nvram_size) {
- if (tg3_nvram_read(tp, cursize, &val) != 0)
+ if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
return;
- if (swab32(val) == TG3_EEPROM_MAGIC)
+ if (val == magic)
break;
cursize <<= 1;
@@ -8498,6 +8780,15 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
u32 val;
+ if (tg3_nvram_read_swab(tp, 0, &val) != 0)
+ return;
+
+ /* Selfboot format */
+ if (val != TG3_EEPROM_MAGIC) {
+ tg3_get_eeprom_size(tp);
+ return;
+ }
+
if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
if (val != 0) {
tp->nvram_size = (val >> 16) * 1024;
@@ -8621,6 +8912,44 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
}
}
+static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+ case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
+ case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+ case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
+ case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ break;
+ case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ case FLASH_5755VENDOR_ATMEL_FLASH_1:
+ case FLASH_5755VENDOR_ATMEL_FLASH_2:
+ case FLASH_5755VENDOR_ATMEL_FLASH_3:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tp->nvram_pagesize = 264;
+ break;
+ case FLASH_5752VENDOR_ST_M45PE10:
+ case FLASH_5752VENDOR_ST_M45PE20:
+ case FLASH_5752VENDOR_ST_M45PE40:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tp->nvram_pagesize = 256;
+ break;
+ }
+}
+
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
@@ -8656,6 +8985,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
tg3_get_5752_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ tg3_get_5787_nvram_info(tp);
else
tg3_get_nvram_info(tp);
@@ -8725,6 +9056,34 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
return 0;
}
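+/* Translate a linear NVRAM offset into the page/byte address expected by
+ * buffered Atmel AT45DB0x1B flash; other NVRAM types pass through unchanged.
+ */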
+static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
+{
+ if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
+ (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
+ (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+ (tp->nvram_jedecnum == JEDEC_ATMEL))
+
+ addr = ((addr / tp->nvram_pagesize) <<
+ ATMEL_AT45DB0X1B_PAGE_POS) +
+ (addr % tp->nvram_pagesize);
+
+ return addr;
+}
+
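+/* Inverse of tg3_nvram_phys_addr(): turn an Atmel page/byte address back
+ * into a linear offset.
+ */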
+static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
+{
+ if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
+ (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
+ (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+ (tp->nvram_jedecnum == JEDEC_ATMEL))
+
+ addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
+ tp->nvram_pagesize) +
+ (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
+
+ return addr;
+}
+
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
int ret;
@@ -8737,14 +9096,7 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
return tg3_nvram_read_using_eeprom(tp, offset, val);
- if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
- (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
- (tp->nvram_jedecnum == JEDEC_ATMEL)) {
-
- offset = ((offset / tp->nvram_pagesize) <<
- ATMEL_AT45DB0X1B_PAGE_POS) +
- (offset % tp->nvram_pagesize);
- }
+ offset = tg3_nvram_phys_addr(tp, offset);
if (offset > NVRAM_ADDR_MSK)
return -EINVAL;
@@ -8769,6 +9121,16 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
return ret;
}
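+/* Read a 32-bit word from NVRAM and return it byte-swapped, so callers can
+ * compare directly against constants such as TG3_EEPROM_MAGIC.
+ */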
+static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
+{
+ int err;
+ u32 tmp;
+
+ err = tg3_nvram_read(tp, offset, &tmp);
+ *val = swab32(tmp);
+ return err;
+}
+
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
u32 offset, u32 len, u8 *buf)
{
@@ -8921,15 +9283,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
page_off = offset % tp->nvram_pagesize;
- if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
- (tp->nvram_jedecnum == JEDEC_ATMEL)) {
-
- phy_addr = ((offset / tp->nvram_pagesize) <<
- ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
- }
- else {
- phy_addr = offset;
- }
+ phy_addr = tg3_nvram_phys_addr(tp, offset);
tw32(NVRAM_ADDR, phy_addr);
@@ -8944,6 +9298,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
nvram_cmd |= NVRAM_CMD_LAST;
if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
(tp->nvram_jedecnum == JEDEC_ST) &&
(nvram_cmd & NVRAM_CMD_FIRST)) {
@@ -9347,6 +9702,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
{
unsigned char vpd_data[256];
int i;
+ u32 magic;
if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
/* Sun decided not to put the necessary bits in the
@@ -9356,16 +9712,43 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
return;
}
- for (i = 0; i < 256; i += 4) {
- u32 tmp;
+ if (tg3_nvram_read_swab(tp, 0x0, &magic))
+ return;
- if (tg3_nvram_read(tp, 0x100 + i, &tmp))
- goto out_not_found;
+ if (magic == TG3_EEPROM_MAGIC) {
+ for (i = 0; i < 256; i += 4) {
+ u32 tmp;
+
+ if (tg3_nvram_read(tp, 0x100 + i, &tmp))
+ goto out_not_found;
- vpd_data[i + 0] = ((tmp >> 0) & 0xff);
- vpd_data[i + 1] = ((tmp >> 8) & 0xff);
- vpd_data[i + 2] = ((tmp >> 16) & 0xff);
- vpd_data[i + 3] = ((tmp >> 24) & 0xff);
+ vpd_data[i + 0] = ((tmp >> 0) & 0xff);
+ vpd_data[i + 1] = ((tmp >> 8) & 0xff);
+ vpd_data[i + 2] = ((tmp >> 16) & 0xff);
+ vpd_data[i + 3] = ((tmp >> 24) & 0xff);
+ }
+ } else {
+ int vpd_cap;
+
+ vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
+ for (i = 0; i < 256; i += 4) {
+ u32 tmp, j = 0;
+ u16 tmp16;
+
+ pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
+ i);
+ while (j++ < 100) {
+ pci_read_config_word(tp->pdev, vpd_cap +
+ PCI_VPD_ADDR, &tmp16);
+ if (tmp16 & 0x8000)
+ break;
+ msleep(1);
+ }
+ pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
+ &tmp);
+ tmp = cpu_to_le32(tmp);
+ memcpy(&vpd_data[i], &tmp, 4);
+ }
}
/* Now parse and find the part number. */
@@ -9412,6 +9795,46 @@ out_not_found:
strcpy(tp->board_part_number, "none");
}
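+/* Read the firmware version string from NVRAM (legacy TG3_EEPROM_MAGIC
+ * images only) into tp->fw_ver, which is reported via ethtool drvinfo.
+ */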
+static void __devinit tg3_read_fw_ver(struct tg3 *tp)
+{
+ u32 val, offset, start;
+
+ if (tg3_nvram_read_swab(tp, 0, &val))
+ return;
+
+ if (val != TG3_EEPROM_MAGIC)
+ return;
+
+ if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
+ tg3_nvram_read_swab(tp, 0x4, &start))
+ return;
+
+ offset = tg3_nvram_logical_addr(tp, offset);
+ if (tg3_nvram_read_swab(tp, offset, &val))
+ return;
+
+ if ((val & 0xfc000000) == 0x0c000000) {
+ u32 ver_offset, addr;
+ int i;
+
+ if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
+ tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
+ return;
+
+ if (val != 0)
+ return;
+
+ addr = offset + ver_offset - start;
+ for (i = 0; i < 16; i += 4) {
+ if (tg3_nvram_read(tp, addr + i, &val))
+ return;
+
+ val = cpu_to_le32(val);
+ memcpy(tp->fw_ver + i, &val, 4);
+ }
+ }
+}
+
#ifdef CONFIG_SPARC64
static int __devinit tg3_is_sun_570X(struct tg3 *tp)
{
@@ -9603,6 +10026,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
@@ -9610,12 +10034,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
- if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
- tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
+ if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
+ tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
+ tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
+ } else
+ tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
+ }
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
@@ -9772,7 +10202,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
/* Force the chip into D0. */
- err = tg3_set_power_state(tp, 0);
+ err = tg3_set_power_state(tp, PCI_D0);
if (err) {
printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
pci_name(tp->pdev));
@@ -9825,7 +10255,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
tp->coalesce_mode = 0;
@@ -9925,6 +10356,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
tg3_read_partno(tp);
+ tg3_read_fw_ver(tp);
if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
@@ -9960,10 +10392,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
- /* It seems all chips can get confused if TX buffers
+ /* All chips before 5787 can get confused if TX buffers
* straddle the 4GB address boundary in some cases.
*/
- tp->dev->hard_start_xmit = tg3_start_xmit;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ tp->dev->hard_start_xmit = tg3_start_xmit;
+ else
+ tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
tp->rx_offset = 2;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
@@ -10491,7 +10926,6 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
tp->link_config.speed = SPEED_INVALID;
tp->link_config.duplex = DUPLEX_INVALID;
tp->link_config.autoneg = AUTONEG_ENABLE;
- netif_carrier_off(tp->dev);
tp->link_config.active_speed = SPEED_INVALID;
tp->link_config.active_duplex = DUPLEX_INVALID;
tp->link_config.phy_is_low_power = 0;
@@ -10550,6 +10984,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
case PHY_ID_BCM5752: return "5752";
case PHY_ID_BCM5714: return "5714";
case PHY_ID_BCM5780: return "5780";
+ case PHY_ID_BCM5787: return "5787";
case PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
@@ -10848,11 +11283,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
}
- /* TSO is off by default, user can enable using ethtool. */
-#if 0
- if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
+ /* TSO is on by default on chips that support hardware TSO.
+ * Firmware TSO on older chips gives lower performance, so it
+ * is off by default, but can be enabled using ethtool.
+ */
+ if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
dev->features |= NETIF_F_TSO;
-#endif
#endif
@@ -10896,7 +11332,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
* checksumming.
*/
if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ dev->features |= NETIF_F_HW_CSUM;
+ else
+ dev->features |= NETIF_F_IP_CSUM;
+ dev->features |= NETIF_F_SG;
tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
} else
tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
@@ -10949,6 +11389,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
(pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
(((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
+ netif_carrier_off(tp->dev);
+
return 0;
err_out_iounmap:
@@ -11044,7 +11486,7 @@ static int tg3_resume(struct pci_dev *pdev)
pci_restore_state(tp->pdev);
- err = tg3_set_power_state(tp, 0);
+ err = tg3_set_power_state(tp, PCI_D0);
if (err)
return err;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 7e3b613afb2..baa34c4721d 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -138,6 +138,7 @@
#define ASIC_REV_5752 0x06
#define ASIC_REV_5780 0x08
#define ASIC_REV_5714 0x09
+#define ASIC_REV_5787 0x0b
#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
@@ -1393,6 +1394,7 @@
#define GRC_MDI_CTRL 0x00006844
#define GRC_SEEPROM_DELAY 0x00006848
/* 0x684c --> 0x6c00 unused */
+#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */
/* 0x6c00 --> 0x7000 unused */
@@ -1436,6 +1438,13 @@
#define FLASH_5752VENDOR_ST_M45PE10 0x02400000
#define FLASH_5752VENDOR_ST_M45PE20 0x02400002
#define FLASH_5752VENDOR_ST_M45PE40 0x02400001
+#define FLASH_5755VENDOR_ATMEL_FLASH_1 0x03400001
+#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002
+#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000
+#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003
+#define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002
+#define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000
+#define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000
#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000
#define FLASH_5752PAGE_SIZE_256 0x00000000
#define FLASH_5752PAGE_SIZE_512 0x10000000
@@ -2185,7 +2194,7 @@ struct tg3 {
#define TG3_FLG2_PHY_SERDES 0x00002000
#define TG3_FLG2_CAPACITIVE_COUPLING 0x00004000
#define TG3_FLG2_FLASH 0x00008000
-#define TG3_FLG2_HW_TSO 0x00010000
+#define TG3_FLG2_HW_TSO_1 0x00010000
#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000
#define TG3_FLG2_5705_PLUS 0x00040000
#define TG3_FLG2_5750_PLUS 0x00080000
@@ -2198,6 +2207,9 @@ struct tg3 {
#define TG3_FLG2_PARALLEL_DETECT 0x01000000
#define TG3_FLG2_ICH_WORKAROUND 0x02000000
#define TG3_FLG2_5780_CLASS 0x04000000
+#define TG3_FLG2_HW_TSO_2 0x08000000
+#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
+#define TG3_FLG2_1SHOT_MSI 0x10000000
u32 split_mode_max_reqs;
#define SPLIT_MODE_5704_MAX_REQ 3
@@ -2247,6 +2259,7 @@ struct tg3 {
#define PHY_ID_BCM5752 0x60008100
#define PHY_ID_BCM5714 0x60008340
#define PHY_ID_BCM5780 0x60008350
+#define PHY_ID_BCM5787 0xbc050ce0
#define PHY_ID_BCM8002 0x60010140
#define PHY_ID_INVALID 0xffffffff
#define PHY_ID_REV_MASK 0x0000000f
@@ -2258,6 +2271,7 @@ struct tg3 {
u32 led_ctrl;
char board_part_number[24];
+ char fw_ver[16];
u32 nic_sram_data_cfg;
u32 pci_clock_ctrl;
struct pci_dev *pdev_peer;
@@ -2271,7 +2285,8 @@ struct tg3 {
(X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
(X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
(X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \
- (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM8002)
+ (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
+ (X) == PHY_ID_BCM8002)
struct tg3_hw_stats *hw_stats;
dma_addr_t stats_mapping;
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index db2c798ba89..175ba13bce4 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -1495,8 +1495,7 @@ module_param(skip_pci_probe, bool, 0);
MODULE_LICENSE("GPL");
-int
-init_module( void )
+int __init init_module( void )
{
struct net_device *dev;
int err;