Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c509.c                      |  13
-rw-r--r--  drivers/net/8139cp.c                     |  37
-rw-r--r--  drivers/net/Kconfig                      |   8
-rw-r--r--  drivers/net/bonding/bond_main.c          |   1
-rw-r--r--  drivers/net/chelsio/espi.c               |   4
-rw-r--r--  drivers/net/de620.c                      |   2
-rw-r--r--  drivers/net/dl2k.c                       |  25
-rw-r--r--  drivers/net/e1000/e1000.h                |   3
-rw-r--r--  drivers/net/e1000/e1000_main.c           | 121
-rw-r--r--  drivers/net/ifb.c                        |   2
-rw-r--r--  drivers/net/irda/irda-usb.c              |  90
-rw-r--r--  drivers/net/irda/irda-usb.h              |   7
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c            |   1
-rw-r--r--  drivers/net/r8169.c                      | 189
-rw-r--r--  drivers/net/s2io.c                       |   1
-rw-r--r--  drivers/net/sis190.c                     |   4
-rw-r--r--  drivers/net/sis900.c                     |   4
-rw-r--r--  drivers/net/skge.c                       |  85
-rw-r--r--  drivers/net/skge.h                       |   1
-rw-r--r--  drivers/net/sky2.c                       | 313
-rw-r--r--  drivers/net/sky2.h                       |  84
-rw-r--r--  drivers/net/tg3.c                        |  98
-rw-r--r--  drivers/net/tlan.c                       |   2
-rw-r--r--  drivers/net/tokenring/smctr.h            |   2
-rw-r--r--  drivers/net/tulip/de2104x.c              |  26
-rw-r--r--  drivers/net/tun.c                        |   5
-rw-r--r--  drivers/net/via-velocity.c               |   3
-rw-r--r--  drivers/net/wireless/atmel.c             |  98
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c  |   5
-rw-r--r--  drivers/net/wireless/ipw2200.c           |   6
-rw-r--r--  drivers/net/wireless/wavelan_cs.c        |  16
31 files changed, 773 insertions, 483 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 824e430486c..830528dce0c 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1574,6 +1574,7 @@ MODULE_LICENSE("GPL");
static int __init el3_init_module(void)
{
+ int ret = 0;
el3_cards = 0;
if (debug >= 0)
@@ -1589,14 +1590,16 @@ static int __init el3_init_module(void)
}
#ifdef CONFIG_EISA
- if (eisa_driver_register (&el3_eisa_driver) < 0) {
- eisa_driver_unregister (&el3_eisa_driver);
- }
+ ret = eisa_driver_register(&el3_eisa_driver);
#endif
#ifdef CONFIG_MCA
- mca_register_driver(&el3_mca_driver);
+ {
+ int err = mca_register_driver(&el3_mca_driver);
+ if (ret == 0)
+ ret = err;
+ }
#endif
- return 0;
+ return ret;
}
static void __exit el3_cleanup_module(void)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index f822cd3025f..dd410496aad 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1118,13 +1118,18 @@ err_out:
return -ENOMEM;
}
+static void cp_init_rings_index (struct cp_private *cp)
+{
+ cp->rx_tail = 0;
+ cp->tx_head = cp->tx_tail = 0;
+}
+
static int cp_init_rings (struct cp_private *cp)
{
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
- cp->rx_tail = 0;
- cp->tx_head = cp->tx_tail = 0;
+ cp_init_rings_index(cp);
return cp_refill_rx (cp);
}
@@ -1886,30 +1891,30 @@ static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
spin_unlock_irqrestore (&cp->lock, flags);
- if (cp->pdev && cp->wol_enabled) {
- pci_save_state (cp->pdev);
- cp_set_d3_state (cp);
- }
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
static int cp_resume (struct pci_dev *pdev)
{
- struct net_device *dev;
- struct cp_private *cp;
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
- dev = pci_get_drvdata (pdev);
- cp = netdev_priv(dev);
+ if (!netif_running(dev))
+ return 0;
netif_device_attach (dev);
-
- if (cp->pdev && cp->wol_enabled) {
- pci_set_power_state (cp->pdev, PCI_D0);
- pci_restore_state (cp->pdev);
- }
-
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
+ cp_init_rings_index (cp);
cp_init_hw (cp);
netif_start_queue (dev);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 47c72a63dfe..aa633fa95e6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1087,7 +1087,8 @@ config NE2000
without a specific driver are compatible with NE2000.
If you have a PCI NE2000 card however, say N here and Y to "PCI
- NE2000 support", above. If you have a NE2000 card and are running on
+ NE2000 and clone support" under "EISA, VLB, PCI and on board
+ controllers" below. If you have a NE2000 card and are running on
an MCA system (a bus system used on some IBM PS/2 computers and
laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
below.
@@ -2020,8 +2021,8 @@ config SIS190
will be called sis190. This is recommended.
config SKGE
- tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "New SysKonnect GigaEthernet support"
+ depends on PCI
select CRC32
---help---
This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
@@ -2082,7 +2083,6 @@ config SK98LIN
- Allied Telesyn AT-2971SX Gigabit Ethernet Adapter
- Allied Telesyn AT-2971T Gigabit Ethernet Adapter
- Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45
- - DGE-530T Gigabit Ethernet Adapter
- EG1032 v2 Instant Gigabit Network Adapter
- EG1064 v2 Instant Gigabit Network Adapter
- Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e0f51afec77..bcf9f17daf0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1581,6 +1581,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
printk(KERN_INFO DRV_NAME
": %s: %s not enslaved\n",
bond_dev->name, slave_dev->name);
+ write_unlock_bh(&bond->lock);
return -EINVAL;
}
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 230642571c9..e824acaf188 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -296,9 +296,7 @@ void t1_espi_destroy(struct peespi *espi)
struct peespi *t1_espi_create(adapter_t *adapter)
{
- struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
-
- memset(espi, 0, sizeof(*espi));
+ struct peespi *espi = kzalloc(sizeof(*espi), GFP_KERNEL);
if (espi)
espi->adapter = adapter;
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 0069f5fa973..22fc5b869a6 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -1012,7 +1012,7 @@ static int __init read_eeprom(struct net_device *dev)
#ifdef MODULE
static struct net_device *de620_dev;
-int init_module(void)
+int __init init_module(void)
{
de620_dev = de620_probe(-1);
if (IS_ERR(de620_dev))
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 430c628279b..fb9dae302dc 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -50,8 +50,8 @@
*/
#define DRV_NAME "D-Link DL2000-based linux driver"
-#define DRV_VERSION "v1.17a"
-#define DRV_RELDATE "2002/10/04"
+#define DRV_VERSION "v1.17b"
+#define DRV_RELDATE "2006/03/10"
#include "dl2k.h"
static char version[] __devinitdata =
@@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq)
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
- np->tx_ring[entry].fraginfo,
+ np->tx_ring[entry].fraginfo & 0xffffffffffff,
skb->len, PCI_DMA_TODEVICE);
if (irq)
dev_kfree_skb_irq (skb);
@@ -892,14 +892,16 @@ receive_packet (struct net_device *dev)
/* Small skbuffs for short packets */
if (pkt_len > copy_thresh) {
- pci_unmap_single (np->pdev, desc->fraginfo,
+ pci_unmap_single (np->pdev,
+ desc->fraginfo & 0xffffffffffff,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
pci_dma_sync_single_for_cpu(np->pdev,
- desc->fraginfo,
+ desc->fraginfo &
+ 0xffffffffffff,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb->dev = dev;
@@ -910,7 +912,8 @@ receive_packet (struct net_device *dev)
pkt_len, 0);
skb_put (skb, pkt_len);
pci_dma_sync_single_for_device(np->pdev,
- desc->fraginfo,
+ desc->fraginfo &
+ 0xffffffffffff,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
}
@@ -1796,8 +1799,9 @@ rio_close (struct net_device *dev)
np->rx_ring[i].fraginfo = 0;
skb = np->rx_skbuff[i];
if (skb) {
- pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo,
- skb->len, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(np->pdev,
+ np->rx_ring[i].fraginfo & 0xffffffffffff,
+ skb->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb (skb);
np->rx_skbuff[i] = NULL;
}
@@ -1805,8 +1809,9 @@ rio_close (struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
if (skb) {
- pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo,
- skb->len, PCI_DMA_TODEVICE);
+ pci_unmap_single(np->pdev,
+ np->tx_ring[i].fraginfo & 0xffffffffffff,
+ skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb (skb);
np->tx_skbuff[i] = NULL;
}
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 27c77306193..99baf0e099f 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -225,9 +225,6 @@ struct e1000_rx_ring {
struct e1000_ps_page *ps_page;
struct e1000_ps_page_dma *ps_page_dma;
- struct sk_buff *rx_skb_top;
- struct sk_buff *rx_skb_prev;
-
/* cpu for rx queue */
int cpu;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 31e332935e5..4c4db96d0b7 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -103,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
+#define DRV_VERSION "6.3.9-k4"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
@@ -1635,8 +1635,6 @@ setup_rx_desc_die:
rxdr->next_to_clean = 0;
rxdr->next_to_use = 0;
- rxdr->rx_skb_top = NULL;
- rxdr->rx_skb_prev = NULL;
return 0;
}
@@ -1713,8 +1711,23 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
rctl |= adapter->rx_buffer_len << 0x11;
} else {
rctl &= ~E1000_RCTL_SZ_4096;
- rctl &= ~E1000_RCTL_BSEX;
- rctl |= E1000_RCTL_SZ_2048;
+ rctl |= E1000_RCTL_BSEX;
+ switch (adapter->rx_buffer_len) {
+ case E1000_RXBUFFER_2048:
+ default:
+ rctl |= E1000_RCTL_SZ_2048;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_4096:
+ rctl |= E1000_RCTL_SZ_4096;
+ break;
+ case E1000_RXBUFFER_8192:
+ rctl |= E1000_RCTL_SZ_8192;
+ break;
+ case E1000_RXBUFFER_16384:
+ rctl |= E1000_RCTL_SZ_16384;
+ break;
+ }
}
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
@@ -2107,16 +2120,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
}
}
- /* there also may be some cached data in our adapter */
- if (rx_ring->rx_skb_top) {
- dev_kfree_skb(rx_ring->rx_skb_top);
-
- /* rx_skb_prev will be wiped out by rx_skb_top */
- rx_ring->rx_skb_top = NULL;
- rx_ring->rx_skb_prev = NULL;
- }
-
-
size = sizeof(struct e1000_buffer) * rx_ring->count;
memset(rx_ring->buffer_info, 0, size);
size = sizeof(struct e1000_ps_page) * rx_ring->count;
@@ -2914,7 +2917,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (!__pskb_pull_tail(skb, pull_size)) {
printk(KERN_ERR "__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
- return -EFAULT;
+ return NETDEV_TX_OK;
}
len = skb->len - skb->data_len;
}
@@ -3106,24 +3109,27 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
break;
}
- /* since the driver code now supports splitting a packet across
- * multiple descriptors, most of the fifo related limitations on
- * jumbo frame traffic have gone away.
- * simply use 2k descriptors for everything.
- *
- * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
- * means we reserve 2 more, this pushes us to allocate from the next
- * larger slab size
- * i.e. RXBUFFER_2048 --> size-4096 slab */
-
- /* recent hardware supports 1KB granularity */
+
if (adapter->hw.mac_type > e1000_82547_rev_2) {
- adapter->rx_buffer_len =
- ((max_frame < E1000_RXBUFFER_2048) ?
- max_frame : E1000_RXBUFFER_2048);
+ adapter->rx_buffer_len = max_frame;
E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
- } else
- adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+ } else {
+ if(unlikely((adapter->hw.mac_type < e1000_82543) &&
+ (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
+ DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
+ "on 82542\n");
+ return -EINVAL;
+ } else {
+ if(max_frame <= E1000_RXBUFFER_2048)
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+ else if(max_frame <= E1000_RXBUFFER_4096)
+ adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+ else if(max_frame <= E1000_RXBUFFER_8192)
+ adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+ else if(max_frame <= E1000_RXBUFFER_16384)
+ adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+ }
+ }
netdev->mtu = new_mtu;
@@ -3620,7 +3626,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
uint8_t last_byte;
unsigned int i;
int cleaned_count = 0;
- boolean_t cleaned = FALSE, multi_descriptor = FALSE;
+ boolean_t cleaned = FALSE;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3652,43 +3658,12 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->length);
- skb_put(skb, length);
-
- if (!(status & E1000_RXD_STAT_EOP)) {
- if (!rx_ring->rx_skb_top) {
- rx_ring->rx_skb_top = skb;
- rx_ring->rx_skb_top->len = length;
- rx_ring->rx_skb_prev = skb;
- } else {
- if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
- rx_ring->rx_skb_prev->next = skb;
- skb->prev = rx_ring->rx_skb_prev;
- } else {
- skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
- }
- rx_ring->rx_skb_prev = skb;
- rx_ring->rx_skb_top->data_len += length;
- }
+ if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+ /* All receives must fit into a single buffer */
+ E1000_DBG("%s: Receive packet consumed multiple"
+ " buffers\n", netdev->name);
+ dev_kfree_skb_irq(skb);
goto next_desc;
- } else {
- if (rx_ring->rx_skb_top) {
- if (skb_shinfo(rx_ring->rx_skb_top)
- ->frag_list) {
- rx_ring->rx_skb_prev->next = skb;
- skb->prev = rx_ring->rx_skb_prev;
- } else
- skb_shinfo(rx_ring->rx_skb_top)
- ->frag_list = skb;
-
- rx_ring->rx_skb_top->data_len += length;
- rx_ring->rx_skb_top->len +=
- rx_ring->rx_skb_top->data_len;
-
- skb = rx_ring->rx_skb_top;
- multi_descriptor = TRUE;
- rx_ring->rx_skb_top = NULL;
- rx_ring->rx_skb_prev = NULL;
- }
}
if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
@@ -3712,10 +3687,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
* performance for small packets with large amounts
* of reassembly being done in the stack */
#define E1000_CB_LENGTH 256
- if ((length < E1000_CB_LENGTH) &&
- !rx_ring->rx_skb_top &&
- /* or maybe (status & E1000_RXD_STAT_EOP) && */
- !multi_descriptor) {
+ if (length < E1000_CB_LENGTH) {
struct sk_buff *new_skb =
dev_alloc_skb(length + NET_IP_ALIGN);
if (new_skb) {
@@ -3729,7 +3701,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
skb = new_skb;
skb_put(skb, length);
}
- }
+ } else
+ skb_put(skb, length);
/* end copybreak code */
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 1b699259b4e..31fb2d75dc4 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -57,7 +57,7 @@ struct ifb_private {
struct sk_buff_head tq;
};
-static int numifbs = 1;
+static int numifbs = 2;
static void ri_tasklet(unsigned long dev);
static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index fa176ffb4ad..8936058a3cc 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -108,6 +108,7 @@ static void irda_usb_close(struct irda_usb_cb *self);
static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs);
static void write_bulk_callback(struct urb *urb, struct pt_regs *regs);
static void irda_usb_receive(struct urb *urb, struct pt_regs *regs);
+static void irda_usb_rx_defer_expired(unsigned long data);
static int irda_usb_net_open(struct net_device *dev);
static int irda_usb_net_close(struct net_device *dev);
static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -677,6 +678,12 @@ static void irda_usb_net_timeout(struct net_device *netdev)
* on the interrupt pipe and hang the Rx URB only when an interrupt is
* received.
* Jean II
+ *
+ * Note : don't read the above as what we are currently doing, but as
+ * something we could do with KC dongle. Also don't forget that the
+ * interrupt pipe is not part of the original standard, so this would
+ * need to be optional...
+ * Jean II
*/
/*------------------------------------------------------------------*/
@@ -704,10 +711,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
/* Reinitialize URB */
usb_fill_bulk_urb(urb, self->usbdev,
usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep),
- skb->data, skb->truesize,
+ skb->data, IRDA_SKB_MAX_MTU,
irda_usb_receive, skb);
- /* Note : unlink *must* be synchronous because of the code in
- * irda_usb_net_close() -> free the skb - Jean II */
urb->status = 0;
/* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */
@@ -734,6 +739,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
struct irda_skb_cb *cb;
struct sk_buff *newskb;
struct sk_buff *dataskb;
+ struct urb *next_urb;
int docopy;
IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
@@ -755,20 +761,37 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
if (urb->status != 0) {
switch (urb->status) {
case -EILSEQ:
- self->stats.rx_errors++;
self->stats.rx_crc_errors++;
- break;
+ /* Also precursor to a hot-unplug on UHCI. */
+ /* Fallthrough... */
case -ECONNRESET: /* -104 */
- IRDA_DEBUG(0, "%s(), Connection Reset (-104), transfer_flags 0x%04X \n", __FUNCTION__, urb->transfer_flags);
+ /* Random error, if I remember correctly */
/* uhci_cleanup_unlink() is going to kill the Rx
* URB just after we return. No problem, at this
* point the URB will be idle ;-) - Jean II */
- break;
+ case -ESHUTDOWN: /* -108 */
+ /* That's usually a hot-unplug. Submit will fail... */
+ case -ETIMEDOUT: /* -110 */
+ /* Usually precursor to a hot-unplug on OHCI. */
default:
- IRDA_DEBUG(0, "%s(), RX status %d,transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
+ self->stats.rx_errors++;
+ IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
break;
}
- goto done;
+ /* If we received an error, we don't want to resubmit the
+ * Rx URB straight away but to give the USB layer a little
+ * bit of breathing room.
+ * We are in the USB thread context, therefore there is a
+ * danger of recursion (new URB we submit fails, we come
+ * back here).
+ * With recent USB stack (2.6.15+), I'm seeing that on
+ * hot unplug of the dongle...
+ * Lowest effective timer is 10ms...
+ * Jean II */
+ self->rx_defer_timer.function = &irda_usb_rx_defer_expired;
+ self->rx_defer_timer.data = (unsigned long) urb;
+ mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
+ return;
}
/* Check for empty frames */
@@ -845,13 +868,45 @@ done:
* idle slot....
* Jean II */
/* Note : with this scheme, we could submit the idle URB before
- * processing the Rx URB. Another time... Jean II */
+ * processing the Rx URB. I don't think it would buy us anything as
+ * we are running in the USB thread context. Jean II */
+ next_urb = self->idle_rx_urb;
- /* Submit the idle URB to replace the URB we've just received */
- irda_usb_submit(self, skb, self->idle_rx_urb);
/* Recycle Rx URB : Now, the idle URB is the present one */
urb->context = NULL;
self->idle_rx_urb = urb;
+
+ /* Submit the idle URB to replace the URB we've just received.
+ * Do it last to avoid race conditions... Jean II */
+ irda_usb_submit(self, skb, next_urb);
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * In case of errors, we want the USB layer to have time to recover.
+ * Now, it is time to resubmit our Rx URB...
+ */
+static void irda_usb_rx_defer_expired(unsigned long data)
+{
+ struct urb *urb = (struct urb *) data;
+ struct sk_buff *skb = (struct sk_buff *) urb->context;
+ struct irda_usb_cb *self;
+ struct irda_skb_cb *cb;
+ struct urb *next_urb;
+
+ IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
+
+ /* Find ourselves */
+ cb = (struct irda_skb_cb *) skb->cb;
+ IRDA_ASSERT(cb != NULL, return;);
+ self = (struct irda_usb_cb *) cb->context;
+ IRDA_ASSERT(self != NULL, return;);
+
+ /* Same stuff as when Rx is done, see above... */
+ next_urb = self->idle_rx_urb;
+ urb->context = NULL;
+ self->idle_rx_urb = urb;
+ irda_usb_submit(self, skb, next_urb);
}
/*------------------------------------------------------------------*/
@@ -990,6 +1045,9 @@ static int irda_usb_net_close(struct net_device *netdev)
/* Stop network Tx queue */
netif_stop_queue(netdev);
+ /* Kill deferred Rx URB */
+ del_timer(&self->rx_defer_timer);
+
/* Deallocate all the Rx path buffers (URBs and skb) */
for (i = 0; i < IU_MAX_RX_URBS; i++) {
struct urb *urb = self->rx_urb[i];
@@ -1365,6 +1423,7 @@ static int irda_usb_probe(struct usb_interface *intf,
self = net->priv;
self->netdev = net;
spin_lock_init(&self->lock);
+ init_timer(&self->rx_defer_timer);
/* Create all of the needed urbs */
for (i = 0; i < IU_MAX_RX_URBS; i++) {
@@ -1498,6 +1557,9 @@ static void irda_usb_disconnect(struct usb_interface *intf)
* This will stop/desactivate the Tx path. - Jean II */
self->present = 0;
+ /* Kill deferred Rx URB */
+ del_timer(&self->rx_defer_timer);
+
/* We need to have irq enabled to unlink the URBs. That's OK,
* at this point the Tx path is gone - Jean II */
spin_unlock_irqrestore(&self->lock, flags);
@@ -1507,11 +1569,11 @@ static void irda_usb_disconnect(struct usb_interface *intf)
/* Accept no more transmissions */
/*netif_device_detach(self->netdev);*/
netif_stop_queue(self->netdev);
- /* Stop all the receive URBs */
+ /* Stop all the receive URBs. Must be synchronous. */
for (i = 0; i < IU_MAX_RX_URBS; i++)
usb_kill_urb(self->rx_urb[i]);
/* Cancel Tx and speed URB.
- * Toggle flags to make sure it's synchronous. */
+ * Make sure it's synchronous to avoid races. */
usb_kill_urb(self->tx_urb);
usb_kill_urb(self->speed_urb);
}
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
index bd8f6654232..4026af42dd4 100644
--- a/drivers/net/irda/irda-usb.h
+++ b/drivers/net/irda/irda-usb.h
@@ -136,8 +136,6 @@ struct irda_usb_cb {
__u16 bulk_out_mtu; /* Max Tx packet size in bytes */
__u8 bulk_int_ep; /* Interrupt Endpoint assignments */
- wait_queue_head_t wait_q; /* for timeouts */
-
struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */
struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */
struct urb *tx_urb; /* URB used to send data frames */
@@ -147,17 +145,18 @@ struct irda_usb_cb {
struct net_device_stats stats;
struct irlap_cb *irlap; /* The link layer we are binded to */
struct qos_info qos;
- hashbin_t *tx_list; /* Queued transmit skb's */
char *speed_buff; /* Buffer for speed changes */
struct timeval stamp;
struct timeval now;
- spinlock_t lock; /* For serializing operations */
+ spinlock_t lock; /* For serializing Tx operations */
__u16 xbofs; /* Current xbofs setting */
__s16 new_xbofs; /* xbofs we need to set */
__u32 speed; /* Current speed */
__s32 new_speed; /* speed we need to set */
+
+ struct timer_list rx_defer_timer; /* Wait for Rx error to clear */
};
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 01ddfc8cce3..aa558136939 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -806,6 +806,7 @@ static struct pcmcia_device_id axnet_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106),
PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab),
+ PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc),
PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef),
PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef),
PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1),
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 6e1018448ee..8cc0d0bbdf5 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -287,6 +287,20 @@ enum RTL8169_register_content {
TxInterFrameGapShift = 24,
TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+ /* Config1 register p.24 */
+ PMEnable = (1 << 0), /* Power Management Enable */
+
+ /* Config3 register p.25 */
+ MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
+ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
+
+ /* Config5 register p.27 */
+ BWF = (1 << 6), /* Accept Broadcast wakeup frame */
+ MWF = (1 << 5), /* Accept Multicast wakeup frame */
+ UWF = (1 << 4), /* Accept Unicast wakeup frame */
+ LanWake = (1 << 1), /* LanWake enable/disable */
+ PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
+
/* TBICSR p.28 */
TBIReset = 0x80000000,
TBILoopback = 0x40000000,
@@ -433,6 +447,7 @@ struct rtl8169_private {
unsigned int (*phy_reset_pending)(void __iomem *);
unsigned int (*link_ok)(void __iomem *);
struct work_struct task;
+ unsigned wol_enabled : 1;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -607,6 +622,80 @@ static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
*duplex = p->duplex;
}
+static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ u8 options;
+
+ wol->wolopts = 0;
+
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+ wol->supported = WAKE_ANY;
+
+ spin_lock_irq(&tp->lock);
+
+ options = RTL_R8(Config1);
+ if (!(options & PMEnable))
+ goto out_unlock;
+
+ options = RTL_R8(Config3);
+ if (options & LinkUp)
+ wol->wolopts |= WAKE_PHY;
+ if (options & MagicPacket)
+ wol->wolopts |= WAKE_MAGIC;
+
+ options = RTL_R8(Config5);
+ if (options & UWF)
+ wol->wolopts |= WAKE_UCAST;
+ if (options & BWF)
+ wol->wolopts |= WAKE_BCAST;
+ if (options & MWF)
+ wol->wolopts |= WAKE_MCAST;
+
+out_unlock:
+ spin_unlock_irq(&tp->lock);
+}
+
+static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+ static struct {
+ u32 opt;
+ u16 reg;
+ u8 mask;
+ } cfg[] = {
+ { WAKE_ANY, Config1, PMEnable },
+ { WAKE_PHY, Config3, LinkUp },
+ { WAKE_MAGIC, Config3, MagicPacket },
+ { WAKE_UCAST, Config5, UWF },
+ { WAKE_BCAST, Config5, BWF },
+ { WAKE_MCAST, Config5, MWF },
+ { WAKE_ANY, Config5, LanWake }
+ };
+
+ spin_lock_irq(&tp->lock);
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ for (i = 0; i < ARRAY_SIZE(cfg); i++) {
+ u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+ if (wol->wolopts & cfg[i].opt)
+ options |= cfg[i].mask;
+ RTL_W8(cfg[i].reg, options);
+ }
+
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ tp->wol_enabled = (wol->wolopts) ? 1 : 0;
+
+ spin_unlock_irq(&tp->lock);
+
+ return 0;
+}
+
static void rtl8169_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1025,6 +1114,8 @@ static struct ethtool_ops rtl8169_ethtool_ops = {
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_regs = rtl8169_get_regs,
+ .get_wol = rtl8169_get_wol,
+ .set_wol = rtl8169_set_wol,
.get_strings = rtl8169_get_strings,
.get_stats_count = rtl8169_get_stats_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
@@ -1442,6 +1533,11 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
tp->chipset = i;
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
+ RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+
*ioaddr_out = ioaddr;
*dev_out = dev;
out:
@@ -1612,49 +1708,6 @@ rtl8169_remove_one(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#ifdef CONFIG_PM
-
-static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- unsigned long flags;
-
- if (!netif_running(dev))
- return 0;
-
- netif_device_detach(dev);
- netif_stop_queue(dev);
- spin_lock_irqsave(&tp->lock, flags);
-
- /* Disable interrupts, stop Rx and Tx */
- RTL_W16(IntrMask, 0);
- RTL_W8(ChipCmd, 0);
-
- /* Update the error counts. */
- tp->stats.rx_missed_errors += RTL_R32(RxMissed);
- RTL_W32(RxMissed, 0);
- spin_unlock_irqrestore(&tp->lock, flags);
-
- return 0;
-}
-
-static int rtl8169_resume(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- if (!netif_running(dev))
- return 0;
-
- netif_device_attach(dev);
- rtl8169_hw_start(dev);
-
- return 0;
-}
-
-#endif /* CONFIG_PM */
-
static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
struct net_device *dev)
{
@@ -2700,6 +2753,56 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
return &tp->stats;
}
+#ifdef CONFIG_PM
+
+static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (!netif_running(dev))
+ goto out;
+
+ netif_device_detach(dev);
+ netif_stop_queue(dev);
+
+ spin_lock_irq(&tp->lock);
+
+ rtl8169_asic_down(ioaddr);
+
+ tp->stats.rx_missed_errors += RTL_R32(RxMissed);
+ RTL_W32(RxMissed, 0);
+
+ spin_unlock_irq(&tp->lock);
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+out:
+ return 0;
+}
+
+static int rtl8169_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (!netif_running(dev))
+ goto out;
+
+ netif_device_attach(dev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+out:
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
static struct pci_driver rtl8169_pci_driver = {
.name = MODULENAME,
.id_table = rtl8169_pci_tbl,
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 49b597cbc19..b7f00d6eb6a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -4092,6 +4092,7 @@ static void s2io_set_multicast(struct net_device *dev)
i++, mclist = mclist->next) {
memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
ETH_ALEN);
+ mac_addr = 0;
for (j = 0; j < ETH_ALEN; j++) {
mac_addr |= mclist->dmi_addr[j];
mac_addr <<= 8;
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b420182eec4..ed4bc91638d 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1791,6 +1791,8 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
goto out;
}
+ pci_set_drvdata(pdev, dev);
+
tp = netdev_priv(dev);
ioaddr = tp->mmio_addr;
@@ -1827,8 +1829,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
if (rc < 0)
goto err_remove_mii;
- pci_set_drvdata(pdev, dev);
-
net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
pci_name(pdev), sis_chip_info[ent->driver_data].name,
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 3d95fa20cd8..7a952fe60be 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -540,7 +540,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
printk("%2.2x.\n", net_dev->dev_addr[i]);
/* Detect Wake on Lan support */
- ret = inl(CFGPMC & PMESP);
+ ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27;
if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
@@ -2040,7 +2040,7 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
if (wol->wolopts == 0) {
pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
- cfgpmcsr |= ~PME_EN;
+ cfgpmcsr &= ~PME_EN;
pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
outl(pmctrl_bits, pmctrl_addr);
if (netif_msg_wol(sis_priv))
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bf55a4cfb3d..25e028b7ce4 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -879,13 +879,12 @@ static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
int i;
xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
- xm_read16(hw, port, XM_PHY_DATA);
+ *val = xm_read16(hw, port, XM_PHY_DATA);
- /* Need to wait for external PHY */
for (i = 0; i < PHY_RETRIES; i++) {
- udelay(1);
if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
goto ready;
+ udelay(1);
}
return -ETIMEDOUT;
@@ -918,7 +917,12 @@ static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
ready:
xm_write16(hw, port, XM_PHY_DATA, val);
- return 0;
+ for (i = 0; i < PHY_RETRIES; i++) {
+ if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
}
static void genesis_init(struct skge_hw *hw)
@@ -1168,13 +1172,17 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
u32 r;
const u8 zero[6] = { 0 };
- /* Clear MIB counters */
- xm_write16(hw, port, XM_STAT_CMD,
- XM_SC_CLR_RXC | XM_SC_CLR_TXC);
- /* Clear two times according to Errata #3 */
- xm_write16(hw, port, XM_STAT_CMD,
- XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+ for (i = 0; i < 10; i++) {
+ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
+ MFF_SET_MAC_RST);
+ if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
+ goto reset_ok;
+ udelay(1);
+ }
+ printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
+
+ reset_ok:
/* Unreset the XMAC. */
skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
@@ -1191,7 +1199,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
r |= GP_DIR_2|GP_IO_2;
skge_write32(hw, B2_GP_IO, r);
- skge_read32(hw, B2_GP_IO);
+
/* Enable GMII interface */
xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
@@ -1205,6 +1213,13 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
for (i = 1; i < 16; i++)
xm_outaddr(hw, port, XM_EXM(i), zero);
+ /* Clear MIB counters */
+ xm_write16(hw, port, XM_STAT_CMD,
+ XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+ /* Clear two times according to Errata #3 */
+ xm_write16(hw, port, XM_STAT_CMD,
+ XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+
/* configure Rx High Water Mark (XM_RX_HI_WM) */
xm_write16(hw, port, XM_RX_HI_WM, 1450);
@@ -1697,6 +1712,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
+
if (skge->autoneg == AUTONEG_DISABLE) {
reg = GM_GPCR_AU_ALL_DIS;
gma_write16(hw, port, GM_GP_CTRL,
@@ -1704,16 +1720,23 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
switch (skge->speed) {
case SPEED_1000:
+ reg &= ~GM_GPCR_SPEED_100;
reg |= GM_GPCR_SPEED_1000;
- /* fallthru */
+ break;
case SPEED_100:
+ reg &= ~GM_GPCR_SPEED_1000;
reg |= GM_GPCR_SPEED_100;
+ break;
+ case SPEED_10:
+ reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
+ break;
}
if (skge->duplex == DUPLEX_FULL)
reg |= GM_GPCR_DUP_FULL;
} else
reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
+
switch (skge->flow_control) {
case FLOW_MODE_NONE:
skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@@ -2162,8 +2185,10 @@ static int skge_up(struct net_device *dev)
skge->tx_avail = skge->tx_ring.count - 1;
/* Enable IRQ from port */
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= portirqmask[port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
/* Initialize MAC */
spin_lock_bh(&hw->phy_lock);
@@ -2221,8 +2246,10 @@ static int skge_down(struct net_device *dev)
else
yukon_stop(skge);
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask &= ~portirqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
/* Stop transmitter */
skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
@@ -2670,8 +2697,7 @@ static int skge_poll(struct net_device *dev, int *budget)
/* restart receiver */
wmb();
- skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
- CSR_START | CSR_IRQ_CL_F);
+ skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
*budget -= work_done;
dev->quota -= work_done;
@@ -2679,10 +2705,11 @@ static int skge_poll(struct net_device *dev, int *budget)
if (work_done >= to_do)
return 1; /* not done */
- netif_rx_complete(dev);
- hw->intr_mask |= portirqmask[skge->port];
- skge_write32(hw, B0_IMSK, hw->intr_mask);
- skge_read32(hw, B0_IMSK);
+ spin_lock_irq(&hw->hw_lock);
+ __netif_rx_complete(dev);
+ hw->intr_mask |= portirqmask[skge->port];
+ skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
return 0;
}
@@ -2842,18 +2869,10 @@ static void skge_extirq(unsigned long data)
}
spin_unlock(&hw->phy_lock);
- local_irq_disable();
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
- local_irq_enable();
-}
-
-static inline void skge_wakeup(struct net_device *dev)
-{
- struct skge_port *skge = netdev_priv(dev);
-
- prefetch(skge->rx_ring.to_clean);
- netif_rx_schedule(dev);
+ spin_unlock_irq(&hw->hw_lock);
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2864,15 +2883,17 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
if (status == 0 || status == ~0) /* hotplug or shared irq */
return IRQ_NONE;
- status &= hw->intr_mask;
+ spin_lock(&hw->hw_lock);
if (status & IS_R1_F) {
+ skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
hw->intr_mask &= ~IS_R1_F;
- skge_wakeup(hw->dev[0]);
+ netif_rx_schedule(hw->dev[0]);
}
if (status & IS_R2_F) {
+ skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
hw->intr_mask &= ~IS_R2_F;
- skge_wakeup(hw->dev[1]);
+ netif_rx_schedule(hw->dev[1]);
}
if (status & IS_XA1_F)
@@ -2914,6 +2935,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
}
skge_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
@@ -3282,6 +3304,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
hw->pdev = pdev;
spin_lock_init(&hw->phy_lock);
+ spin_lock_init(&hw->hw_lock);
tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 2efdacc290e..941f12a333b 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2402,6 +2402,7 @@ struct skge_hw {
struct tasklet_struct ext_tasklet;
spinlock_t phy_lock;
+ spinlock_t hw_lock;
};
enum {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index cae2edf2300..73260364cba 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -74,7 +74,7 @@
#define TX_RING_SIZE 512
#define TX_DEF_PENDING (TX_RING_SIZE - 1)
#define TX_MIN_PENDING 64
-#define MAX_SKB_TX_LE (4 + 2*MAX_SKB_FRAGS)
+#define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
@@ -96,10 +96,6 @@ static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
-static int disable_msi = 0;
-module_param(disable_msi, int, 0);
-MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
-
static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -195,11 +191,11 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
pr_debug("sky2_set_power_state %d\n", state);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control);
+ power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
(power_control & PCI_PM_CAP_PME_D3cold);
- pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control);
+ power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
power_control |= PCI_PM_CTRL_PME_STATUS;
power_control &= ~(PCI_PM_CTRL_STATE_MASK);
@@ -223,7 +219,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
/* Turn off phy power saving */
- pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
/* looks like this XL is back asswards .. */
@@ -232,18 +228,28 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
if (hw->ports > 1)
reg1 |= PCI_Y2_PHY2_COMA;
}
- pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
+ reg1 &= P_ASPM_CONTROL_MSK;
+ sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
+ sky2_pci_write32(hw, PCI_DEV_REG5, 0);
+ }
+
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+
break;
case PCI_D3hot:
case PCI_D3cold:
/* Turn on phy power saving */
- pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
else
reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
- pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
@@ -265,7 +271,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
ret = -1;
}
- pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control);
+ sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
return ret;
}
@@ -463,16 +469,31 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
}
- gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
+ /* apply fixes in PHY AFE */
+ gm_phy_write(hw, port, 22, 255);
+ /* increase differential signal amplitude in 10BASE-T */
+ gm_phy_write(hw, port, 24, 0xaa99);
+ gm_phy_write(hw, port, 23, 0x2011);
- if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
- /* turn on 100 Mbps LED (LED_LINK100) */
- ledover |= PHY_M_LED_MO_100(MO_LED_ON);
- }
+ /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
+ gm_phy_write(hw, port, 24, 0xa204);
+ gm_phy_write(hw, port, 23, 0x2002);
- if (ledover)
- gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+ /* set page register to 0 */
+ gm_phy_write(hw, port, 22, 0);
+ } else {
+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
+
+ if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
+ /* turn on 100 Mbps LED (LED_LINK100) */
+ ledover |= PHY_M_LED_MO_100(MO_LED_ON);
+ }
+
+ if (ledover)
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+ }
/* Enable phy interrupt on auto-negotiation complete (or link up) */
if (sky2->autoneg == AUTONEG_ENABLE)
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
@@ -520,10 +541,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
switch (sky2->speed) {
case SPEED_1000:
+ reg &= ~GM_GPCR_SPEED_100;
reg |= GM_GPCR_SPEED_1000;
- /* fallthru */
+ break;
case SPEED_100:
+ reg &= ~GM_GPCR_SPEED_1000;
reg |= GM_GPCR_SPEED_100;
+ break;
+ case SPEED_10:
+ reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
+ break;
}
if (sky2->duplex == DUPLEX_FULL)
@@ -595,8 +622,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
/* Configure Rx MAC FIFO */
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
- sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T),
- GMF_RX_CTRL_DEF);
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+ GMF_OPER_ON | GMF_RX_F_FL_ON);
/* Flush Rx MAC FIFO on any flow control or error */
sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
@@ -947,6 +974,12 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2->rx_put = sky2->rx_next = 0;
sky2_qset(hw, rxq);
+
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
+ /* MAC Rx RAM Read is controlled by hardware */
+ sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
+ }
+
sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
rx_set_checksum(sky2);
@@ -962,6 +995,10 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2_rx_add(sky2, re->mapaddr);
}
+ /* Truncate oversize frames */
+ sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), sky2->rx_bufsize - 8);
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
+
/* Tell chip about available buffers */
sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
@@ -1029,9 +1066,10 @@ static int sky2_up(struct net_device *dev)
RB_RST_SET);
sky2_qset(hw, txqaddr[port]);
- if (hw->chip_id == CHIP_ID_YUKON_EC_U)
- sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
+ /* Set almost empty threshold */
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1)
+ sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
TX_RING_SIZE - 1);
@@ -1041,8 +1079,10 @@ static int sky2_up(struct net_device *dev)
goto err_out;
/* Enable interrupts from phy/mac for port */
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
return 0;
err_out:
@@ -1109,6 +1149,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
struct sky2_tx_le *le = NULL;
struct tx_ring_info *re;
unsigned i, len;
+ int avail;
dma_addr_t mapping;
u32 addr64;
u16 mss;
@@ -1251,12 +1292,16 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
re->idx = sky2->tx_prod;
le->ctrl |= EOP;
+ avail = tx_avail(sky2);
+ if (mss != 0 || avail < TX_MIN_PENDING) {
+ le->ctrl |= FRC_STAT;
+ if (avail <= MAX_SKB_TX_LE)
+ netif_stop_queue(dev);
+ }
+
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
&sky2->tx_last_put, TX_RING_SIZE);
- if (tx_avail(sky2) <= MAX_SKB_TX_LE)
- netif_stop_queue(dev);
-
out_unlock:
spin_unlock(&sky2->tx_lock);
@@ -1342,10 +1387,10 @@ static int sky2_down(struct net_device *dev)
netif_stop_queue(dev);
/* Disable port IRQ */
- local_irq_disable();
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
sky2_write32(hw, B0_IMSK, hw->intr_mask);
- local_irq_enable();
+ spin_unlock_irq(&hw->hw_lock);
flush_scheduled_work();
@@ -1446,6 +1491,29 @@ static void sky2_link_up(struct sky2_port *sky2)
sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
reg = gma_read16(hw, port, GM_GP_CTRL);
+ if (sky2->autoneg == AUTONEG_DISABLE) {
+ reg |= GM_GPCR_AU_ALL_DIS;
+
+ /* Is write/read necessary? Copied from sky2_mac_init */
+ gma_write16(hw, port, GM_GP_CTRL, reg);
+ gma_read16(hw, port, GM_GP_CTRL);
+
+ switch (sky2->speed) {
+ case SPEED_1000:
+ reg &= ~GM_GPCR_SPEED_100;
+ reg |= GM_GPCR_SPEED_1000;
+ break;
+ case SPEED_100:
+ reg &= ~GM_GPCR_SPEED_1000;
+ reg |= GM_GPCR_SPEED_100;
+ break;
+ case SPEED_10:
+ reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
+ break;
+ }
+ } else
+ reg &= ~GM_GPCR_AU_ALL_DIS;
+
if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
reg |= GM_GPCR_DUP_FULL;
@@ -1604,10 +1672,10 @@ static void sky2_phy_task(void *arg)
out:
up(&sky2->phy_sema);
- local_irq_disable();
+ spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
sky2_write32(hw, B0_IMSK, hw->intr_mask);
- local_irq_enable();
+ spin_unlock_irq(&hw->hw_lock);
}
@@ -1648,10 +1716,12 @@ static void sky2_tx_timeout(struct net_device *dev)
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
-/* Want receive buffer size to be multiple of 64 bits, and incl room for vlan */
+/* Want receive buffer size to be multiple of 64 bits
+ * and incl room for vlan and truncation
+ */
static inline unsigned sky2_buf_size(int mtu)
{
- return roundup(mtu + ETH_HLEN + 4, 8);
+ return roundup(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
}
static int sky2_change_mtu(struct net_device *dev, int new_mtu)
@@ -1734,7 +1804,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
if (!(status & GMR_FS_RX_OK))
goto resubmit;
- if ((status >> 16) != length || length > sky2->rx_bufsize)
+ if (length > sky2->netdev->mtu + ETH_HLEN)
goto oversize;
if (length < copybreak) {
@@ -1834,6 +1904,17 @@ static int sky2_poll(struct net_device *dev0, int *budget)
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+ /*
+ * Kick the STAT_LEV_TIMER_CTRL timer.
+ * This fixes my hangs on Yukon-EC (0xb6) rev 1.
+ * The if clause is there to start the timer only if it has been
+ * configured correctly and not been disabled via ethtool.
+ */
+ if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) {
+ sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
+ sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
+ }
+
hwidx = sky2_read16(hw, STAT_PUT_IDX);
BUG_ON(hwidx >= STATUS_RING_SIZE);
rmb();
@@ -1916,16 +1997,19 @@ exit_loop:
sky2_tx_check(hw, 0, tx_done[0]);
sky2_tx_check(hw, 1, tx_done[1]);
+ if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+ }
+
if (likely(work_done < to_do)) {
- /* need to restart TX timer */
- if (is_ec_a1(hw)) {
- sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
- sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
- }
+ spin_lock_irq(&hw->hw_lock);
+ __netif_rx_complete(dev0);
- netif_rx_complete(dev0);
hw->intr_mask |= Y2_IS_STAT_BMU;
sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ spin_unlock_irq(&hw->hw_lock);
+
return 0;
} else {
*budget -= work_done;
@@ -1988,13 +2072,13 @@ static void sky2_hw_intr(struct sky2_hw *hw)
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err;
- pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
+ pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit())
printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
pci_name(hw->pdev), pci_err);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- pci_write_config_word(hw->pdev, PCI_STATUS,
+ sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
@@ -2003,7 +2087,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
/* PCI-Express uncorrectable Error occurred */
u32 pex_err;
- pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
+ pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
if (net_ratelimit())
printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
@@ -2011,7 +2095,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
/* clear the interrupt */
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
+ sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
0xffffffffUL);
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
@@ -2057,6 +2141,7 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
sky2_write32(hw, B0_IMSK, hw->intr_mask);
+
schedule_work(&sky2->phy_task);
}
@@ -2070,6 +2155,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
if (status == 0 || status == ~0)
return IRQ_NONE;
+ spin_lock(&hw->hw_lock);
if (status & Y2_IS_HW_ERR)
sky2_hw_intr(hw);
@@ -2098,7 +2184,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
sky2_write32(hw, B0_Y2_SP_ICR, 2);
- sky2_read32(hw, B0_IMSK);
+ spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
@@ -2141,7 +2227,7 @@ static int sky2_reset(struct sky2_hw *hw)
{
u16 status;
u8 t8, pmd_type;
- int i, err;
+ int i;
sky2_write8(hw, B0_CTST, CS_RST_CLR);
@@ -2163,25 +2249,18 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, B0_CTST, CS_RST_CLR);
/* clear PCI errors, if any */
- err = pci_read_config_word(hw->pdev, PCI_STATUS, &status);
- if (err)
- goto pci_err;
+ status = sky2_pci_read16(hw, PCI_STATUS);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- err = pci_write_config_word(hw->pdev, PCI_STATUS,
- status | PCI_STATUS_ERROR_BITS);
- if (err)
- goto pci_err;
+ sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS);
+
sky2_write8(hw, B0_CTST, CS_MRST_CLR);
/* clear any PEX errors */
- if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) {
- err = pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
- 0xffffffffUL);
- if (err)
- goto pci_err;
- }
+ if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
+ sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
+
pmd_type = sky2_read8(hw, B2_PMD_TYP);
hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
@@ -2280,8 +2359,7 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
- sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
- sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
+ sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7));
}
/* enable status unit */
@@ -2292,14 +2370,6 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
return 0;
-
-pci_err:
- /* This is to catch a BIOS bug workaround where
- * mmconfig table doesn't have other buses.
- */
- printk(KERN_ERR PFX "%s: can't access PCI config space\n",
- pci_name(hw->pdev));
- return err;
}
static u32 sky2_supported_modes(const struct sky2_hw *hw)
@@ -2823,11 +2893,11 @@ static int sky2_set_coalesce(struct net_device *dev,
(ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax))
return -EINVAL;
- if (ecmd->tx_max_coalesced_frames > 0xffff)
+ if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
return -EINVAL;
- if (ecmd->rx_max_coalesced_frames > 0xff)
+ if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
return -EINVAL;
- if (ecmd->rx_max_coalesced_frames_irq > 0xff)
+ if (ecmd->rx_max_coalesced_frames_irq >RX_MAX_PENDING)
return -EINVAL;
if (ecmd->tx_coalesce_usecs == 0)
@@ -3063,61 +3133,6 @@ static void __devinit sky2_show_addr(struct net_device *dev)
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}
-/* Handle software interrupt used during MSI test */
-static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id,
- struct pt_regs *regs)
-{
- struct sky2_hw *hw = dev_id;
- u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
-
- if (status == 0)
- return IRQ_NONE;
-
- if (status & Y2_IS_IRQ_SW) {
- sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
- hw->msi = 1;
- }
- sky2_write32(hw, B0_Y2_SP_ICR, 2);
-
- sky2_read32(hw, B0_IMSK);
- return IRQ_HANDLED;
-}
-
-/* Test interrupt path by forcing a a software IRQ */
-static int __devinit sky2_test_msi(struct sky2_hw *hw)
-{
- struct pci_dev *pdev = hw->pdev;
- int i, err;
-
- sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
-
- err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw);
- if (err) {
- printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
- pci_name(pdev), pdev->irq);
- return err;
- }
-
- sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
- wmb();
-
- for (i = 0; i < 10; i++) {
- barrier();
- if (hw->msi)
- goto found;
- mdelay(1);
- }
-
- err = -EOPNOTSUPP;
- sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
- found:
- sky2_write32(hw, B0_IMSK, 0);
-
- free_irq(pdev->irq, hw);
-
- return err;
-}
-
static int __devinit sky2_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -3169,17 +3184,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
}
}
-#ifdef __BIG_ENDIAN
- /* byte swap descriptors in hardware */
- {
- u32 reg;
-
- pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
- reg |= PCI_REV_DESC;
- pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
- }
-#endif
-
err = -ENOMEM;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw) {
@@ -3197,6 +3201,18 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
hw->pm_cap = pm_cap;
+ spin_lock_init(&hw->hw_lock);
+
+#ifdef __BIG_ENDIAN
+ /* byte swap descriptors in hardware */
+ {
+ u32 reg;
+
+ reg = sky2_pci_read32(hw, PCI_DEV_REG2);
+ reg |= PCI_REV_DESC;
+ sky2_pci_write32(hw, PCI_DEV_REG2, reg);
+ }
+#endif
/* ring for status responses */
hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
@@ -3238,22 +3254,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
}
}
- if (!disable_msi && pci_enable_msi(pdev) == 0) {
- err = sky2_test_msi(hw);
- if (err == -EOPNOTSUPP) {
- /* MSI test failed, go back to INTx mode */
- printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
- "switching to INTx mode. Please report this failure to "
- "the PCI maintainer and include system chipset information.\n",
- pci_name(pdev));
- pci_disable_msi(pdev);
- }
- else if (err)
- goto err_out_unregister;
- }
-
- err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
- DRV_NAME, hw);
+ err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
if (err) {
printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
pci_name(pdev), pdev->irq);
@@ -3268,8 +3269,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
return 0;
err_out_unregister:
- if (hw->msi)
- pci_disable_msi(pdev);
if (dev1) {
unregister_netdev(dev1);
free_netdev(dev1);
@@ -3312,8 +3311,6 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
sky2_read8(hw, B0_CTST);
free_irq(pdev->irq, hw);
- if (hw->msi)
- pci_disable_msi(pdev);
pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index fd12c289a23..dce955c76f3 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -5,14 +5,22 @@
#define _SKY2_H
/* PCI config registers */
-#define PCI_DEV_REG1 0x40
-#define PCI_DEV_REG2 0x44
-#define PCI_DEV_STATUS 0x7c
-#define PCI_OS_PCI_X (1<<26)
+enum {
+ PCI_DEV_REG1 = 0x40,
+ PCI_DEV_REG2 = 0x44,
+ PCI_DEV_STATUS = 0x7c,
+ PCI_DEV_REG3 = 0x80,
+ PCI_DEV_REG4 = 0x84,
+ PCI_DEV_REG5 = 0x88,
+};
-#define PEX_LNK_STAT 0xf2
-#define PEX_UNC_ERR_STAT 0x104
-#define PEX_DEV_CTRL 0xe8
+enum {
+ PEX_DEV_CAP = 0xe4,
+ PEX_DEV_CTRL = 0xe8,
+ PEX_DEV_STA = 0xea,
+ PEX_LNK_STAT = 0xf2,
+ PEX_UNC_ERR_STAT = 0x104,
+};
/* Yukon-2 */
enum pci_dev_reg_1 {
@@ -37,6 +45,25 @@ enum pci_dev_reg_2 {
PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
};
+/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
+enum pci_dev_reg_4 {
+ /* (Link Training & Status State Machine) */
+ P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
+ /* (Active State Power Management) */
+ P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
+ P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */
+ P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */
+ P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */
+
+ P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */
+ P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */
+ P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */
+ P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */
+ P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */
+ P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN
+ | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
+};
+
#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -507,6 +534,16 @@ enum {
};
#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
+/* Q_F 32 bit Flag Register */
+enum {
+ F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
+ F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
+ F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
+ F_WM_REACHED = 1<<25, /* Watermark reached */
+ F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
+ F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */
+ F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
+};
/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
enum {
@@ -909,10 +946,12 @@ enum {
PHY_BCOM_ID1_C0 = 0x6044,
PHY_BCOM_ID1_C5 = 0x6047,
- PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
+ PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
- PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
- PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
+ PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
+ PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
+ PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */
+ PHY_MARV_ID1_ECU = 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */
};
/* Advertisement register bits */
@@ -1837,11 +1876,11 @@ struct sky2_port {
struct sky2_hw {
void __iomem *regs;
struct pci_dev *pdev;
- u32 intr_mask;
struct net_device *dev[2];
+ spinlock_t hw_lock;
+ u32 intr_mask;
int pm_cap;
- int msi;
u8 chip_id;
u8 chip_rev;
u8 copper;
@@ -1912,4 +1951,25 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
}
+
+/* PCI config space access */
+static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg)
+{
+ return sky2_read32(hw, Y2_CFG_SPC + reg);
+}
+
+static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg)
+{
+ return sky2_read16(hw, Y2_CFG_SPC + reg);
+}
+
+static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val)
+{
+ sky2_write32(hw, Y2_CFG_SPC + reg, val);
+}
+
+static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val)
+{
+ sky2_write16(hw, Y2_CFG_SPC + reg, val);
+}
#endif
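The new inline accessors reach PCI config registers through the chip's mapped copy of config space at Y2_CFG_SPC instead of pci_read_config_*(), mirroring the change in sky2_probe() above where pci_read_config_dword() was replaced by sky2_pci_read32(). A minimal sketch of the read-modify-write pattern they support; the wrapper name is made up for illustration, the register and bit names come from this header:

/* Sketch: enable hardware byte swapping of descriptors on big-endian hosts. */
static void sky2_set_desc_swap(struct sky2_hw *hw)
{
        u32 reg = sky2_pci_read32(hw, PCI_DEV_REG2);

        reg |= PCI_REV_DESC;
        sky2_pci_write32(hw, PCI_DEV_REG2, reg);
}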
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e7dc653d5bd..b8f1524da55 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3532,9 +3532,23 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
(base + len + 8 < base));
}
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+ int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+ return (((u64) mapping + len) > DMA_40BIT_MASK);
+ return 0;
+#else
+ return 0;
+#endif
+}
+
static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
-static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
+/* Workaround 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
u32 last_plus_one, u32 *start,
u32 base_flags, u32 mss)
{
@@ -3742,6 +3756,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
+ if (tg3_40bit_overflow_test(tp, mapping, len))
+ would_hit_hwbug = 1;
+
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
tg3_set_txd(tp, entry, mapping, len,
base_flags, (i == last)|(mss << 1));
@@ -3763,7 +3780,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
- if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
+ if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
&start, base_flags, mss))
goto out_unlock;
@@ -9408,6 +9425,15 @@ static int __devinit tg3_is_sun_570X(struct tg3 *tp)
return 0;
if (venid == PCI_VENDOR_ID_SUN)
return 1;
+
+ /* TG3 chips onboard the SunBlade-2500 don't have the
+ * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
+ * are distinguishable from non-Sun variants by being
+ * named "network" by the firmware. Non-Sun cards will
+ * show up as being named "ethernet".
+ */
+ if (!strcmp(pcp->prom_name, "network"))
+ return 1;
}
return 0;
}
@@ -10517,8 +10543,6 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
strcat(str, "66MHz");
else if (clock_ctrl == 6)
strcat(str, "100MHz");
- else if (clock_ctrl == 7)
- strcat(str, "133MHz");
} else {
strcpy(str, "PCI:");
if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
@@ -10599,8 +10623,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
unsigned long tg3reg_base, tg3reg_len;
struct net_device *dev;
struct tg3 *tp;
- int i, err, pci_using_dac, pm_cap;
+ int i, err, pm_cap;
char str[40];
+ u64 dma_mask, persist_dma_mask;
if (tg3_version_printed++ == 0)
printk(KERN_INFO "%s", version);
@@ -10637,26 +10662,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_free_res;
}
- /* Configure DMA attributes. */
- err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
- if (!err) {
- pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
- if (err < 0) {
- printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
- "for consistent allocations\n");
- goto err_out_free_res;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
- if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
- goto err_out_free_res;
- }
- pci_using_dac = 0;
- }
-
tg3reg_base = pci_resource_start(pdev, 0);
tg3reg_len = pci_resource_len(pdev, 0);
@@ -10670,8 +10675,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -10756,6 +10759,44 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_iounmap;
}
+ /* 5714, 5715 and 5780 cannot support DMA addresses > 40-bit.
+ * On 64-bit systems with IOMMU, use 40-bit dma_mask.
+ * On 64-bit systems without IOMMU, use 64-bit dma_mask and
+ * do DMA address check in tg3_start_xmit().
+ */
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+ persist_dma_mask = dma_mask = DMA_40BIT_MASK;
+#ifdef CONFIG_HIGHMEM
+ dma_mask = DMA_64BIT_MASK;
+#endif
+ } else if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
+ persist_dma_mask = dma_mask = DMA_32BIT_MASK;
+ else
+ persist_dma_mask = dma_mask = DMA_64BIT_MASK;
+
+ /* Configure DMA attributes. */
+ if (dma_mask > DMA_32BIT_MASK) {
+ err = pci_set_dma_mask(pdev, dma_mask);
+ if (!err) {
+ dev->features |= NETIF_F_HIGHDMA;
+ err = pci_set_consistent_dma_mask(pdev,
+ persist_dma_mask);
+ if (err < 0) {
+ printk(KERN_ERR PFX "Unable to obtain 64 bit "
+ "DMA for consistent allocations\n");
+ goto err_out_iounmap;
+ }
+ }
+ }
+ if (err || dma_mask == DMA_32BIT_MASK) {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ printk(KERN_ERR PFX "No usable DMA configuration, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+ }
+
tg3_init_bufmgr_config(tp);
#if TG3_TSO_SUPPORT != 0
@@ -10824,9 +10865,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
} else
tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
- if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
- dev->features &= ~NETIF_F_HIGHDMA;
-
/* flow control autonegotiation is default behavior */
tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
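The 40-bit test added for tg3_start_xmit() is plain address arithmetic: a buffer trips the workaround when its end crosses the 40-bit boundary even though its start fits. A standalone sketch with the mask spelled out, for illustration only (the function name and userspace types are assumptions, not part of the driver):

#include <stdint.h>

#define DMA_40BIT_MASK 0x000000ffffffffffULL   /* low 40 bits set */

/* Non-zero when [mapping, mapping + len) does not fit below 1ULL << 40. */
static int overflows_40bit(uint64_t mapping, int len)
{
        return (mapping + (uint64_t)len) > DMA_40BIT_MASK;
}

For example, a 4 KiB buffer mapped at 0x000000fffffff800 starts below the limit but ends at 0x0000010000000800, so tg3_start_xmit() would set would_hit_hwbug and fall back to tigon3_dma_hwbug_workaround().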
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c2506b56a18..12076f8f942 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -536,6 +536,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
u16 device_id;
int reg, rc = -ENODEV;
+#ifdef CONFIG_PCI
if (pdev) {
rc = pci_enable_device(pdev);
if (rc)
@@ -547,6 +548,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
goto err_out;
}
}
+#endif /* CONFIG_PCI */
dev = alloc_etherdev(sizeof(TLanPrivateInfo));
if (dev == NULL) {
diff --git a/drivers/net/tokenring/smctr.h b/drivers/net/tokenring/smctr.h
index b306c7e4c79..88dfa2e01d6 100644
--- a/drivers/net/tokenring/smctr.h
+++ b/drivers/net/tokenring/smctr.h
@@ -1042,7 +1042,7 @@ typedef struct net_local {
__u16 functional_address[2];
__u16 bitwise_group_address[2];
- __u8 *ptr_ucode;
+ const __u8 *ptr_ucode;
__u8 cleanup;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d7fb3ffe06a..2d0cfbceee2 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1362,7 +1362,6 @@ static int de_open (struct net_device *dev)
{
struct de_private *de = dev->priv;
int rc;
- unsigned long flags;
if (netif_msg_ifup(de))
printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
@@ -1376,18 +1375,20 @@ static int de_open (struct net_device *dev)
return rc;
}
- rc = de_init_hw(de);
- if (rc) {
- printk(KERN_ERR "%s: h/w init failure, err=%d\n",
- dev->name, rc);
- goto err_out_free;
- }
+ dw32(IntrMask, 0);
rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
if (rc) {
printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
dev->name, dev->irq, rc);
- goto err_out_hw;
+ goto err_out_free;
+ }
+
+ rc = de_init_hw(de);
+ if (rc) {
+ printk(KERN_ERR "%s: h/w init failure, err=%d\n",
+ dev->name, rc);
+ goto err_out_free_irq;
}
netif_start_queue(dev);
@@ -1395,11 +1396,8 @@ static int de_open (struct net_device *dev)
return 0;
-err_out_hw:
- spin_lock_irqsave(&de->lock, flags);
- de_stop_hw(de);
- spin_unlock_irqrestore(&de->lock, flags);
-
+err_out_free_irq:
+ free_irq(dev->irq, dev);
err_out_free:
de_free_rings(de);
return rc;
@@ -1455,6 +1453,8 @@ static void de_tx_timeout (struct net_device *dev)
synchronize_irq(dev->irq);
de_clean_rings(de);
+ de_init_rings(de);
+
de_init_hw(de);
netif_wake_queue(dev);
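The de_open() reordering masks the chip's interrupts, installs the handler, and only then starts the hardware (a common ordering when the IRQ line may be shared), with the error path unwinding in reverse. An annotated skeleton of that sequence, same calls as the hunk above with comments added:

        dw32(IntrMask, 0);                       /* 1. silence the chip first */
        rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
        if (rc)
                goto err_out_free;               /* nothing armed yet: free rings only */
        rc = de_init_hw(de);                     /* 2. now start the hardware */
        if (rc)
                goto err_out_free_irq;           /* undo in reverse order */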
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 50b8c6754b1..a1ed2d98374 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -249,8 +249,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
if (align)
skb_reserve(skb, align);
- if (memcpy_fromiovec(skb_put(skb, len), iv, len))
+ if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
+ tun->stats.rx_dropped++;
+ kfree_skb(skb);
return -EFAULT;
+ }
skb->dev = tun->dev;
switch (tun->flags & TUN_TYPE_MASK) {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index c2d5907dc8e..ed1f837c8fd 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1106,6 +1106,9 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
for (i = 0; i < vptr->options.numrx; i++) {
struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
+ struct rx_desc *rd = vptr->rd_ring + i;
+
+ memset(rd, 0, sizeof(*rd));
if (!rd_info->skb)
continue;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 98a76f10a0f..dfc24016ba8 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1872,7 +1872,7 @@ static int atmel_set_encodeext(struct net_device *dev,
struct atmel_private *priv = netdev_priv(dev);
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int idx, key_len;
+ int idx, key_len, alg = ext->alg, set_key = 1;
/* Determine and validate the key index */
idx = encoding->flags & IW_ENCODE_INDEX;
@@ -1883,39 +1883,42 @@ static int atmel_set_encodeext(struct net_device *dev,
} else
idx = priv->default_key;
- if ((encoding->flags & IW_ENCODE_DISABLED) ||
- ext->alg == IW_ENCODE_ALG_NONE) {
- priv->wep_is_on = 0;
- priv->encryption_level = 0;
- priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
- }
+ if (encoding->flags & IW_ENCODE_DISABLED)
+ alg = IW_ENCODE_ALG_NONE;
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
+ if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
priv->default_key = idx;
+ set_key = ext->key_len > 0 ? 1 : 0;
+ }
- /* Set the requested key */
- switch (ext->alg) {
- case IW_ENCODE_ALG_NONE:
- break;
- case IW_ENCODE_ALG_WEP:
- if (ext->key_len > 5) {
- priv->wep_key_len[idx] = 13;
- priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
- priv->encryption_level = 2;
- } else if (ext->key_len > 0) {
- priv->wep_key_len[idx] = 5;
- priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
- priv->encryption_level = 1;
- } else {
+ if (set_key) {
+ /* Set the requested key first */
+ switch (alg) {
+ case IW_ENCODE_ALG_NONE:
+ priv->wep_is_on = 0;
+ priv->encryption_level = 0;
+ priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
+ break;
+ case IW_ENCODE_ALG_WEP:
+ if (ext->key_len > 5) {
+ priv->wep_key_len[idx] = 13;
+ priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
+ priv->encryption_level = 2;
+ } else if (ext->key_len > 0) {
+ priv->wep_key_len[idx] = 5;
+ priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
+ priv->encryption_level = 1;
+ } else {
+ return -EINVAL;
+ }
+ priv->wep_is_on = 1;
+ memset(priv->wep_keys[idx], 0, 13);
+ key_len = min ((int)ext->key_len, priv->wep_key_len[idx]);
+ memcpy(priv->wep_keys[idx], ext->key, key_len);
+ break;
+ default:
return -EINVAL;
}
- priv->wep_is_on = 1;
- memset(priv->wep_keys[idx], 0, 13);
- key_len = min ((int)ext->key_len, priv->wep_key_len[idx]);
- memcpy(priv->wep_keys[idx], ext->key, key_len);
- break;
- default:
- return -EINVAL;
}
return -EINPROGRESS;
@@ -3061,17 +3064,26 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
}
if (status == C80211_MGMT_SC_Success && priv->wep_is_on) {
+ int should_associate = 0;
/* WEP */
if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum)
return;
- if (trans_seq_no == 0x0002 &&
- auth->el_id == C80211_MGMT_ElementID_ChallengeText) {
- send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len);
- return;
+ if (system == C80211_MGMT_AAN_OPENSYSTEM) {
+ if (trans_seq_no == 0x0002) {
+ should_associate = 1;
+ }
+ } else if (system == C80211_MGMT_AAN_SHAREDKEY) {
+ if (trans_seq_no == 0x0002 &&
+ auth->el_id == C80211_MGMT_ElementID_ChallengeText) {
+ send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len);
+ return;
+ } else if (trans_seq_no == 0x0004) {
+ should_associate = 1;
+ }
}
- if (trans_seq_no == 0x0004) {
+ if (should_associate) {
if(priv->station_was_associated) {
atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
send_association_request(priv, 1);
@@ -3084,11 +3096,13 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
}
}
- if (status == C80211_MGMT_SC_AuthAlgNotSupported) {
+ if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
/* Do opensystem first, then try sharedkey */
- if (system == C80211_MGMT_AAN_OPENSYSTEM) {
+ if (system == WLAN_AUTH_OPEN) {
priv->CurrentAuthentTransactionSeqNum = 0x001;
- send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0);
+ priv->exclude_unencrypted = 1;
+ send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0);
+ return;
} else if (priv->connect_to_any_BSS) {
int bss_index;
@@ -3439,10 +3453,13 @@ static void atmel_management_timer(u_long a)
priv->AuthenticationRequestRetryCnt = 0;
restart_search(priv);
} else {
+ int auth = C80211_MGMT_AAN_OPENSYSTEM;
priv->AuthenticationRequestRetryCnt++;
priv->CurrentAuthentTransactionSeqNum = 0x0001;
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
- send_authentication_request(priv, C80211_MGMT_AAN_OPENSYSTEM, NULL, 0);
+ if (priv->wep_is_on && priv->exclude_unencrypted)
+ auth = C80211_MGMT_AAN_SHAREDKEY;
+ send_authentication_request(priv, auth, NULL, 0);
}
break;
@@ -3541,12 +3558,15 @@ static void atmel_command_irq(struct atmel_private *priv)
priv->station_was_associated = priv->station_is_associated;
atmel_enter_state(priv, STATION_STATE_READY);
} else {
+ int auth = C80211_MGMT_AAN_OPENSYSTEM;
priv->AuthenticationRequestRetryCnt = 0;
atmel_enter_state(priv, STATION_STATE_AUTHENTICATING);
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
priv->CurrentAuthentTransactionSeqNum = 0x0001;
- send_authentication_request(priv, C80211_MGMT_AAN_SHAREDKEY, NULL, 0);
+ if (priv->wep_is_on && priv->exclude_unencrypted)
+ auth = C80211_MGMT_AAN_SHAREDKEY;
+ send_authentication_request(priv, auth, NULL, 0);
}
return;
}
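The authenticate() change in the WEP branch above splits the success path by algorithm: an open-system exchange is complete after frame 2, while shared-key answers the challenge carried in frame 2 and completes after frame 4. A compact sketch of just that decision, using the driver's constants (the helper itself is illustrative, not part of the patch):

/* Sketch: has the authentication handshake finished successfully? */
static int auth_exchange_done(int system, int trans_seq_no)
{
        if (system == C80211_MGMT_AAN_OPENSYSTEM)
                return trans_seq_no == 0x0002;
        if (system == C80211_MGMT_AAN_SHAREDKEY)
                return trans_seq_no == 0x0004;   /* frame 2 carried the challenge */
        return 0;
}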
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 8bc0b528548..f8f4503475f 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -877,7 +877,6 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777),
PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000),
PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
- PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002),
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b),
PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612),
@@ -891,6 +890,10 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),
+ PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "INTERSIL",
+ 0x74c5e40d),
+ PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
+ 0x4b801a17),
PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
0x7a954bd9, 0x74be00c6),
PCMCIA_DEVICE_PROD_ID1234(
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 14beab4bc91..287676ad80d 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -4616,9 +4616,9 @@ static void ipw_rx_notification(struct ipw_priv *priv,
}
default:
- IPW_ERROR("Unknown notification: "
- "subtype=%d,flags=0x%2x,size=%d\n",
- notif->subtype, notif->flags, notif->size);
+ IPW_DEBUG_NOTIF("Unknown notification: "
+ "subtype=%d,flags=0x%2x,size=%d\n",
+ notif->subtype, notif->flags, notif->size);
}
}
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index cf373625fc7..98122f3a4bc 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -950,16 +950,8 @@ wv_82593_cmd(struct net_device * dev,
static inline int
wv_diag(struct net_device * dev)
{
- int ret = FALSE;
-
- if(wv_82593_cmd(dev, "wv_diag(): diagnose",
- OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED))
- ret = TRUE;
-
-#ifdef DEBUG_CONFIG_ERRORS
- printk(KERN_INFO "wavelan_cs: i82593 Self Test failed!\n");
-#endif
- return(ret);
+ return(wv_82593_cmd(dev, "wv_diag(): diagnose",
+ OP0_DIAGNOSE, SR0_DIAGNOSE_PASSED));
} /* wv_diag */
/*------------------------------------------------------------------*/
@@ -3604,8 +3596,8 @@ wv_82593_config(struct net_device * dev)
cfblk.lin_prio = 0; /* conform to 802.3 backoff algoritm */
cfblk.exp_prio = 5; /* conform to 802.3 backoff algoritm */
cfblk.bof_met = 1; /* conform to 802.3 backoff algoritm */
- cfblk.ifrm_spc = 0x20; /* 32 bit times interframe spacing */
- cfblk.slottim_low = 0x20; /* 32 bit times slot time */
+ cfblk.ifrm_spc = 0x20 >> 4; /* 32 bit times interframe spacing */
+ cfblk.slottim_low = 0x20 >> 5; /* 32 bit times slot time */
cfblk.slottim_hi = 0x0;
cfblk.max_retr = 15;
cfblk.prmisc = ((lp->promiscuous) ? TRUE: FALSE); /* Promiscuous mode */