author     Linus Torvalds <torvalds@linux-foundation.org>  2009-05-19 18:43:50 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-05-19 18:43:50 -0700
commit     fbb5ba92766a0a7803635f053220c325d26def9c (patch)
tree       4cf6d9ed725b5d1254d4d29f5750c37676173490 /drivers
parent     4fe1103201057e74f630b1cb8d8d49bd6ce0e666 (diff)
parent     bc8a5397433e4effbaddfa7e462d10b3c060cabb (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  ipv4: make default for INET_LRO consistent with help text
  net: fix skb_seq_read returning wrong offset/length for page frag data
  pkt_sched: gen_estimator: use 64 bit intermediate counters for bps
  be2net: add two new pci device ids to pci device table
  sch_teql: should not dereference skb after ndo_start_xmit()
  tcp: fix MSG_PEEK race check
  Doc: fixed descriptions on /proc/sys/net/core/* and /proc/sys/net/unix/*
  Neterion: *FIFO1_DMA_ERR set twice, should 2nd be *FIFO2_DMA_ERR?
  mv643xx_eth: fix PPC DMA breakage
  bonding: fix link down handling in 802.3ad mode
  bridge: fix initial packet flood if !STP
  bridge: relay bridge multicast pkgs if !STP
  NET: Meth: Fix unsafe mix of irq and non-irq spinlocks.
  mlx4_en: Fix not deleted napi structures
  ipconfig: handle case of delayed DHCP server
  netpoll: don't dereference NULL dev from np
  wimax/i2400m: fix device crash: fix optimization in _roq_queue_update_ws
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/benet/be.h            14
-rw-r--r--  drivers/net/benet/be_main.c       10
-rw-r--r--  drivers/net/bonding/bond_3ad.c    11
-rw-r--r--  drivers/net/meth.c                27
-rw-r--r--  drivers/net/mlx4/en_cq.c           4
-rw-r--r--  drivers/net/mv643xx_eth.c         41
-rw-r--r--  drivers/net/vxge/vxge-traffic.c    2
-rw-r--r--  drivers/net/wimax/i2400m/rx.c      5
8 files changed, 69 insertions(+), 45 deletions(-)
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index c49ddd08b2a..b4bb06fdf30 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -35,8 +35,22 @@
#define DRV_VER "2.0.348"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
+#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define DRV_DESC BE_NAME "Driver"
+#define BE_VENDOR_ID 0x19a2
+#define BE_DEVICE_ID1 0x211
+#define OC_DEVICE_ID1 0x700
+#define OC_DEVICE_ID2 0x701
+
+static inline char *nic_name(struct pci_dev *pdev)
+{
+ if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
+ return OC_NAME;
+ else
+ return BE_NAME;
+}
+
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN 64
#define BE_MAX_JUMBO_FRAME_SIZE 9018
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 30d0c81c989..5c378b5e8e4 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -28,10 +28,10 @@ static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
-#define BE_VENDOR_ID 0x19a2
-#define BE2_DEVICE_ID_1 0x0211
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
- { PCI_DEVICE(BE_VENDOR_ID, BE2_DEVICE_ID_1) },
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -1859,7 +1859,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status != 0)
goto stats_clean;
- dev_info(&pdev->dev, BE_NAME " port %d\n", adapter->port_num);
+ dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
return 0;
stats_clean:
@@ -1873,7 +1873,7 @@ rel_reg:
disable_dev:
pci_disable_device(pdev);
do_none:
- dev_warn(&pdev->dev, BE_NAME " initialization failed\n");
+ dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
return status;
}
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 8c2e5ab51f0..faf094abef7 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1465,6 +1465,12 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
return best;
}
+static int agg_device_up(const struct aggregator *agg)
+{
+ return (netif_running(agg->slave->dev) &&
+ netif_carrier_ok(agg->slave->dev));
+}
+
/**
* ad_agg_selection_logic - select an aggregation group for a team
* @aggregator: the aggregator we're looking at
@@ -1496,14 +1502,13 @@ static void ad_agg_selection_logic(struct aggregator *agg)
struct port *port;
origin = agg;
-
active = __get_active_agg(agg);
- best = active;
+ best = (active && agg_device_up(active)) ? active : NULL;
do {
agg->is_active = 0;
- if (agg->num_of_ports)
+ if (agg->num_of_ports && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
} while ((agg = __get_next_agg(agg)));
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index aa08987f6e8..dbd3436912b 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -127,11 +127,11 @@ static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
static int mdio_probe(struct meth_private *priv)
{
int i;
- unsigned long p2, p3;
+ unsigned long p2, p3, flags;
/* check if phy is detected already */
if(priv->phy_addr>=0&&priv->phy_addr<32)
return 0;
- spin_lock(&priv->meth_lock);
+ spin_lock_irqsave(&priv->meth_lock, flags);
for (i=0;i<32;++i){
priv->phy_addr=i;
p2=mdio_read(priv,2);
@@ -157,7 +157,7 @@ static int mdio_probe(struct meth_private *priv)
break;
}
}
- spin_unlock(&priv->meth_lock);
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
if(priv->phy_addr<32) {
return 0;
}
@@ -373,14 +373,14 @@ static int meth_release(struct net_device *dev)
static void meth_rx(struct net_device* dev, unsigned long int_status)
{
struct sk_buff *skb;
- unsigned long status;
+ unsigned long status, flags;
struct meth_private *priv = netdev_priv(dev);
unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;
- spin_lock(&priv->meth_lock);
+ spin_lock_irqsave(&priv->meth_lock, flags);
priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
mace->eth.dma_ctrl = priv->dma_ctrl;
- spin_unlock(&priv->meth_lock);
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
if (int_status & METH_INT_RX_UNDERFLOW) {
fifo_rptr = (fifo_rptr - 1) & 0x0f;
@@ -452,12 +452,12 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
ADVANCE_RX_PTR(priv->rx_write);
}
- spin_lock(&priv->meth_lock);
+ spin_lock_irqsave(&priv->meth_lock, flags);
/* In case there was underflow, and Rx DMA was disabled */
priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
mace->eth.dma_ctrl = priv->dma_ctrl;
mace->eth.int_stat = METH_INT_RX_THRESHOLD;
- spin_unlock(&priv->meth_lock);
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
}
static int meth_tx_full(struct net_device *dev)
@@ -470,11 +470,11 @@ static int meth_tx_full(struct net_device *dev)
static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
{
struct meth_private *priv = netdev_priv(dev);
- unsigned long status;
+ unsigned long status, flags;
struct sk_buff *skb;
unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16;
- spin_lock(&priv->meth_lock);
+ spin_lock_irqsave(&priv->meth_lock, flags);
/* Stop DMA notification */
priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
@@ -527,12 +527,13 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
}
mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
- spin_unlock(&priv->meth_lock);
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
}
static void meth_error(struct net_device* dev, unsigned status)
{
struct meth_private *priv = netdev_priv(dev);
+ unsigned long flags;
printk(KERN_WARNING "meth: error status: 0x%08x\n",status);
/* check for errors too... */
@@ -547,7 +548,7 @@ static void meth_error(struct net_device* dev, unsigned status)
printk(KERN_WARNING "meth: Rx overflow\n");
if (status & (METH_INT_RX_UNDERFLOW)) {
printk(KERN_WARNING "meth: Rx underflow\n");
- spin_lock(&priv->meth_lock);
+ spin_lock_irqsave(&priv->meth_lock, flags);
mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
/* more underflow interrupts will be delivered,
* effectively throwing us into an infinite loop.
@@ -555,7 +556,7 @@ static void meth_error(struct net_device* dev, unsigned status)
priv->dma_ctrl &= ~METH_DMA_RX_EN;
mace->eth.dma_ctrl = priv->dma_ctrl;
DPRINTK("Disabled meth Rx DMA temporarily\n");
- spin_unlock(&priv->meth_lock);
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
}
mace->eth.int_stat = METH_INT_ERROR;
}
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 91f50de84be..a276125b709 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -125,8 +125,10 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (cq->is_tx)
del_timer(&cq->timer);
- else
+ else {
napi_disable(&cq->napi);
+ netif_napi_del(&cq->napi);
+ }
mlx4_cq_free(mdev->dev, &cq->mcq);
}
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a400d7115f7..6bb5af35eda 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -569,7 +569,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
if (rxq->rx_curr_desc == rxq->rx_ring_size)
rxq->rx_curr_desc = 0;
- dma_unmap_single(NULL, rx_desc->buf_ptr,
+ dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
rx_desc->buf_size, DMA_FROM_DEVICE);
rxq->rx_desc_count--;
rx++;
@@ -678,8 +678,9 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
rx_desc = rxq->rx_desc_area + rx;
- rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
- mp->skb_size, DMA_FROM_DEVICE);
+ rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
+ skb->data, mp->skb_size,
+ DMA_FROM_DEVICE);
rx_desc->buf_size = mp->skb_size;
rxq->rx_skb[rx] = skb;
wmb();
@@ -718,6 +719,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
int nr_frags = skb_shinfo(skb)->nr_frags;
int frag;
@@ -746,10 +748,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->l4i_chk = 0;
desc->byte_cnt = this_frag->size;
- desc->buf_ptr = dma_map_page(NULL, this_frag->page,
- this_frag->page_offset,
- this_frag->size,
- DMA_TO_DEVICE);
+ desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
+ this_frag->page,
+ this_frag->page_offset,
+ this_frag->size, DMA_TO_DEVICE);
}
}
@@ -826,7 +828,8 @@ no_csum:
desc->l4i_chk = l4i_chk;
desc->byte_cnt = length;
- desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+ desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
+ length, DMA_TO_DEVICE);
__skb_queue_tail(&txq->tx_skb, skb);
@@ -956,10 +959,10 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
}
if (cmd_sts & TX_FIRST_DESC) {
- dma_unmap_single(NULL, desc->buf_ptr,
+ dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
desc->byte_cnt, DMA_TO_DEVICE);
} else {
- dma_unmap_page(NULL, desc->buf_ptr,
+ dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
desc->byte_cnt, DMA_TO_DEVICE);
}
@@ -1894,9 +1897,9 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
mp->rx_desc_sram_size);
rxq->rx_desc_dma = mp->rx_desc_sram_addr;
} else {
- rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
- &rxq->rx_desc_dma,
- GFP_KERNEL);
+ rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+ size, &rxq->rx_desc_dma,
+ GFP_KERNEL);
}
if (rxq->rx_desc_area == NULL) {
@@ -1947,7 +1950,7 @@ out_free:
if (index == 0 && size <= mp->rx_desc_sram_size)
iounmap(rxq->rx_desc_area);
else
- dma_free_coherent(NULL, size,
+ dma_free_coherent(mp->dev->dev.parent, size,
rxq->rx_desc_area,
rxq->rx_desc_dma);
@@ -1979,7 +1982,7 @@ static void rxq_deinit(struct rx_queue *rxq)
rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
iounmap(rxq->rx_desc_area);
else
- dma_free_coherent(NULL, rxq->rx_desc_area_size,
+ dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
rxq->rx_desc_area, rxq->rx_desc_dma);
kfree(rxq->rx_skb);
@@ -2007,9 +2010,9 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
mp->tx_desc_sram_size);
txq->tx_desc_dma = mp->tx_desc_sram_addr;
} else {
- txq->tx_desc_area = dma_alloc_coherent(NULL, size,
- &txq->tx_desc_dma,
- GFP_KERNEL);
+ txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
+ size, &txq->tx_desc_dma,
+ GFP_KERNEL);
}
if (txq->tx_desc_area == NULL) {
@@ -2053,7 +2056,7 @@ static void txq_deinit(struct tx_queue *txq)
txq->tx_desc_area_size <= mp->tx_desc_sram_size)
iounmap(txq->tx_desc_area);
else
- dma_free_coherent(NULL, txq->tx_desc_area_size,
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
}
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 7be0ae10d69..c2eeac4125f 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -115,7 +115,7 @@ enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
- VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR), 0, 32),
+ VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
&vp_reg->kdfcctl_errors_mask);
__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 02419bfd64b..f9fc3890232 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -819,10 +819,9 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
roq_data = (struct i2400m_roq_data *) &skb->cb;
i2400m_net_erx(i2400m, skb, roq_data->cs);
}
- else {
+ else
__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
- __i2400m_roq_update_ws(i2400m, roq, sn + 1);
- }
+ __i2400m_roq_update_ws(i2400m, roq, sn + 1);
i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
old_ws, len, sn, nsn, roq->ws);
}