Diffstat (limited to 'drivers/net/cxgb3/sge.c')
 drivers/net/cxgb3/sge.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)
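In short: the Rx page-chunk DMA address is stored in a plain dma_addr_t field instead of going through the pci_unmap_addr()/pci_unmap_addr_set() macros, and the Eth Tx path drops its private per-queue spinlock (and the manual dev->trans_start stamp) in favour of the netdev tx queue lock the core already holds around ndo_start_xmit; the Tx reclaim timer switches to __netif_tx_trylock() to match.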
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 73d569e758e..29c79eb43be 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
(*d->pg_chunk.p_cnt)--;
if (!*d->pg_chunk.p_cnt)
pci_unmap_page(pdev,
- pci_unmap_addr(&d->pg_chunk, mapping),
+ d->pg_chunk.mapping,
q->alloc_size, PCI_DMA_FROMDEVICE);
put_page(d->pg_chunk.page);
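The same macro-to-field change repeats below in alloc_pg_chunk(), refill_fl(), the Rx completion path, and lro_add_page(). A minimal sketch of why the field form matters, assuming an abbreviated fl_pg_chunk with only the members visible in this diff: DECLARE_PCI_UNMAP_ADDR() may expand to nothing on configurations where unmapping is a no-op, so an address stored only via pci_unmap_addr_set() cannot safely be read back for any other purpose.

	struct fl_pg_chunk_before {		/* abbreviated; member types assumed */
		struct page *page;
		unsigned int offset;
		unsigned long *p_cnt;
		DECLARE_PCI_UNMAP_ADDR(mapping);	/* empty on some configs */
	};

	struct fl_pg_chunk_after {
		struct page *page;
		unsigned int offset;
		unsigned long *p_cnt;
		dma_addr_t mapping;			/* always a readable field */
	};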
@@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
q->pg_chunk.offset = 0;
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
0, q->alloc_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
+ q->pg_chunk.mapping = mapping;
}
sd->pg_chunk = q->pg_chunk;
@@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
nomem: q->alloc_failed++;
break;
}
- mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
- sd->pg_chunk.offset;
+ mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
pci_unmap_addr_set(sd, dma_addr, mapping);
add_one_rx_chunk(mapping, d, q->gen);
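This hunk is what forces the change: refill_fl() consumes the chunk's base mapping for address arithmetic, not just for pci_unmap_page(). A minimal sketch using a hypothetical helper (rx_buf_dma is not in the driver; rx_sw_desc and the field names are from the surrounding code):

	/* Each Rx buffer carved from a page chunk sits at the chunk's
	 * base bus address plus the buffer's offset within the page. */
	static inline dma_addr_t rx_buf_dma(const struct rx_sw_desc *sd)
	{
		return sd->pg_chunk.mapping + sd->pg_chunk.offset;
	}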
@@ -882,7 +881,7 @@ recycle:
(*sd->pg_chunk.p_cnt)--;
if (!*sd->pg_chunk.p_cnt)
pci_unmap_page(adap->pdev,
- pci_unmap_addr(&sd->pg_chunk, mapping),
+ sd->pg_chunk.mapping,
fl->alloc_size,
PCI_DMA_FROMDEVICE);
if (!skb) {
@@ -1241,7 +1240,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
q = &qs->txq[TXQ_ETH];
txq = netdev_get_tx_queue(dev, qidx);
- spin_lock(&q->lock);
reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
credits = q->size - q->in_use;
@@ -1252,7 +1250,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
dev_err(&adap->pdev->dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, q->cntxt_id & 7);
- spin_unlock(&q->lock);
return NETDEV_TX_BUSY;
}
@@ -1286,9 +1283,6 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
qs->port_stats[SGE_PSTAT_VLANINS]++;
- dev->trans_start = jiffies;
- spin_unlock(&q->lock);
-
/*
* We do not use Tx completion interrupts to free DMAd Tx packets.
* This is good for performance but means that we rely on new Tx
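The dropped spin_lock(&q->lock)/spin_unlock() pair and the removed dev->trans_start stamp both lean on a core contract: for a driver that does not declare NETIF_F_LLTX, dev_queue_xmit() takes the per-queue tx lock before calling ndo_start_xmit and stamps the queue's trans_start afterwards, so the hardware queue is single-entrant with no driver lock of its own. A simplified caller-side sketch, assuming this era's net/core/dev.c (shape only, not copied verbatim):

	static int sketch_xmit_one(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq =
			netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		int rc = NETDEV_TX_BUSY;

		/* HARD_TX_LOCK/UNLOCK wrap the per-queue _xmit_lock that
		 * now protects t3_eth_xmit() instead of the old q->lock. */
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_tx_queue_stopped(txq))
			rc = dev->netdev_ops->ndo_start_xmit(skb, dev);
		HARD_TX_UNLOCK(dev, txq);
		return rc;
	}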
@@ -2096,7 +2090,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
(*sd->pg_chunk.p_cnt)--;
if (!*sd->pg_chunk.p_cnt)
pci_unmap_page(adap->pdev,
- pci_unmap_addr(&sd->pg_chunk, mapping),
+ sd->pg_chunk.mapping,
fl->alloc_size,
PCI_DMA_FROMDEVICE);
@@ -2858,11 +2852,12 @@ static void sge_timer_tx(unsigned long data)
unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
unsigned long next_period;
- if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
- tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
- TX_RECLAIM_TIMER_CHUNK);
- spin_unlock(&qs->txq[TXQ_ETH].lock);
+ if (__netif_tx_trylock(qs->tx_q)) {
+ tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+ TX_RECLAIM_TIMER_CHUNK);
+ __netif_tx_unlock(qs->tx_q);
}
+
if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
TX_RECLAIM_TIMER_CHUNK);
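With the Eth queue now serialized by the netdev tx lock rather than q->lock, the reclaim timer must contend on that same lock; __netif_tx_trylock() lets it skip a pass instead of spinning against an active xmit. A minimal sketch of the pattern (hypothetical wrapper; assumes qs->tx_q points at the queue's struct netdev_queue, as the new code above implies):

	static void reclaim_eth_txq(struct adapter *adap, struct sge_qset *qs)
	{
		if (__netif_tx_trylock(qs->tx_q)) {	/* same lock as the xmit path */
			reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
					     TX_RECLAIM_TIMER_CHUNK);
			__netif_tx_unlock(qs->tx_q);
		}
		/* on contention, skip this pass; the xmit path reclaims as it runs */
	}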
@@ -2870,8 +2865,8 @@ static void sge_timer_tx(unsigned long data)
}
next_period = TX_RECLAIM_PERIOD >>
- (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
- TX_RECLAIM_TIMER_CHUNK);
+ (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+ TX_RECLAIM_TIMER_CHUNK);
mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}
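The re-indented expression implements an adaptive poll rate: every full TX_RECLAIM_TIMER_CHUNK of descriptors reclaimed in one pass halves the next timer period. An illustrative calculation (the constant values are assumed; only the names come from the driver):

	/* If TX_RECLAIM_PERIOD were HZ / 4 and TX_RECLAIM_TIMER_CHUNK were 64,
	 * reclaiming 130 descriptors gives 130 / 64 = 2, so the timer refires
	 * at (HZ / 4) >> 2, i.e. four times sooner while the queues are busy. */
	next_period = TX_RECLAIM_PERIOD >>
		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) / TX_RECLAIM_TIMER_CHUNK);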