Diffstat (limited to 'drivers/net/mlx4/en_tx.c')
-rw-r--r--  drivers/net/mlx4/en_tx.c |  88
1 files changed, 48 insertions(+), 40 deletions(-)
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 8592f8fb847..4afd5993e31 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -203,19 +203,21 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
         /* Optimize the common case when there are no wraparounds */
         if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
-                if (tx_info->linear) {
-                        pci_unmap_single(mdev->pdev,
-                                (dma_addr_t) be64_to_cpu(data->addr),
+                if (!tx_info->inl) {
+                        if (tx_info->linear) {
+                                pci_unmap_single(mdev->pdev,
+                                        (dma_addr_t) be64_to_cpu(data->addr),
                                         be32_to_cpu(data->byte_count),
                                         PCI_DMA_TODEVICE);
-                        ++data;
-                }
+                                ++data;
+                        }
-                for (i = 0; i < frags; i++) {
-                        frag = &skb_shinfo(skb)->frags[i];
-                        pci_unmap_page(mdev->pdev,
-                                (dma_addr_t) be64_to_cpu(data[i].addr),
-                                frag->size, PCI_DMA_TODEVICE);
+                        for (i = 0; i < frags; i++) {
+                                frag = &skb_shinfo(skb)->frags[i];
+                                pci_unmap_page(mdev->pdev,
+                                        (dma_addr_t) be64_to_cpu(data[i].addr),
+                                        frag->size, PCI_DMA_TODEVICE);
+                        }
                 }
                 /* Stamp the freed descriptor */
                 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
@@ -224,27 +226,29 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                 }
         } else {
-                if ((void *) data >= end) {
-                        data = (struct mlx4_wqe_data_seg *)
-                                (ring->buf + ((void *) data - end));
-                }
+                if (!tx_info->inl) {
+                        if ((void *) data >= end) {
+                                data = (struct mlx4_wqe_data_seg *)
+                                        (ring->buf + ((void *) data - end));
+                        }
-                if (tx_info->linear) {
-                        pci_unmap_single(mdev->pdev,
-                                (dma_addr_t) be64_to_cpu(data->addr),
+                        if (tx_info->linear) {
+                                pci_unmap_single(mdev->pdev,
+                                        (dma_addr_t) be64_to_cpu(data->addr),
                                         be32_to_cpu(data->byte_count),
                                         PCI_DMA_TODEVICE);
-                        ++data;
-                }
+                                ++data;
+                        }
-                for (i = 0; i < frags; i++) {
-                        /* Check for wraparound before unmapping */
-                        if ((void *) data >= end)
-                                data = (struct mlx4_wqe_data_seg *) ring->buf;
-                        frag = &skb_shinfo(skb)->frags[i];
-                        pci_unmap_page(mdev->pdev,
+                        for (i = 0; i < frags; i++) {
+                                /* Check for wraparound before unmapping */
+                                if ((void *) data >= end)
+                                        data = (struct mlx4_wqe_data_seg *) ring->buf;
+                                frag = &skb_shinfo(skb)->frags[i];
+                                pci_unmap_page(mdev->pdev,
                                         (dma_addr_t) be64_to_cpu(data->addr),
                                         frag->size, PCI_DMA_TODEVICE);
+                        }
                 }
                 /* Stamp the freed descriptor */
                 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
@@ -379,8 +383,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
         /* Wakeup Tx queue if this ring stopped it */
         if (unlikely(ring->blocked)) {
-                if (((u32) (ring->prod - ring->cons) <=
-                     ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {
+                if ((u32) (ring->prod - ring->cons) <=
+                     ring->size - HEADROOM - MAX_DESC_TXBBS) {
                         /* TODO: support multiqueue netdevs. Currently, we block
                          * when *any* ring is full. Note that:
@@ -404,14 +408,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
         struct mlx4_en_priv *priv = netdev_priv(cq->dev);
         struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-        spin_lock_irq(&ring->comp_lock);
-        cq->armed = 0;
+        if (!spin_trylock(&ring->comp_lock))
+                return;
         mlx4_en_process_tx_cq(cq->dev, cq);
-        if (ring->blocked)
-                mlx4_en_arm_cq(priv, cq);
-        else
-                mod_timer(&cq->timer, jiffies + 1);
-        spin_unlock_irq(&ring->comp_lock);
+        mod_timer(&cq->timer, jiffies + 1);
+        spin_unlock(&ring->comp_lock);
 }
@@ -424,8 +425,10 @@ void mlx4_en_poll_tx_cq(unsigned long data)
         INC_PERF_COUNTER(priv->pstats.tx_poll);
-        netif_tx_lock(priv->dev);
-        spin_lock_irq(&ring->comp_lock);
+        if (!spin_trylock(&ring->comp_lock)) {
+                mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+                return;
+        }
         mlx4_en_process_tx_cq(cq->dev, cq);
         inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
@@ -435,8 +438,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
         if (inflight && priv->port_up)
                 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-        spin_unlock_irq(&ring->comp_lock);
-        netif_tx_unlock(priv->dev);
+        spin_unlock(&ring->comp_lock);
 }
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
@@ -479,7 +481,10 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
         /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
         if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-                mlx4_en_process_tx_cq(priv->dev, cq);
+                if (spin_trylock(&ring->comp_lock)) {
+                        mlx4_en_process_tx_cq(priv->dev, cq);
+                        spin_unlock(&ring->comp_lock);
+                }
 }
 static void *get_frag_ptr(struct sk_buff *skb)
@@ -789,8 +794,11 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                         wmb();
                         data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
                 }
-        } else
+                tx_info->inl = 0;
+        } else {
                 build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+                tx_info->inl = 1;
+        }
         ring->prod += nr_txbb;
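
The locking changes above all follow the same try-lock-or-defer pattern: each path that wants to reap TX completions (the CQ interrupt handler, the polling timer and the xmit-side poll) now calls spin_trylock(&ring->comp_lock) and, when another context already holds the lock, leaves the work for later instead of spinning with spin_lock_irq() or taking netif_tx_lock(). Below is a minimal user-space sketch of that pattern, assuming pthreads in place of kernel spinlocks and timers; tx_ring_example, process_completions() and reschedule_poll() are names invented for this illustration, not driver API.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for the ring state touched by the patch. */
struct tx_ring_example {
        pthread_mutex_t comp_lock;      /* plays the role of ring->comp_lock */
};

/* Placeholder for mlx4_en_process_tx_cq(): reclaim finished descriptors. */
static void process_completions(struct tx_ring_example *ring)
{
        (void)ring;
        printf("processing TX completions\n");
}

/* Placeholder for mod_timer(): arrange for the poll to run again later. */
static void reschedule_poll(void)
{
        printf("lock busy, deferring to the poll timer\n");
}

/*
 * Try-lock-or-defer: if another context (IRQ, timer or xmit path) is already
 * reaping completions, skip the work now rather than blocking on the lock.
 */
static void poll_tx_completions(struct tx_ring_example *ring)
{
        if (pthread_mutex_trylock(&ring->comp_lock) != 0) {
                reschedule_poll();
                return;
        }
        process_completions(ring);
        pthread_mutex_unlock(&ring->comp_lock);
}

int main(void)
{
        struct tx_ring_example ring = { .comp_lock = PTHREAD_MUTEX_INITIALIZER };

        poll_tx_completions(&ring);     /* uncontended: completions are processed inline */
        return 0;
}

The design point, as far as the diff shows, is that a contended caller never blocks: either the current lock holder reaps the completions, or the re-armed per-CQ timer retries shortly afterwards, which is what allows the heavier spin_lock_irq()/netif_tx_lock() pairing to be dropped from the completion paths.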