Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--  drivers/net/cxgb3/sge.c  38
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 49f3de79118..04820590374 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -480,6 +480,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
if (q->pend_cred >= q->credits / 4) {
q->pend_cred = 0;
+ wmb();
t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
}
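
The wmb() added above covers the free-list doorbell path: the descriptor updates that the driver publishes in coherent DMA memory must be visible to the adapter before the MMIO doorbell write, since that write can trigger an immediate DMA fetch of the ring. Below is a minimal, stand-alone sketch of the same producer-side ordering; the ring structure, register name and helpers are illustrative placeholders, not the cxgb3 API.

/* Illustrative only: generic producer-side barrier pattern, not cxgb3 code. */
#include <linux/types.h>
#include <linux/io.h>
#include <asm/barrier.h>

struct demo_desc {
	__le64 addr;
	__le32 len;
	__le32 flags;
};

struct demo_ring {
	struct demo_desc *desc;		/* coherent DMA descriptor ring */
	void __iomem *db;		/* mapped doorbell register (hypothetical) */
	unsigned int pidx;
};

static void demo_post_buffer(struct demo_ring *ring, dma_addr_t buf, u32 len)
{
	struct demo_desc *d = &ring->desc[ring->pidx];

	/* 1. Publish the new descriptor in host memory. */
	d->addr = cpu_to_le64(buf);
	d->len = cpu_to_le32(len);

	/*
	 * 2. Order the descriptor stores before the doorbell write below;
	 *    otherwise the device could fetch a half-written descriptor.
	 */
	wmb();

	/* 3. MMIO doorbell: tell the hardware new work is available. */
	writel(++ring->pidx, ring->db);
}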
@@ -1285,7 +1286,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* We do not use Tx completion interrupts to free DMAd Tx packets.
- * This is good for performamce but means that we rely on new Tx
+ * This is good for performance but means that we rely on new Tx
* packets arriving to run the destructors of completed packets,
* which open up space in their sockets' send queues. Sometimes
* we do not get such new packets causing Tx to stall. A single
@@ -2079,6 +2080,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct sge_fl *fl, int len, int complete)
{
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct port_info *pi = netdev_priv(qs->netdev);
struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
struct skb_frag_struct *rx_frag;
@@ -2116,11 +2118,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
if (!nr_frags) {
offset = 2 + sizeof(struct cpl_rx_pkt);
- qs->lro_va = sd->pg_chunk.va + 2;
- }
- len -= offset;
+ cpl = qs->lro_va = sd->pg_chunk.va + 2;
+
+ if ((pi->rx_offload & T3_RX_CSUM) &&
+ cpl->csum_valid && cpl->csum == htons(0xffff)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else
+ cpl = qs->lro_va;
- prefetch(qs->lro_va);
+ len -= offset;
rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2145,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
return;
skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- cpl = qs->lro_va;
if (unlikely(cpl->vlan_valid)) {
- struct net_device *dev = qs->netdev;
- struct port_info *pi = netdev_priv(dev);
struct vlan_group *grp = pi->vlan_grp;
if (likely(grp != NULL)) {
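
The lro_add_page() changes above stop marking every LRO'd packet CHECKSUM_UNNECESSARY: the checksum status is now taken from the CPL header of the first page chunk and gated on the port's T3_RX_CSUM setting. The snippet below is a generic sketch of that rx-checksum decision; the descriptor layout and helper name are placeholders, not the cxgb3 definitions.

/* Illustrative only: generic rx checksum-offload decision, not cxgb3 code. */
#include <linux/skbuff.h>

struct demo_rx_desc {
	u8 csum_valid;		/* set by hardware when it verified the checksum */
	__be16 csum;		/* packet checksum folded by hardware */
};

static void demo_set_rx_csum(struct sk_buff *skb,
			     const struct demo_rx_desc *d,
			     bool csum_offload_enabled)
{
	if (csum_offload_enabled && d->csum_valid && d->csum == htons(0xffff))
		/* Hardware verified the checksum; the stack can skip it. */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		/* Let the stack verify the checksum in software. */
		skb->ip_summed = CHECKSUM_NONE;
}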
@@ -2282,11 +2287,14 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
while (likely(budget_left && is_new_response(r, q))) {
int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
struct sk_buff *skb = NULL;
- u32 len, flags = ntohl(r->flags);
- __be32 rss_hi = *(const __be32 *)r,
- rss_lo = r->rss_hdr.rss_hash_val;
+ u32 len, flags;
+ __be32 rss_hi, rss_lo;
+ rmb();
eth = r->rss_hdr.opcode == CPL_RX_PKT;
+ rss_hi = *(const __be32 *)r;
+ rss_lo = r->rss_hdr.rss_hash_val;
+ flags = ntohl(r->flags);
if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
@@ -2497,7 +2505,10 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
refill_rspq(adap, q, q->credits);
q->credits = 0;
}
- } while (is_new_response(r, q) && is_pure_response(r));
+ if (!is_new_response(r, q))
+ break;
+ rmb();
+ } while (is_pure_response(r));
if (sleeping)
check_ring_db(adap, qs, sleeping);
@@ -2531,6 +2542,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
if (!is_new_response(r, q))
return -1;
+ rmb();
if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
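
The rmb() calls added to process_responses(), process_pure_responses() and handle_responses() all follow the same consumer-side pattern: first confirm that the response descriptor really belongs to software (is_new_response() tests the generation bit the hardware writes last), then issue a read barrier, and only then load the descriptor's payload fields (flags, RSS hash, opcode). A reduced sketch of that ordering, using illustrative names rather than the real cxgb3 structures, might look like this:

/* Illustrative only: generic consumer-side barrier pattern, not cxgb3 code. */
#include <linux/types.h>
#include <asm/barrier.h>

struct demo_rsp {
	__be32 flags;
	__be32 hash;
	u8 gen;			/* generation bit, written last by hardware */
};

struct demo_rspq {
	struct demo_rsp *desc;
	unsigned int cidx;
	unsigned int size;
	u8 gen;			/* generation value software expects */
};

static bool demo_is_new_response(const struct demo_rsp *r,
				 const struct demo_rspq *q)
{
	return r->gen == q->gen;
}

static void demo_poll(struct demo_rspq *q)
{
	struct demo_rsp *r = &q->desc[q->cidx];

	while (demo_is_new_response(r, q)) {
		/*
		 * The generation-bit read above must complete before the
		 * payload reads below; without the barrier the CPU could
		 * use values loaded before the hardware finished writing
		 * this entry.
		 */
		rmb();

		/* Now it is safe to read the rest of the descriptor. */
		u32 flags = ntohl(r->flags);
		__be32 hash = r->hash;
		/* ... dispatch based on flags/hash here ... */
		(void)flags;
		(void)hash;

		if (++q->cidx == q->size) {
			q->cidx = 0;
			q->gen ^= 1;
		}
		r = &q->desc[q->cidx];
	}
}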