Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--  drivers/net/cxgb3/sge.c  315
1 file changed, 150 insertions, 165 deletions
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index d31791f6029..a7555cb3fa4 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -61,12 +61,19 @@
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define SGE_RX_DROP_THRES 16
+#define RX_RECLAIM_PERIOD (HZ/4)
/*
+ * Max number of Rx buffers we replenish at a time.
+ */
+#define MAX_RX_REFILL 16U
+/*
* Period of the Tx buffer reclaim timer. This timer does not need to run
* frequently as Tx buffers are usually reclaimed by new Tx packets.
*/
#define TX_RECLAIM_PERIOD (HZ / 4)
+#define TX_RECLAIM_TIMER_CHUNK 64U
+#define TX_RECLAIM_CHUNK 16U
/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)
@@ -304,21 +311,25 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
* reclaim_completed_tx - reclaims completed Tx descriptors
* @adapter: the adapter
* @q: the Tx queue to reclaim completed descriptors from
+ * @chunk: maximum number of descriptors to reclaim
*
* Reclaims Tx descriptors that the SGE has indicated it has processed,
* and frees the associated buffers if possible. Called with the Tx
* queue's lock held.
*/
-static inline void reclaim_completed_tx(struct adapter *adapter,
- struct sge_txq *q)
+static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
+ struct sge_txq *q,
+ unsigned int chunk)
{
unsigned int reclaim = q->processed - q->cleaned;
+ reclaim = min(chunk, reclaim);
if (reclaim) {
free_tx_desc(adapter, q, reclaim);
q->cleaned += reclaim;
q->in_use -= reclaim;
}
+ return q->processed - q->cleaned;
}
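
The reworked reclaim path above caps each pass at `chunk` descriptors and reports how many completed descriptors still remain, which the new timer code later uses to pick its next period. A minimal user-space model of that accounting (the struct and helper names below are illustrative, not the driver's real types):

/* Stand-alone model of the chunked reclaim accounting introduced above.
 * The struct and names are illustrative, not the driver's real types. */
#include <stdio.h>

struct txq_model {
	unsigned int processed;	/* descriptors the hardware has completed */
	unsigned int cleaned;	/* descriptors already freed by software */
	unsigned int in_use;	/* descriptors currently outstanding */
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Reclaim at most 'chunk' completed descriptors; return how many remain. */
static unsigned int reclaim_completed_model(struct txq_model *q,
					    unsigned int chunk)
{
	unsigned int reclaim = min_u(chunk, q->processed - q->cleaned);

	if (reclaim) {
		/* free_tx_desc() would unmap and free the buffers here */
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	return q->processed - q->cleaned;
}

int main(void)
{
	struct txq_model q = { .processed = 100, .cleaned = 0, .in_use = 100 };
	unsigned int left = reclaim_completed_model(&q, 16);

	printf("reclaimed 16, %u still pending, %u in use\n", left, q.in_use);
	return 0;
}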
/**
@@ -334,6 +345,18 @@ static inline int should_restart_tx(const struct sge_txq *q)
return q->in_use - r < (q->size >> 1);
}
+static void clear_rx_desc(const struct sge_fl *q, struct rx_sw_desc *d)
+{
+ if (q->use_pages) {
+ if (d->pg_chunk.page)
+ put_page(d->pg_chunk.page);
+ d->pg_chunk.page = NULL;
+ } else {
+ kfree_skb(d->skb);
+ d->skb = NULL;
+ }
+}
+
/**
* free_rx_bufs - free the Rx buffers on an SGE free list
* @pdev: the PCI device associated with the adapter
@@ -351,14 +374,7 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
- if (q->use_pages) {
- if (d->pg_chunk.page)
- put_page(d->pg_chunk.page);
- d->pg_chunk.page = NULL;
- } else {
- kfree_skb(d->skb);
- d->skb = NULL;
- }
+ clear_rx_desc(q, d);
if (++cidx == q->size)
cidx = 0;
}
@@ -423,6 +439,14 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
return 0;
}
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+ if (q->pend_cred >= q->credits / 4) {
+ q->pend_cred = 0;
+ t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+ }
+}
+
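
ring_fl_db() is the other half of the new credit coalescing: refill_fl() and recycle_rx_buf() now only bump q->pend_cred, and the doorbell register is written once the pending count reaches a quarter of the buffers currently on the list, cutting down on MMIO writes. A stand-alone sketch of that batching, with ring_doorbell() standing in for the t3_write_reg() call:

/* Toy model of the doorbell coalescing in ring_fl_db() above. */
#include <stdio.h>

struct fl_model {
	unsigned int credits;	/* buffers currently on the free list */
	unsigned int pend_cred;	/* buffers added since the last doorbell */
};

static unsigned int doorbells;	/* counts simulated MMIO writes */

static void ring_doorbell(void)
{
	doorbells++;
}

static void ring_fl_db_model(struct fl_model *q)
{
	/* Only tell the hardware once a quarter of the list is pending. */
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		ring_doorbell();
	}
}

int main(void)
{
	struct fl_model fl = { .credits = 0, .pend_cred = 0 };
	unsigned int i;

	/* Refill one buffer at a time, as recycle_rx_buf() does. */
	for (i = 0; i < 1024; i++) {
		fl.credits++;
		fl.pend_cred++;
		ring_fl_db_model(&fl);
	}
	printf("1024 buffers posted with %u doorbell writes\n", doorbells);
	return 0;
}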
/**
* refill_fl - refill an SGE free-buffer list
* @adapter: the adapter
@@ -463,10 +487,7 @@ nomem: q->alloc_failed++;
err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
adap->pdev);
if (unlikely(err)) {
- if (!q->use_pages) {
- kfree_skb(sd->skb);
- sd->skb = NULL;
- }
+ clear_rx_desc(q, sd);
break;
}
@@ -478,19 +499,19 @@ nomem: q->alloc_failed++;
sd = q->sdesc;
d = q->desc;
}
- q->credits++;
count++;
}
- wmb();
- if (likely(count))
- t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+
+ q->credits += count;
+ q->pend_cred += count;
+ ring_fl_db(adap, q);
return count;
}
static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
- refill_fl(adap, fl, min(16U, fl->size - fl->credits),
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
GFP_ATOMIC | __GFP_COMP);
}
@@ -515,13 +536,15 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
wmb();
to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
- q->credits++;
if (++q->pidx == q->size) {
q->pidx = 0;
q->gen ^= 1;
}
- t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+
+ q->credits++;
+ q->pend_cred++;
+ ring_fl_db(adap, q);
}
/**
@@ -585,8 +608,8 @@ static void t3_reset_qset(struct sge_qset *q)
memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
q->txq_stopped = 0;
q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
- kfree(q->lro_frag_tbl);
- q->lro_nfrags = q->lro_frag_len = 0;
+ q->rx_reclaim_timer.function = NULL;
+ q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
}
@@ -733,7 +756,9 @@ recycle:
return skb;
}
- if (unlikely(fl->credits < drop_thres))
+ if (unlikely(fl->credits < drop_thres) &&
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
+ GFP_ATOMIC | __GFP_COMP) == 0)
goto recycle;
use_orig_buf:
@@ -813,14 +838,15 @@ recycle:
len - SGE_RX_PULL_LEN);
newskb->len = len;
newskb->data_len = len - SGE_RX_PULL_LEN;
+ newskb->truesize += newskb->data_len;
} else {
skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
sd->pg_chunk.page,
sd->pg_chunk.offset, len);
newskb->len += len;
newskb->data_len += len;
+ newskb->truesize += len;
}
- newskb->truesize += newskb->data_len;
fl->credits--;
/*
@@ -1161,7 +1187,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
txq = netdev_get_tx_queue(dev, qidx);
spin_lock(&q->lock);
- reclaim_completed_tx(adap, q);
+ reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
credits = q->size - q->in_use;
ndesc = calc_tx_descs(skb);
@@ -1570,7 +1596,7 @@ static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
spin_lock(&q->lock);
- again:reclaim_completed_tx(adap, q);
+again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
if (unlikely(ret)) {
@@ -1612,7 +1638,7 @@ static void restart_offloadq(unsigned long data)
struct adapter *adap = pi->adapter;
spin_lock(&q->lock);
- again:reclaim_completed_tx(adap, q);
+again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
while ((skb = skb_peek(&q->sendq)) != NULL) {
unsigned int gen, pidx;
@@ -1938,6 +1964,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb->ip_summed = CHECKSUM_NONE;
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
if (unlikely(p->vlan_valid)) {
struct vlan_group *grp = pi->vlan_grp;
@@ -1945,10 +1972,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
qs->port_stats[SGE_PSTAT_VLANEX]++;
if (likely(grp))
if (lro)
- lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
- grp,
- ntohs(p->vlan),
- p);
+ vlan_gro_receive(&qs->napi, grp,
+ ntohs(p->vlan), skb);
else {
if (unlikely(pi->iscsi_ipv4addr &&
is_arp(skb))) {
@@ -1965,7 +1990,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
dev_kfree_skb_any(skb);
} else if (rq->polling) {
if (lro)
- lro_receive_skb(&qs->lro_mgr, skb, p);
+ napi_gro_receive(&qs->napi, skb);
else {
if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
cxgb3_arp_process(adap, skb);
@@ -1981,59 +2006,6 @@ static inline int is_eth_tcp(u32 rss)
}
/**
- * lro_frame_ok - check if an ingress packet is eligible for LRO
- * @p: the CPL header of the packet
- *
- * Returns true if a received packet is eligible for LRO.
- * The following conditions must be true:
- * - packet is TCP/IP Ethernet II (checked elsewhere)
- * - not an IP fragment
- * - no IP options
- * - TCP/IP checksums are correct
- * - the packet is for this host
- */
-static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
-{
- const struct ethhdr *eh = (struct ethhdr *)(p + 1);
- const struct iphdr *ih = (struct iphdr *)(eh + 1);
-
- return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
- eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
-}
-
-static int t3_get_lro_header(void **eh, void **iph, void **tcph,
- u64 *hdr_flags, void *priv)
-{
- const struct cpl_rx_pkt *cpl = priv;
-
- if (!lro_frame_ok(cpl))
- return -1;
-
- *eh = (struct ethhdr *)(cpl + 1);
- *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
- *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- return 0;
-}
-
-static int t3_get_skb_header(struct sk_buff *skb,
- void **iph, void **tcph, u64 *hdr_flags,
- void *priv)
-{
- void *eh;
-
- return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
-}
-
-static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
- void **iph, void **tcph, u64 *hdr_flags,
- void *priv)
-{
- return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
-}
-
-/**
* lro_add_page - add a page chunk to an LRO session
* @adap: the adapter
* @qs: the associated queue set
@@ -2049,8 +2021,9 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
{
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
struct cpl_rx_pkt *cpl;
- struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
- int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
+ struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
+ int nr_frags = qs->lro_frag_tbl.nr_frags;
+ int frag_len = qs->lro_frag_tbl.len;
int offset = 0;
if (!nr_frags) {
@@ -2064,18 +2037,20 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);
+ prefetch(&qs->lro_frag_tbl);
+
rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
rx_frag->page_offset = sd->pg_chunk.offset + offset;
rx_frag->size = len;
frag_len += len;
- qs->lro_nfrags++;
- qs->lro_frag_len = frag_len;
+ qs->lro_frag_tbl.nr_frags++;
+ qs->lro_frag_tbl.len = frag_len;
if (!complete)
return;
- qs->lro_nfrags = qs->lro_frag_len = 0;
+ qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
cpl = qs->lro_va;
if (unlikely(cpl->vlan_valid)) {
@@ -2084,36 +2059,15 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct vlan_group *grp = pi->vlan_grp;
if (likely(grp != NULL)) {
- lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
- qs->lro_frag_tbl,
- frag_len, frag_len,
- grp, ntohs(cpl->vlan),
- cpl, 0);
- return;
+ vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
+ &qs->lro_frag_tbl);
+ goto out;
}
}
- lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
- frag_len, frag_len, cpl, 0);
-}
+ napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
-/**
- * init_lro_mgr - initialize a LRO manager object
- * @lro_mgr: the LRO manager object
- */
-static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
-{
- lro_mgr->dev = qs->netdev;
- lro_mgr->features = LRO_F_NAPI;
- lro_mgr->frag_align_pad = NET_IP_ALIGN;
- lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
- lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
- lro_mgr->max_desc = T3_MAX_LRO_SES;
- lro_mgr->lro_arr = qs->lro_desc;
- lro_mgr->get_frag_header = t3_get_frag_header;
- lro_mgr->get_skb_header = t3_get_skb_header;
- lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
- if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
- lro_mgr->max_aggr = MAX_SKB_FRAGS;
+out:
+ qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
}
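
With the inline LRO manager gone, lro_add_page() now just accumulates page fragments in qs->lro_frag_tbl and, once the final piece of a frame arrives, hands the whole table to GRO (vlan_gro_frags()/napi_gro_frags()) and resets it. A hedged user-space sketch of that accumulate-then-flush pattern; the types and the flush_to_gro() helper are made up for illustration:

/* Sketch of the accumulate-and-flush pattern used above: page fragments
 * are collected per packet and handed off once the last piece arrives. */
#include <stdio.h>

#define MAX_FRAGS 16

struct frag { unsigned int offset, size; };

struct frag_tbl {
	struct frag frags[MAX_FRAGS];
	unsigned int nr_frags;
	unsigned int len;
};

static void flush_to_gro(const struct frag_tbl *tbl)
{
	/* In the driver this is vlan_gro_frags()/napi_gro_frags(). */
	printf("flush: %u frags, %u bytes\n", tbl->nr_frags, tbl->len);
}

static void add_page_model(struct frag_tbl *tbl, unsigned int offset,
			   unsigned int len, int complete)
{
	struct frag *f = &tbl->frags[tbl->nr_frags++];

	f->offset = offset;
	f->size = len;
	tbl->len += len;

	if (!complete)
		return;

	flush_to_gro(tbl);
	/* Reset for the next packet, as the driver does at 'out:'. */
	tbl->nr_frags = tbl->len = 0;
}

int main(void)
{
	struct frag_tbl tbl = { .nr_frags = 0, .len = 0 };

	add_page_model(&tbl, 0, 1500, 0);	/* first chunk of the frame */
	add_page_model(&tbl, 1536, 300, 1);	/* last chunk: flush to GRO */
	return 0;
}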
/**
@@ -2356,10 +2310,6 @@ next_fl:
}
deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
- lro_flush_all(&qs->lro_mgr);
- qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
- qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
- qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
if (sleeping)
check_ring_db(adap, qs, sleeping);
@@ -2775,7 +2725,8 @@ irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
*/
void t3_sge_err_intr_handler(struct adapter *adapter)
{
- unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
+ unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
+ ~F_FLEMPTY;
if (status & SGE_PARERR)
CH_ALERT(adapter, "SGE parity error (0x%x)\n",
@@ -2805,13 +2756,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
}
/**
- * sge_timer_cb - perform periodic maintenance of an SGE qset
+ * sge_timer_tx - perform periodic maintenance of an SGE qset
* @data: the SGE queue set to maintain
*
* Runs periodically from a timer to perform maintenance of an SGE queue
* set. It performs two tasks:
*
- * a) Cleans up any completed Tx descriptors that may still be pending.
+ * Cleans up any completed Tx descriptors that may still be pending.
* Normal descriptor cleanup happens when new packets are added to a Tx
* queue so this timer is relatively infrequent and does any cleanup only
* if the Tx queue has not seen any new packets in a while. We make a
@@ -2821,51 +2772,87 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
* up). Since control queues use immediate data exclusively we don't
* bother cleaning them up here.
*
- * b) Replenishes Rx queues that have run out due to memory shortage.
- * Normally new Rx buffers are added when existing ones are consumed but
- * when out of memory a queue can become empty. We try to add only a few
- * buffers here, the queue will be replenished fully as these new buffers
- * are used up if memory shortage has subsided.
*/
-static void sge_timer_cb(unsigned long data)
+static void sge_timer_tx(unsigned long data)
{
- spinlock_t *lock;
struct sge_qset *qs = (struct sge_qset *)data;
- struct adapter *adap = qs->adap;
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+ unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
+ unsigned long next_period;
if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
- reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
+ tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+ TX_RECLAIM_TIMER_CHUNK);
spin_unlock(&qs->txq[TXQ_ETH].lock);
}
if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
- reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
+ tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
+ TX_RECLAIM_TIMER_CHUNK);
spin_unlock(&qs->txq[TXQ_OFLD].lock);
}
- lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
- &adap->sge.qs[0].rspq.lock;
- if (spin_trylock_irq(lock)) {
- if (!napi_is_scheduled(&qs->napi)) {
- u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
-
- if (qs->fl[0].credits < qs->fl[0].size)
- __refill_fl(adap, &qs->fl[0]);
- if (qs->fl[1].credits < qs->fl[1].size)
- __refill_fl(adap, &qs->fl[1]);
-
- if (status & (1 << qs->rspq.cntxt_id)) {
- qs->rspq.starved++;
- if (qs->rspq.credits) {
- refill_rspq(adap, &qs->rspq, 1);
- qs->rspq.credits--;
- qs->rspq.restarted++;
- t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
- 1 << qs->rspq.cntxt_id);
- }
+
+ next_period = TX_RECLAIM_PERIOD >>
+ (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
+ TX_RECLAIM_TIMER_CHUNK);
+ mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
+}
+
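
The return value of the bounded reclaim feeds the new adaptive reschedule: the timer halves its period for every TX_RECLAIM_TIMER_CHUNK (64) descriptors still outstanding after a pass, so a backed-up queue is revisited sooner. A small model of that calculation, assuming HZ is 250 purely for the example:

/* Model of the adaptive reschedule in sge_timer_tx(): the more Tx
 * descriptors are still pending after a bounded pass, the sooner the
 * timer fires again.  HZ is assumed to be 250 for illustration only. */
#include <stdio.h>

#define HZ			250U	/* assumption for the example */
#define TX_RECLAIM_PERIOD	(HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK	64U

static unsigned int next_period(unsigned int pending)
{
	return TX_RECLAIM_PERIOD >> (pending / TX_RECLAIM_TIMER_CHUNK);
}

int main(void)
{
	unsigned int backlog[] = { 0, 63, 64, 128, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(backlog) / sizeof(backlog[0]); i++)
		printf("pending=%3u -> next timer in %u jiffies\n",
		       backlog[i], next_period(backlog[i]));
	return 0;
}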
+/*
+ * sge_timer_rx - perform periodic maintenance of an SGE qset
+ * @data: the SGE queue set to maintain
+ *
+ * a) Replenishes Rx queues that have run out due to memory shortage.
+ * Normally new Rx buffers are added when existing ones are consumed but
+ * when out of memory a queue can become empty. We try to add only a few
+ * buffers here, the queue will be replenished fully as these new buffers
+ * are used up if memory shortage has subsided.
+ *
+ * b) Return coalesced response queue credits in case a response queue is
+ * starved.
+ *
+ */
+static void sge_timer_rx(unsigned long data)
+{
+ spinlock_t *lock;
+ struct sge_qset *qs = (struct sge_qset *)data;
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+ u32 status;
+
+ lock = adap->params.rev > 0 ?
+ &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
+
+ if (!spin_trylock_irq(lock))
+ goto out;
+
+ if (napi_is_scheduled(&qs->napi))
+ goto unlock;
+
+ if (adap->params.rev < 4) {
+ status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
+
+ if (status & (1 << qs->rspq.cntxt_id)) {
+ qs->rspq.starved++;
+ if (qs->rspq.credits) {
+ qs->rspq.credits--;
+ refill_rspq(adap, &qs->rspq, 1);
+ qs->rspq.restarted++;
+ t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
+ 1 << qs->rspq.cntxt_id);
}
}
- spin_unlock_irq(lock);
}
- mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+ if (qs->fl[0].credits < qs->fl[0].size)
+ __refill_fl(adap, &qs->fl[0]);
+ if (qs->fl[1].credits < qs->fl[1].size)
+ __refill_fl(adap, &qs->fl[1]);
+
+unlock:
+ spin_unlock_irq(lock);
+out:
+ mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
}
/**
@@ -2906,10 +2893,10 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
{
int i, avail, ret = -ENOMEM;
struct sge_qset *q = &adapter->sge.qs[id];
- struct net_lro_mgr *lro_mgr = &q->lro_mgr;
init_qset_cntxt(q, id);
- setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
+ setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
+ setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
sizeof(struct rx_desc),
@@ -2986,10 +2973,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->fl[0].order = FL0_PG_ORDER;
q->fl[1].order = FL1_PG_ORDER;
- q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
- sizeof(struct skb_frag_struct),
- GFP_KERNEL);
- q->lro_nfrags = q->lro_frag_len = 0;
spin_lock_irq(&adapter->sge.reg_lock);
/* FL threshold comparison uses < */
@@ -3041,8 +3024,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->tx_q = netdevq;
t3_update_qset_coalesce(q, p);
- init_lro_mgr(q, lro_mgr);
-
avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
GFP_KERNEL | __GFP_COMP);
if (!avail) {
@@ -3064,6 +3045,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
V_NEWTIMER(q->rspq.holdoff_tmr));
mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+ mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
+
return 0;
err_unlock:
@@ -3088,6 +3071,8 @@ void t3_stop_sge_timers(struct adapter *adap)
if (q->tx_reclaim_timer.function)
del_timer_sync(&q->tx_reclaim_timer);
+ if (q->rx_reclaim_timer.function)
+ del_timer_sync(&q->rx_reclaim_timer);
}
}