author     Matt Mackall <mpm@selenic.com>           2005-08-11 19:25:54 -0700
committer  David S. Miller <davem@davemloft.net>    2005-08-11 19:25:54 -0700
commit     0db1d6fc1ea051af49ebe03c503d23996a7c5bbb (patch)
tree       6afab02002a46b045a3b8769342ad277402f0d95
parent     f0d3459d0722782c7d9d0e35a1ed0815e75fcde5 (diff)
[NETPOLL]: add retry timeout
Add limited retry logic to netpoll_send_skb.

Each time we attempt to send, decrement our per-device retry counter.
On every successful send, we reset the counter.

We delay 50us between attempts with up to 20000 retries for a total of
1 second. After we've exhausted our retries, subsequent failed attempts
will try only once until reset by success.

Signed-off-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/netpoll.h |  1
-rw-r--r--  net/core/netpoll.c      | 13
2 files changed, 11 insertions, 3 deletions
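As a reading aid, the following is a minimal userspace sketch of the retry scheme the commit message describes, not the kernel code itself: try_send(), the file-scope tries variable, and usleep() are stand-ins invented for illustration. The real patch decrements npinfo->tries at the top of the do/while loop in netpoll_send_skb(), waits with udelay(50), and refills the counter to MAX_RETRIES after a successful hard_start_xmit().

#include <stdio.h>
#include <unistd.h>

#define MAX_RETRIES 20000        /* 20000 attempts * 50us ~= 1 second */

static int tries = MAX_RETRIES;  /* stand-in for the per-device npinfo->tries */

/* Stand-in transmit attempt: nonzero means "device busy, try again". */
static int try_send(void)
{
        static int busy = 3;     /* pretend the device is busy a few times */
        return busy-- > 0;
}

static void send_with_retry(void)
{
        do {
                tries--;

                if (!try_send()) {
                        tries = MAX_RETRIES;  /* success: refill the budget */
                        printf("sent\n");
                        return;
                }

                usleep(50);      /* mirrors the udelay(50) between attempts */
        } while (tries > 0);

        /* Budget exhausted: the next call still makes one attempt, and any
         * success resets the counter, as the commit message describes. */
        printf("gave up for now\n");
}

int main(void)
{
        send_with_retry();
        return 0;
}

The do/while shape is what produces the "try only once until reset by success" behaviour: once the counter is exhausted, each later call still runs the loop body a single time, and the first successful send refills it to MAX_RETRIES.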
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index bcd0ac33f59..be68d94b03d 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -26,6 +26,7 @@ struct netpoll {
 struct netpoll_info {
         spinlock_t poll_lock;
         int poll_owner;
+        int tries;
         int rx_flags;
         spinlock_t rx_lock;
         struct netpoll *rx_np; /* netpoll that registered an rx_hook */
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 59ed186e4f4..d09affdbad3 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000

 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -265,7 +266,8 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                 return;
         }

-        while (1) {
+        do {
+                npinfo->tries--;
                 spin_lock(&np->dev->xmit_lock);
                 np->dev->xmit_lock_owner = smp_processor_id();
@@ -277,6 +279,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                         np->dev->xmit_lock_owner = -1;
                         spin_unlock(&np->dev->xmit_lock);

                         netpoll_poll(np);
+                        udelay(50);
                         continue;
                 }
@@ -285,12 +288,15 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
                 spin_unlock(&np->dev->xmit_lock);

                 /* success */
-                if(!status)
+                if(!status) {
+                        npinfo->tries = MAX_RETRIES; /* reset */
                         return;
+                }

                 /* transmit busy */
                 netpoll_poll(np);
-        }
+                udelay(50);
+        } while (npinfo->tries > 0);
 }

 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -642,6 +648,7 @@ int netpoll_setup(struct netpoll *np)
                 npinfo->rx_np = NULL;
                 npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
                 npinfo->poll_owner = -1;
+                npinfo->tries = MAX_RETRIES;
                 npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
         } else
                 npinfo = ndev->npinfo;