Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/devinet.c          |  5
-rw-r--r--  net/ipv4/ip_options.c       |  7
-rw-r--r--  net/ipv4/ipvs/ip_vs_xmit.c  |  2
-rw-r--r--  net/ipv4/tcp_bic.c          |  2
-rw-r--r--  net/ipv4/tcp_cubic.c        | 48
-rw-r--r--  net/ipv4/tcp_htcp.c         | 10
-rw-r--r--  net/ipv4/tcp_illinois.c     |  8
-rw-r--r--  net/ipv4/tcp_input.c        | 47
-rw-r--r--  net/ipv4/tcp_lp.c           |  6
-rw-r--r--  net/ipv4/tcp_vegas.c        |  6
-rw-r--r--  net/ipv4/tcp_vegas.h        |  2
-rw-r--r--  net/ipv4/tcp_veno.c         |  6
-rw-r--r--  net/ipv4/tcp_westwood.c     |  7
-rw-r--r--  net/ipv4/tcp_yeah.c         |  4
14 files changed, 81 insertions(+), 79 deletions(-)
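
What ties all fourteen files together is a change to the congestion-control API: the pkts_acked hook now receives its RTT sample directly as a signed microsecond count (s32 rtt_us, negative when no usable sample exists) instead of a ktime_t send timestamp that every module had to convert itself. A minimal userspace sketch of the new calling convention, using stand-in types and an invented srtt-style smoother (nothing below is kernel API, it only mirrors the contract):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's per-socket CA state; illustration only. */
struct demo_ca {
	uint32_t srtt_us;	/* smoothed RTT, 0 until first sample */
};

/* New-style hook: rtt_us < 0 signals "no valid sample"
 * (duplicate ACK, or the ACKed data had been retransmitted). */
static void demo_pkts_acked(struct demo_ca *ca, uint32_t pkts_acked, int32_t rtt_us)
{
	(void)pkts_acked;
	if (rtt_us < 0)
		return;			/* discard ambiguous samples */
	if (ca->srtt_us == 0) {
		ca->srtt_us = rtt_us;	/* first measurement */
	} else {
		int32_t err = rtt_us - (int32_t)ca->srtt_us;
		ca->srtt_us += err / 8;	/* srtt += (rtt - srtt) / 8 */
	}
}

int main(void)
{
	struct demo_ca ca = { 0 };
	int32_t samples[] = { 40000, -1, 48000, -1, 36000 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		demo_pkts_acked(&ca, 1, samples[i]);
	printf("srtt = %u us\n", ca.srtt_us);	/* prints 40375 */
	return 0;
}

Each module below reduces to the same pattern: bail out on rtt_us < 0, otherwise feed the sample into its own filter.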
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index abf6352f990..5b77bdaa57d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1056,10 +1056,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
if (!in_dev) {
if (event == NETDEV_REGISTER) {
in_dev = inetdev_init(dev);
+ if (!in_dev)
+ return notifier_from_errno(-ENOMEM);
if (dev == &loopback_dev) {
- if (!in_dev)
- panic("devinet: "
- "Failed to create loopback\n");
IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 251346828cb..2f14745a9e1 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -513,11 +513,8 @@ void ip_options_undo(struct ip_options * opt)
static struct ip_options *ip_options_get_alloc(const int optlen)
{
- struct ip_options *opt = kmalloc(sizeof(*opt) + ((optlen + 3) & ~3),
- GFP_KERNEL);
- if (opt)
- memset(opt, 0, sizeof(*opt));
- return opt;
+ return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3),
+ GFP_KERNEL);
}
static int ip_options_get_finish(struct ip_options **optp,
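
Two properties make this hunk a pure simplification: kzalloc(n, flags) behaves like kmalloc(n, flags) followed by memset(p, 0, n), and (optlen + 3) & ~3 rounds the option length up to the next multiple of 4 so the buffer stays word-aligned. (One subtle difference: the old code zeroed only the struct header, whereas kzalloc also zeroes the option bytes.) The rounding expression, checked standalone:

#include <stdio.h>

/* Round up to a multiple of 4, as in ip_options_get_alloc(). */
static unsigned int align4(unsigned int optlen)
{
	return (optlen + 3) & ~3u;
}

int main(void)
{
	for (unsigned int len = 0; len <= 8; len++)
		printf("optlen %u -> header + %u bytes\n", len, align4(len));
	return 0;
}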
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index 900ce29db38..666e080a74a 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -128,7 +128,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
#define IP_VS_XMIT(skb, rt) \
do { \
(skb)->ipvs_property = 1; \
- (skb)->ip_summed = CHECKSUM_NONE; \
+ skb_forward_csum(skb); \
NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, (skb), NULL, \
(rt)->u.dst.dev, dst_output); \
} while (0)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 519de091a94..4586211e375 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -206,7 +206,7 @@ static void bictcp_state(struct sock *sk, u8 new_state)
/* Track delayed acknowledgment ratio using sliding window
* ratio = (15*ratio + sample) / 16
*/
-static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
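
The sliding-window comment above bictcp_acked() (repeated in the cubic copy further down) describes a fixed-point EWMA: delayed_ack holds 16 times the smoothed packets-per-ACK ratio (ACK_RATIO_SHIFT = 4), and the subtract-then-add pair in the function body computes ratio = (15*ratio + sample)/16 entirely in integer math. A standalone sketch of that update; the unsigned wraparound in the intermediate cnt is intentional, exactly as in the kernel code:

#include <stdio.h>
#include <stdint.h>

#define ACK_RATIO_SHIFT 4	/* delayed_ack holds 16 * ratio */

/* EWMA update used by bictcp_acked():
 * ratio = (15*ratio + sample) / 16, kept in fixed point. */
static void ack_ratio_update(uint32_t *delayed_ack, uint32_t cnt)
{
	cnt -= *delayed_ack >> ACK_RATIO_SHIFT;	/* cnt - ratio (may wrap) */
	*delayed_ack += cnt;			/* 16*ratio += cnt - ratio */
}

int main(void)
{
	uint32_t delayed_ack = 2 << ACK_RATIO_SHIFT;	/* start at ratio 2 */

	/* Feed a run of 1-packet ACKs; the ratio decays toward 1. */
	for (int i = 0; i < 32; i++)
		ack_ratio_update(&delayed_ack, 1);
	printf("smoothed ratio ~= %u (fixed point %u)\n",
	       delayed_ack >> ACK_RATIO_SHIFT, delayed_ack);
	return 0;
}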
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index d17da30d82d..485d7ea35f7 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -246,38 +246,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
ca->cnt = 1;
}
-
-/* Keep track of minimum rtt */
-static inline void measure_delay(struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
- u32 delay;
-
- /* No time stamp */
- if (!(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) ||
- /* Discard delay samples right after fast recovery */
- (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
- return;
-
- delay = (tcp_time_stamp - tp->rx_opt.rcv_tsecr)<<3;
- if (delay == 0)
- delay = 1;
-
- /* first time call or link delay decreases */
- if (ca->delay_min == 0 || ca->delay_min > delay)
- ca->delay_min = delay;
-}
-
static void bictcp_cong_avoid(struct sock *sk, u32 ack,
u32 in_flight, int data_acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
- if (data_acked)
- measure_delay(sk);
-
if (!tcp_is_cwnd_limited(sk, in_flight))
return;
@@ -334,17 +308,33 @@ static void bictcp_state(struct sock *sk, u8 new_state)
/* Track delayed acknowledgment ratio using sliding window
* ratio = (15*ratio + sample) / 16
*/
-static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct bictcp *ca = inet_csk_ca(sk);
+ u32 delay;
if (cnt > 0 && icsk->icsk_ca_state == TCP_CA_Open) {
- struct bictcp *ca = inet_csk_ca(sk);
cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
ca->delayed_ack += cnt;
}
-}
+ /* Some calls are for duplicates without timestamps */
+ if (rtt_us < 0)
+ return;
+
+ /* Discard delay samples right after fast recovery */
+ if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ return;
+
+ delay = usecs_to_jiffies(rtt_us) << 3;
+ if (delay == 0)
+ delay = 1;
+
+ /* first time call or link delay decreases */
+ if (ca->delay_min == 0 || ca->delay_min > delay)
+ ca->delay_min = delay;
+}
static struct tcp_congestion_ops cubictcp = {
.init = bictcp_init,
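
With measure_delay() folded into bictcp_acked(), cubic no longer reconstructs delay from TCP timestamp echoes; it converts the microsecond sample to jiffies, keeps three extra fraction bits (the << 3, so units of 1/8 jiffy, matching srtt's scaling), and min-filters the result into delay_min. A sketch of that conversion and filter, assuming HZ = 1000 and a round-up usecs_to_jiffies like the kernel's:

#include <stdio.h>
#include <stdint.h>

#define HZ 1000		/* assumed tick rate for this sketch */

static uint32_t usecs_to_jiffies(uint32_t us)
{
	return (us + (1000000 / HZ) - 1) / (1000000 / HZ);	/* round up */
}

/* Mirror of the new bictcp_acked() delay tracking:
 * delay is jiffies << 3, i.e. 1/8-jiffy resolution. */
static void track_delay(uint32_t *delay_min, int32_t rtt_us)
{
	uint32_t delay;

	if (rtt_us < 0)
		return;				/* no sample on this ACK */
	delay = usecs_to_jiffies(rtt_us) << 3;
	if (delay == 0)
		delay = 1;			/* never store zero */
	if (*delay_min == 0 || *delay_min > delay)
		*delay_min = delay;
}

int main(void)
{
	uint32_t delay_min = 0;
	int32_t samples[] = { 12000, 9000, -1, 20000 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		track_delay(&delay_min, samples[i]);
	printf("delay_min = %u (1/8 jiffies)\n", delay_min);	/* 9 ms -> 72 */
	return 0;
}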
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 08a02e6045c..b66556c0a5b 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -76,12 +76,11 @@ static u32 htcp_cwnd_undo(struct sock *sk)
return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta);
}
-static inline void measure_rtt(struct sock *sk)
+static inline void measure_rtt(struct sock *sk, u32 srtt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
struct htcp *ca = inet_csk_ca(sk);
- u32 srtt = tp->srtt >> 3;
/* keep track of minimum RTT seen so far, minRTT is zero at first */
if (ca->minRTT > srtt || !ca->minRTT)
@@ -98,7 +97,7 @@ static inline void measure_rtt(struct sock *sk)
}
}
-static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
@@ -108,6 +107,9 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, ktime_t last)
if (icsk->icsk_ca_state == TCP_CA_Open)
ca->pkts_acked = pkts_acked;
+ if (rtt > 0)
+ measure_rtt(sk, usecs_to_jiffies(rtt));
+
if (!use_bandwidth_switch)
return;
@@ -237,8 +239,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack,
if (tp->snd_cwnd <= tp->snd_ssthresh)
tcp_slow_start(tp);
else {
- measure_rtt(sk);
-
/* In dangerous area, increase slowly.
* In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
*/
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index cc5de6f69d4..64f1cbaf96e 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -83,18 +83,16 @@ static void tcp_illinois_init(struct sock *sk)
}
/* Measure RTT for each ack. */
-static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
{
struct illinois *ca = inet_csk_ca(sk);
- u32 rtt;
ca->acked = pkts_acked;
- if (ktime_equal(last, net_invalid_timestamp()))
+ /* dup ack, no rtt sample */
+ if (rtt < 0)
return;
- rtt = ktime_to_us(net_timedelta(last));
-
/* ignore bogus values, this prevents wraparound in alpha math */
if (rtt > RTT_MAX)
rtt = RTT_MAX;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fec8a7a4dba..378ca8a086a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1851,19 +1851,22 @@ static inline u32 tcp_cwnd_min(const struct sock *sk)
}
/* Decrease cwnd each second ack. */
-static void tcp_cwnd_down(struct sock *sk)
+static void tcp_cwnd_down(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
int decr = tp->snd_cwnd_cnt + 1;
- tp->snd_cwnd_cnt = decr&1;
- decr >>= 1;
+ if ((flag&FLAG_FORWARD_PROGRESS) ||
+ (IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
+ tp->snd_cwnd_cnt = decr&1;
+ decr >>= 1;
- if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
- tp->snd_cwnd -= decr;
+ if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
+ tp->snd_cwnd -= decr;
- tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ }
}
/* Nothing was retransmitted or returned timestamp is less
@@ -2060,7 +2063,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
}
tcp_moderate_cwnd(tp);
} else {
- tcp_cwnd_down(sk);
+ tcp_cwnd_down(sk, flag);
}
}
@@ -2109,7 +2112,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
+ int is_dupack = (tp->snd_una == prior_snd_una &&
+ (!(flag&FLAG_NOT_DUP) ||
+ ((flag&FLAG_DATA_SACKED) &&
+ (tp->fackets_out > tp->reordering))));
/* Some technical things:
* 1. Reno does not count dupacks (sacked_out) automatically. */
@@ -2260,7 +2266,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
if (is_dupack || tcp_head_timedout(sk))
tcp_update_scoreboard(sk);
- tcp_cwnd_down(sk);
+ tcp_cwnd_down(sk, flag);
tcp_xmit_retransmit_queue(sk);
}
@@ -2490,12 +2496,23 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
tcp_ack_update_rtt(sk, acked, seq_rtt);
tcp_ack_packets_out(sk);
- /* Is the ACK triggering packet unambiguous? */
- if (acked & FLAG_RETRANS_DATA_ACKED)
- last_ackt = net_invalid_timestamp();
+ if (ca_ops->pkts_acked) {
+ s32 rtt_us = -1;
+
+ /* Is the ACK triggering packet unambiguous? */
+ if (!(acked & FLAG_RETRANS_DATA_ACKED)) {
+ /* High resolution needed and available? */
+ if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
+ !ktime_equal(last_ackt,
+ net_invalid_timestamp()))
+ rtt_us = ktime_us_delta(ktime_get_real(),
+ last_ackt);
+ else if (seq_rtt > 0)
+ rtt_us = jiffies_to_usecs(seq_rtt);
+ }
- if (ca_ops->pkts_acked)
- ca_ops->pkts_acked(sk, pkts_acked, last_ackt);
+ ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
+ }
}
#if FASTRETRANS_DEBUG > 0
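
The tcp_clean_rtx_queue() hunk centralizes the sample-selection policy the modules used to duplicate: retransmitted data yields no sample (-1), a high-resolution ktime delta is preferred when the module advertises TCP_CONG_RTT_STAMP, and the jiffies-granularity seq_rtt is the coarse fallback. The same decision restated as a standalone function (types simplified, and the flag values here are placeholders, not the kernel's):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FLAG_RETRANS_DATA_ACKED 0x01	/* placeholder value */
#define TCP_CONG_RTT_STAMP      0x02	/* placeholder value */

/* Pick the rtt_us handed to ca_ops->pkts_acked():
 * -1 = ambiguous or unavailable, else microseconds. */
static int32_t pick_rtt_us(int acked_flags, int ca_flags,
			   bool have_ktime, int64_t ktime_delta_us,
			   int32_t seq_rtt_jiffies, long usec_per_jiffy)
{
	if (acked_flags & FLAG_RETRANS_DATA_ACKED)
		return -1;		/* retransmit: sample is ambiguous */
	if ((ca_flags & TCP_CONG_RTT_STAMP) && have_ktime)
		return (int32_t)ktime_delta_us;		/* high resolution */
	if (seq_rtt_jiffies > 0)	/* coarse jiffies fallback */
		return (int32_t)(seq_rtt_jiffies * usec_per_jiffy);
	return -1;
}

int main(void)
{
	printf("%d\n", pick_rtt_us(FLAG_RETRANS_DATA_ACKED, 0, false, 0, 5, 1000));	/* -1 */
	printf("%d\n", pick_rtt_us(0, TCP_CONG_RTT_STAMP, true, 4321, 5, 1000));	/* 4321 */
	printf("%d\n", pick_rtt_us(0, 0, false, 0, 5, 1000));				/* 5000 */
	return 0;
}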
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 80e140e3ec2..e7f5ef92cbd 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -260,13 +260,13 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
* newReno in increase case.
* We work it out by following the idea from TCP-LP's paper directly
*/
-static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
+static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
- if (!ktime_equal(last, net_invalid_timestamp()))
- tcp_lp_rtt_sample(sk, ktime_to_us(net_timedelta(last)));
+ if (rtt_us > 0)
+ tcp_lp_rtt_sample(sk, rtt_us);
/* calc inference */
if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 914e0307f7a..b49dedcda52 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -112,16 +112,16 @@ EXPORT_SYMBOL_GPL(tcp_vegas_init);
* o min-filter RTT samples from a much longer window (forever for now)
* to find the propagation delay (baseRTT)
*/
-void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
struct vegas *vegas = inet_csk_ca(sk);
u32 vrtt;
- if (ktime_equal(last, net_invalid_timestamp()))
+ if (rtt_us < 0)
return;
/* Never allow zero rtt or baseRTT */
- vrtt = ktime_to_us(net_timedelta(last)) + 1;
+ vrtt = rtt_us + 1;
/* Filter to find propagation delay: */
if (vrtt < vegas->baseRTT)
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index 502fa818363..6c0eea2f824 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -17,7 +17,7 @@ struct vegas {
extern void tcp_vegas_init(struct sock *sk);
extern void tcp_vegas_state(struct sock *sk, u8 ca_state);
-extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last);
+extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
extern void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
extern void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 7a55ddf8603..8fb2aee0b1a 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -69,16 +69,16 @@ static void tcp_veno_init(struct sock *sk)
}
/* Do rtt sampling needed for Veno. */
-static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
struct veno *veno = inet_csk_ca(sk);
u32 vrtt;
- if (ktime_equal(last, net_invalid_timestamp()))
+ if (rtt_us < 0)
return;
/* Never allow zero rtt or baseRTT */
- vrtt = ktime_to_us(net_timedelta(last)) + 1;
+ vrtt = rtt_us + 1;
/* Filter to find propagation delay: */
if (vrtt < veno->basertt)
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index e61e09dd513..20151d6a624 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -100,11 +100,12 @@ static void westwood_filter(struct westwood *w, u32 delta)
* Called after processing group of packets.
* but all westwood needs is the last sample of srtt.
*/
-static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
{
struct westwood *w = inet_csk_ca(sk);
- if (cnt > 0)
- w->rtt = tcp_sk(sk)->srtt >> 3;
+
+ if (rtt > 0)
+ w->rtt = usecs_to_jiffies(rtt);
}
/*
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index c04b7c6ec70..c107fba7430 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -58,7 +58,7 @@ static void tcp_yeah_init(struct sock *sk)
}
-static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct yeah *yeah = inet_csk_ca(sk);
@@ -66,7 +66,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
if (icsk->icsk_ca_state == TCP_CA_Open)
yeah->pkts_acked = pkts_acked;
- tcp_vegas_pkts_acked(sk, pkts_acked, last);
+ tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
}
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,