Diffstat (limited to 'net/sctp')
-rw-r--r--  net/sctp/protocol.c       2
-rw-r--r--  net/sctp/sm_statefuns.c   2
-rw-r--r--  net/sctp/socket.c        11
-rw-r--r--  net/sctp/ulpevent.c       2
-rw-r--r--  net/sctp/ulpqueue.c       2
5 files changed, 10 insertions, 9 deletions
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e466e00b9a9..b9219649502 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1109,7 +1109,7 @@ SCTP_STATIC __init int sctp_init(void)
 	sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1));
 	sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
 
-	sysctl_sctp_wmem[0] = SK_STREAM_MEM_QUANTUM;
+	sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
 	sysctl_sctp_wmem[1] = 16*1024;
 	sysctl_sctp_wmem[2] = max(64*1024, max_share);
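
Context for the hunk above: each sysctl array is a [min, default, max] triple in bytes, and the minimum write quota now starts at one SK_MEM_QUANTUM, which in this series is one page. A minimal standalone sketch of the page-granular rounding the new accounting applies; the 4 KiB value and the helper name quanta_needed are illustrative assumptions, not kernel API:

#include <stdio.h>

#define SK_MEM_QUANTUM 4096	/* assumed 4 KiB page; arch-dependent in reality */

/* Hypothetical stand-in for the kernel's sk_mem_pages() rounding: quota
 * is granted in whole quanta, so even a small skb costs a full page. */
static int quanta_needed(int bytes)
{
	return (bytes + SK_MEM_QUANTUM - 1) / SK_MEM_QUANTUM;
}

int main(void)
{
	printf("%d\n", quanta_needed(1500));		/* 1 */
	printf("%d\n", quanta_needed(16 * 1024));	/* 4: the default wmem above */
	return 0;
}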
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 511d8c9a171..b1267519183 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5844,7 +5844,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	/*
 	 * Also try to renege to limit our memory usage in the event that
 	 * we are under memory pressure
-	 * If we can't renege, don't worry about it, the sk_stream_rmem_schedule
+	 * If we can't renege, don't worry about it, the sk_rmem_schedule
 	 * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
 	 * memory usage too much
 	 */
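
The renamed comment describes a two-stage policy on the receive path: under memory pressure the association first tries to renege previously queued data, and if that fails the sk_rmem_schedule() check in sctp_ulpevent_make_rcvmsg() refuses the new frame. A compilable sketch of that renege-then-drop flow; the model struct, thresholds, and helper names are invented for illustration:

#include <stdio.h>
#include <stdbool.h>

struct sk_model { int rmem_used; int rcvbuf; };

/* Illustrative renege: pretend we can give back half the queued data. */
static void renege(struct sk_model *sk)
{
	sk->rmem_used -= sk->rmem_used / 2;
}

/* Mirrors the role of sk_rmem_schedule(): no quota, no frame. */
static bool rmem_schedule(const struct sk_model *sk, int size)
{
	return sk->rmem_used + size <= sk->rcvbuf;
}

int main(void)
{
	struct sk_model sk = { .rmem_used = 90, .rcvbuf = 100 };
	int truesize = 40;

	if (!rmem_schedule(&sk, truesize))
		renege(&sk);			/* best effort; failure is fine */
	if (!rmem_schedule(&sk, truesize)) {	/* the backstop the comment names */
		puts("frame dropped");
		return 0;
	}
	sk.rmem_used += truesize;
	puts("frame accepted");
	return 0;
}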
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 7a8650f01d0..710df67a678 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -174,7 +174,8 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
 				sizeof(struct sctp_chunk);
 
 	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
-	sk_charge_skb(sk, chunk->skb);
+	sk->sk_wmem_queued += chunk->skb->truesize;
+	sk_mem_charge(sk, chunk->skb->truesize);
 }
 
 /* Verify that this is a valid address.  */
@@ -6035,10 +6036,10 @@ static void sctp_wfree(struct sk_buff *skb)
 	atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
 
 	/*
-	 * This undoes what is done via sk_charge_skb
+	 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
 	 */
 	sk->sk_wmem_queued -= skb->truesize;
-	sk->sk_forward_alloc += skb->truesize;
+	sk_mem_uncharge(sk, skb->truesize);
 
 	sock_wfree(skb);
 	__sctp_write_space(asoc);
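
Taken together, the two socket.c hunks above keep the write-side accounting symmetric: sctp_set_owner_w() grows sk_wmem_queued and debits quota through sk_mem_charge(), and the skb destructor sctp_wfree() reverses both. A reduced model of that round trip, with the sock fields flattened to plain ints and the sk_mem_charge()/sk_mem_uncharge() bodies simplified to the forward_alloc adjustment they perform in this series:

#include <assert.h>

struct sk_model { int sk_wmem_queued; int sk_forward_alloc; };

static void charge(struct sk_model *sk, int truesize)	/* sctp_set_owner_w side */
{
	sk->sk_wmem_queued += truesize;
	sk->sk_forward_alloc -= truesize;	/* sk_mem_charge(), simplified */
}

static void wfree(struct sk_model *sk, int truesize)	/* sctp_wfree side */
{
	sk->sk_wmem_queued -= truesize;
	sk->sk_forward_alloc += truesize;	/* sk_mem_uncharge(), simplified */
}

int main(void)
{
	struct sk_model sk = { 0, 4096 };

	charge(&sk, 1500);
	wfree(&sk, 1500);
	/* the destructor must undo the charge exactly, or quota leaks */
	assert(sk.sk_wmem_queued == 0 && sk.sk_forward_alloc == 4096);
	return 0;
}

If the destructor credited a different amount than was charged, sk_forward_alloc would drift on every skb, which is why the replacement mirrors the removed sk_charge_skb() exactly.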
@@ -6059,9 +6060,9 @@ void sctp_sock_rfree(struct sk_buff *skb)
 	atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
 
 	/*
-	 * Mimic the behavior of sk_stream_rfree
+	 * Mimic the behavior of sock_rfree
 	 */
-	sk->sk_forward_alloc += event->rmem_len;
+	sk_mem_uncharge(sk, event->rmem_len);
 }
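
sctp_sock_rfree() is the receive-side twin of sctp_wfree(), with one wrinkle: the uncharge uses event->rmem_len, the size recorded when the event was charged to the socket, rather than reading skb->truesize at free time. A minimal model of why that matters, assuming (as elsewhere in this series) that rmem_len captures the truesize at enqueue:

#include <assert.h>

/* Reduced receive-side model: rmem_len is captured at charge time so the
 * destructor credits exactly what was debited, even if the skb's truesize
 * has changed in between. */
struct ev_model { int rmem_len; };
struct sk_model { int sk_rmem_alloc; int sk_forward_alloc; };

static void set_owner_r(struct sk_model *sk, struct ev_model *ev, int truesize)
{
	ev->rmem_len = truesize;		/* remembered for the destructor */
	sk->sk_rmem_alloc += truesize;
	sk->sk_forward_alloc -= truesize;	/* sk_mem_charge(), simplified */
}

static void rfree(struct sk_model *sk, const struct ev_model *ev)
{
	sk->sk_rmem_alloc -= ev->rmem_len;
	sk->sk_forward_alloc += ev->rmem_len;	/* sk_mem_uncharge(), simplified */
}

int main(void)
{
	struct sk_model sk = { 0, 4096 };
	struct ev_model ev;

	set_owner_r(&sk, &ev, 2048);
	rfree(&sk, &ev);
	assert(sk.sk_rmem_alloc == 0 && sk.sk_forward_alloc == 4096);
	return 0;
}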
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 307314356e1..047c27df98f 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -700,7 +700,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 	if (rx_count >= asoc->base.sk->sk_rcvbuf) {
 
 		if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
-		    (!sk_stream_rmem_schedule(asoc->base.sk, chunk->skb)))
+		    (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
 			goto fail;
 	}
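
The call-site change above follows the new signature: sk_stream_rmem_schedule() took the skb, while sk_rmem_schedule() takes a byte count, so the caller now passes chunk->skb->truesize itself. A simplified, compilable model of the shape of that check: the fast path spends the pre-charged sk_forward_alloc quota, the slow path reserves whole quanta from a per-protocol pool (the pool variables and limits here are illustrative, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

#define SK_MEM_QUANTUM 4096	/* illustrative page size */

struct sk_model { int sk_forward_alloc; };
static int proto_pages_used, proto_pages_limit = 8;

/* Simplified shape of sk_rmem_schedule(): quota on hand, or reserve more. */
static bool rmem_schedule(struct sk_model *sk, int size)
{
	int need;

	if (size <= sk->sk_forward_alloc)
		return true;			/* fast path: quota on hand */

	need = (size + SK_MEM_QUANTUM - 1) / SK_MEM_QUANTUM;
	if (proto_pages_used + need > proto_pages_limit)
		return false;			/* over the protocol limit: drop */

	proto_pages_used += need;
	sk->sk_forward_alloc += need * SK_MEM_QUANTUM;
	return true;
}

int main(void)
{
	struct sk_model sk = { 0 };

	printf("%s\n", rmem_schedule(&sk, 1500) ? "ok" : "drop");	/* ok */
	return 0;
}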
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 1733fa29a50..c25caefa3bc 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1046,7 +1046,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
 	}
 
-	sk_stream_mem_reclaim(asoc->base.sk);
+	sk_mem_reclaim(asoc->base.sk);
 	return;
 }
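
Finally, sk_mem_reclaim() replaces sk_stream_mem_reclaim() as the routine that runs after a renege: it returns every complete unused quantum of sk_forward_alloc to the protocol's pool while keeping the sub-quantum remainder with the socket. A sketch under the same simplified pool model (names and starting values are illustrative):

#include <assert.h>

#define SK_MEM_QUANTUM 4096	/* illustrative page size */

struct sk_model { int sk_forward_alloc; };
static int proto_pages_used = 3;

/* Simplified shape of sk_mem_reclaim(): keep the sub-quantum remainder,
 * hand every complete quantum back to the per-protocol pool. */
static void mem_reclaim(struct sk_model *sk)
{
	int pages = sk->sk_forward_alloc / SK_MEM_QUANTUM;

	if (pages > 0) {
		proto_pages_used -= pages;
		sk->sk_forward_alloc -= pages * SK_MEM_QUANTUM;
	}
}

int main(void)
{
	struct sk_model sk = { .sk_forward_alloc = 2 * SK_MEM_QUANTUM + 100 };

	mem_reclaim(&sk);
	assert(sk.sk_forward_alloc == 100);	/* remainder stays with the socket */
	assert(proto_pages_used == 1);		/* two pages went back to the pool */
	return 0;
}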