Diffstat (limited to 'net')
-rw-r--r--  net/802/fddi.c                          2
-rw-r--r--  net/802/tr.c                            3
-rw-r--r--  net/8021q/vlan_core.c                   4
-rw-r--r--  net/8021q/vlan_dev.c                    3
-rw-r--r--  net/ax25/ax25_uid.c                    12
-rw-r--r--  net/bluetooth/hci_event.c              38
-rw-r--r--  net/bluetooth/rfcomm/core.c             2
-rw-r--r--  net/can/af_can.c                        4
-rw-r--r--  net/core/dev.c                         22
-rw-r--r--  net/ipv4/tcp.c                          6
-rw-r--r--  net/ipv4/tcp_input.c                    3
-rw-r--r--  net/ipv4/tcp_output.c                   2
-rw-r--r--  net/ipv4/udp.c                          3
-rw-r--r--  net/ipv6/ipv6_sockglue.c                4
-rw-r--r--  net/ipv6/udp.c                          6
-rw-r--r--  net/ipv6/xfrm6_output.c                 1
-rw-r--r--  net/iucv/af_iucv.c                     24
-rw-r--r--  net/mac80211/Kconfig                    7
-rw-r--r--  net/mac80211/main.c                     2
-rw-r--r--  net/mac80211/mlme.c                    38
-rw-r--r--  net/mac80211/pm.c                      15
-rw-r--r--  net/mac80211/rx.c                      15
-rw-r--r--  net/mac80211/wext.c                    43
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c    4
-rw-r--r--  net/netlabel/netlabel_addrlist.c       26
-rw-r--r--  net/netrom/af_netrom.c                  6
-rw-r--r--  net/packet/af_packet.c                  5
-rw-r--r--  net/rose/af_rose.c                     10
-rw-r--r--  net/sched/em_meta.c                     6
-rw-r--r--  net/sched/sch_netem.c                   8
-rw-r--r--  net/socket.c                            6
-rw-r--r--  net/sunrpc/svc.c                        3
-rw-r--r--  net/sunrpc/svc_xprt.c                 127
-rw-r--r--  net/sunrpc/svcsock.c                   35
-rw-r--r--  net/wireless/core.h                     2
-rw-r--r--  net/wireless/nl80211.c                  4
-rw-r--r--  net/wireless/reg.c                      5
-rw-r--r--  net/wireless/scan.c                    40
38 files changed, 382 insertions(+), 164 deletions(-)
diff --git a/net/802/fddi.c b/net/802/fddi.c
index f1611a1e06a..539e6064e6d 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -215,3 +215,5 @@ struct net_device *alloc_fddidev(int sizeof_priv)
return alloc_netdev(sizeof_priv, "fddi%d", fddi_setup);
}
EXPORT_SYMBOL(alloc_fddidev);
+
+MODULE_LICENSE("GPL");
diff --git a/net/802/tr.c b/net/802/tr.c
index e7eb13084d7..e874447ad14 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -561,6 +561,9 @@ static int rif_seq_show(struct seq_file *seq, void *v)
}
seq_putc(seq, '\n');
}
+
+ if (dev)
+ dev_put(dev);
}
return 0;
}
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 654e45f5719..c67fe6f7565 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -121,8 +121,10 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
if (!skb)
return NET_RX_DROP;
- if (netpoll_rx_on(skb))
+ if (netpoll_rx_on(skb)) {
+ skb->protocol = eth_type_trans(skb, skb->dev);
return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
+ }
return napi_frags_finish(napi, skb,
vlan_gro_common(napi, grp, vlan_tci, skb));
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 1b34135cf99..6b092136401 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -668,7 +668,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
const struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct net_device *real_dev = vlan->real_dev;
- if (!real_dev->ethtool_ops->get_settings)
+ if (!real_dev->ethtool_ops ||
+ !real_dev->ethtool_ops->get_settings)
return -EOPNOTSUPP;
return real_dev->ethtool_ops->get_settings(real_dev, cmd);
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 57aeba729ba..832bcf092a0 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -148,9 +148,13 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ax25_uid_assoc *pt;
struct hlist_node *node;
- int i = 0;
+ int i = 1;
read_lock(&ax25_uid_lock);
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
ax25_uid_for_each(pt, node, &ax25_uid_list) {
if (i == *pos)
return pt;
@@ -162,8 +166,10 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
-
- return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
+ if (v == SEQ_START_TOKEN)
+ return ax25_uid_list.first;
+ else
+ return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
ax25_uid_assoc, uid_node);
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 55534244c3a..15f40ea8d54 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -866,8 +866,16 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
- if (!conn)
- goto unlock;
+ if (!conn) {
+ if (ev->link_type != SCO_LINK)
+ goto unlock;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
+ if (!conn)
+ goto unlock;
+
+ conn->type = SCO_LINK;
+ }
if (!ev->status) {
conn->handle = __le16_to_cpu(ev->handle);
@@ -1646,20 +1654,28 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
conn->type = SCO_LINK;
}
- if (conn->out && ev->status == 0x1c && conn->attempt < 2) {
- conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
- (hdev->esco_type & EDR_ESCO_MASK);
- hci_setup_sync(conn, conn->link->handle);
- goto unlock;
- }
-
- if (!ev->status) {
+ switch (ev->status) {
+ case 0x00:
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
hci_conn_add_sysfs(conn);
- } else
+ break;
+
+ case 0x1c: /* SCO interval rejected */
+ case 0x1f: /* Unspecified error */
+ if (conn->out && conn->attempt < 2) {
+ conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
+ (hdev->esco_type & EDR_ESCO_MASK);
+ hci_setup_sync(conn, conn->link->handle);
+ goto unlock;
+ }
+ /* fall through */
+
+ default:
conn->state = BT_CLOSED;
+ break;
+ }
hci_proto_connect_cfm(conn, ev->status);
if (ev->status)
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 1d0fb0f23c6..374536e050a 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1194,6 +1194,8 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d)
rfcomm_send_ua(d->session, d->dlci);
+ rfcomm_dlc_clear_timer(d);
+
rfcomm_dlc_lock(d);
d->state = BT_CONNECTED;
d->state_change(d, 0);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 547bafc79e2..10f0528c3bf 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -674,8 +674,8 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
rcu_read_unlock();
- /* free the skbuff allocated by the netdevice driver */
- kfree_skb(skb);
+ /* consume the skbuff allocated by the netdevice driver */
+ consume_skb(skb);
if (matches > 0) {
can_stats.matches++;
diff --git a/net/core/dev.c b/net/core/dev.c
index 91d792d17e0..308a7d0c277 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1336,7 +1336,12 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
struct packet_type *ptype;
+#ifdef CONFIG_NET_CLS_ACT
+ if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
+ net_timestamp(skb);
+#else
net_timestamp(skb);
+#endif
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -1430,7 +1435,7 @@ void netif_device_detach(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
}
}
EXPORT_SYMBOL(netif_device_detach);
@@ -1445,7 +1450,7 @@ void netif_device_attach(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
__netdev_watchdog_up(dev);
}
}
@@ -2328,8 +2333,10 @@ static int napi_gro_complete(struct sk_buff *skb)
struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
int err = -ENOENT;
- if (NAPI_GRO_CB(skb)->count == 1)
+ if (NAPI_GRO_CB(skb)->count == 1) {
+ skb_shinfo(skb)->gso_size = 0;
goto out;
+ }
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
@@ -2348,7 +2355,6 @@ static int napi_gro_complete(struct sk_buff *skb)
}
out:
- skb_shinfo(skb)->gso_size = 0;
return netif_receive_skb(skb);
}
@@ -2539,9 +2545,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
}
BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
- frag = &info->frags[info->nr_frags - 1];
+ frag = info->frags;
- for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
+ for (i = 0; i < info->nr_frags; i++) {
skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
frag->size);
frag++;
@@ -4399,7 +4405,7 @@ int register_netdevice(struct net_device *dev)
dev->iflink = -1;
#ifdef CONFIG_COMPAT_NET_DEV_OPS
- /* Netdevice_ops API compatiability support.
+ /* Netdevice_ops API compatibility support.
* This is temporary until all network devices are converted.
*/
if (dev->netdev_ops) {
@@ -4410,7 +4416,7 @@ int register_netdevice(struct net_device *dev)
dev->name, netdev_drivername(dev, drivername, 64));
/* This works only because net_device_ops and the
- compatiablity structure are the same. */
+ compatibility structure are the same. */
dev->netdev_ops = (void *) &(dev->init);
}
#endif
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fafbec8b073..1d7f49c6f0c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2511,6 +2511,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
struct sk_buff *p;
struct tcphdr *th;
struct tcphdr *th2;
+ unsigned int len;
unsigned int thlen;
unsigned int flags;
unsigned int mss = 1;
@@ -2531,6 +2532,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
skb_gro_pull(skb, thlen);
+ len = skb_gro_len(skb);
flags = tcp_flag_word(th);
for (; (p = *head); head = &p->next) {
@@ -2561,7 +2563,7 @@ found:
mss = skb_shinfo(p)->gso_size;
- flush |= (skb_gro_len(skb) > mss) | !skb_gro_len(skb);
+ flush |= (len > mss) | !len;
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
if (flush || skb_gro_receive(head, skb)) {
@@ -2574,7 +2576,7 @@ found:
tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
out_check_final:
- flush = skb_gro_len(skb) < mss;
+ flush = len < mss;
flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
TCP_FLAG_SYN | TCP_FLAG_FIN);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2bc8e27a163..c96a6bb2543 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -928,6 +928,8 @@ static void tcp_init_metrics(struct sock *sk)
tcp_set_rto(sk);
if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
goto reset;
+
+cwnd:
tp->snd_cwnd = tcp_init_cwnd(tp, dst);
tp->snd_cwnd_stamp = tcp_time_stamp;
return;
@@ -942,6 +944,7 @@ reset:
tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
}
+ goto cwnd;
}
static void tcp_update_reordering(struct sock *sk, const int metric,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 53300fa2359..59aec609cec 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -778,7 +778,7 @@ static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
if (tp->lost_skb_hint &&
before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
- (tcp_is_fack(tp) || TCP_SKB_CB(skb)->sacked))
+ (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
tp->lost_cnt_hint -= decr;
tcp_verify_left_out(tp);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index bda08a09357..7a1d1ce22e6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -222,7 +222,7 @@ fail:
return error;
}
-int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
@@ -1823,7 +1823,6 @@ EXPORT_SYMBOL(udp_lib_getsockopt);
EXPORT_SYMBOL(udp_lib_setsockopt);
EXPORT_SYMBOL(udp_poll);
EXPORT_SYMBOL(udp_lib_get_port);
-EXPORT_SYMBOL(ipv4_rcv_saddr_equal);
#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(udp_proc_register);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index d31df0f4bc9..a7fdf9a27f1 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -380,10 +380,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
default:
goto sticky_done;
}
-
- if ((rthdr->hdrlen & 1) ||
- (rthdr->hdrlen >> 1) != rthdr->segments_left)
- goto sticky_done;
}
retv = 0;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6842dd2edd5..8905712cfbb 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -53,6 +53,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
+ __be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
+ __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
int sk_ipv6only = ipv6_only_sock(sk);
int sk2_ipv6only = inet_v6_ipv6only(sk2);
int addr_type = ipv6_addr_type(sk_rcv_saddr6);
@@ -60,7 +62,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
/* if both are mapped, treat as IPv4 */
if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
- return ipv4_rcv_saddr_equal(sk, sk2);
+ return (!sk2_ipv6only &&
+ (!sk_rcv_saddr || !sk2_rcv_saddr ||
+ sk_rcv_saddr == sk2_rcv_saddr));
if (addr_type2 == IPV6_ADDR_ANY &&
!(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 0af823cf7f1..5ee5a031bc9 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -72,6 +72,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
#endif
skb->protocol = htons(ETH_P_IPV6);
+ skb->local_df = 1;
return x->outer_mode->output2(x, skb);
}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 49e786535dc..b51c9187c34 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -172,6 +172,7 @@ static void iucv_sock_close(struct sock *sk)
err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
}
+ case IUCV_CLOSING: /* fall through */
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
@@ -224,6 +225,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
spin_lock_init(&iucv_sk(sk)->message_q.lock);
skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
iucv_sk(sk)->send_tag = 0;
+ iucv_sk(sk)->path = NULL;
+ memset(&iucv_sk(sk)->src_user_id , 0, 32);
sk->sk_destruct = iucv_sock_destruct;
sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -811,6 +814,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+ /* receive/dequeue next skb:
+ * the function understands MSG_PEEK and, thus, does not dequeue skb */
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -858,9 +863,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
iucv_process_message_q(sk);
spin_unlock_bh(&iucv->message_q.lock);
}
-
- } else
- skb_queue_head(&sk->sk_receive_queue, skb);
+ }
done:
return err ? : copied;
@@ -934,6 +937,9 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
switch (sk->sk_state) {
+ case IUCV_DISCONN:
+ case IUCV_CLOSING:
+ case IUCV_SEVERED:
case IUCV_CLOSED:
err = -ENOTCONN;
goto fail;
@@ -1113,8 +1119,12 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
struct sock_msg_q *save_msg;
int len;
- if (sk->sk_shutdown & RCV_SHUTDOWN)
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ iucv_message_reject(path, msg);
return;
+ }
+
+ spin_lock(&iucv->message_q.lock);
if (!list_empty(&iucv->message_q.list) ||
!skb_queue_empty(&iucv->backlog_skb_q))
@@ -1129,9 +1139,8 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
if (!skb)
goto save_message;
- spin_lock(&iucv->message_q.lock);
iucv_process_message(sk, skb, path, msg);
- spin_unlock(&iucv->message_q.lock);
+ goto out_unlock;
return;
@@ -1142,8 +1151,9 @@ save_message:
save_msg->path = path;
save_msg->msg = *msg;
- spin_lock(&iucv->message_q.lock);
list_add_tail(&save_msg->list, &iucv->message_q.list);
+
+out_unlock:
spin_unlock(&iucv->message_q.lock);
}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index f3d9ae350fb..ecc3faf9f11 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -202,10 +202,3 @@ config MAC80211_DEBUG_COUNTERS
and show them in debugfs.
If unsure, say N.
-
-config MAC80211_VERBOSE_SPECT_MGMT_DEBUG
- bool "Verbose Spectrum Management (IEEE 802.11h)debugging"
- depends on MAC80211_DEBUG_MENU
- ---help---
- Say Y here to print out verbose Spectrum Management (IEEE 802.11h)
- debug messages.
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index a6f1d8a869b..fbcbed6cad0 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -258,7 +258,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
(chan->max_power - local->power_constr_level) :
chan->max_power;
- if (local->user_power_level)
+ if (local->user_power_level >= 0)
power = min(power, local->user_power_level);
if (local->hw.conf.power_level != power) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7ecda9d59d8..132938b073d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -441,6 +441,9 @@ static bool ieee80211_check_tim(struct ieee802_11_elems *elems, u16 aid)
u8 index, indexn1, indexn2;
struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) elems->tim;
+ if (unlikely(!tim || elems->tim_len < 4))
+ return false;
+
aid &= 0x3fff;
index = aid / 8;
mask = 1 << (aid & 7);
@@ -945,9 +948,13 @@ void ieee80211_beacon_loss_work(struct work_struct *work)
u.mgd.beacon_loss_work);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
- "- sending probe request\n", sdata->dev->name,
- sdata->u.mgd.bssid);
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
+ "- sending probe request\n", sdata->dev->name,
+ sdata->u.mgd.bssid);
+ }
+#endif
ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
@@ -1007,9 +1014,13 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
(local->hw.conf.flags & IEEE80211_CONF_PS)) &&
time_after(jiffies,
ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) {
- printk(KERN_DEBUG "%s: beacon loss from AP %pM "
- "- sending probe request\n",
- sdata->dev->name, ifmgd->bssid);
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "%s: beacon loss from AP %pM "
+ "- sending probe request\n",
+ sdata->dev->name, ifmgd->bssid);
+ }
+#endif
ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
ifmgd->ssid_len, NULL, 0);
@@ -1355,7 +1366,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < elems.ext_supp_rates_len; i++) {
int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
- bool is_basic = !!(elems.supp_rates[i] & 0x80);
+ bool is_basic = !!(elems.ext_supp_rates[i] & 0x80);
if (rate > 110)
have_higher_than_11mbit = true;
@@ -1902,9 +1913,17 @@ static void ieee80211_sta_work(struct work_struct *work)
static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
{
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ /*
+ * Need to update last_beacon to avoid beacon loss
+ * test to trigger.
+ */
+ sdata->u.mgd.last_beacon = jiffies;
+
+
queue_work(sdata->local->hw.workqueue,
&sdata->u.mgd.work);
+ }
}
/* interface setup */
@@ -2105,12 +2124,13 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
struct ieee80211_local *local =
container_of(work, struct ieee80211_local,
dynamic_ps_enable_work);
+ /* XXX: using scan_sdata is completely broken! */
struct ieee80211_sub_if_data *sdata = local->scan_sdata;
if (local->hw.conf.flags & IEEE80211_CONF_PS)
return;
- if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
+ if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK && sdata)
ieee80211_send_nullfunc(local, sdata, 1);
local->hw.conf.flags |= IEEE80211_CONF_PS;
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 02730232649..81985d27cbd 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -156,8 +156,19 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
- WARN_ON(ieee80211_if_config(sdata, changed));
- ieee80211_bss_info_change_notify(sdata, ~0);
+ /*
+ * Driver's config_interface can fail if rfkill is
+ * enabled. Accommodate this return code.
+ * FIXME: When mac80211 has knowledge of rfkill
+ * state the code below can change back to:
+ * WARN(ieee80211_if_config(sdata, changed));
+ * ieee80211_bss_info_change_notify(sdata, ~0);
+ */
+ if (ieee80211_if_config(sdata, changed))
+ printk(KERN_DEBUG "%s: failed to configure interface during resume\n",
+ sdata->dev->name);
+ else
+ ieee80211_bss_info_change_notify(sdata, ~0);
break;
case NL80211_IFTYPE_WDS:
break;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 64ebe664eff..9776f73c51a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -29,6 +29,7 @@
static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
struct tid_ampdu_rx *tid_agg_rx,
struct sk_buff *skb,
+ struct ieee80211_rx_status *status,
u16 mpdu_seq_num,
int bar_req);
/*
@@ -1396,7 +1397,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
* mac80211. That also explains the __skb_push()
* below.
*/
- align = (unsigned long)skb->data & 4;
+ align = (unsigned long)skb->data & 3;
if (align) {
if (WARN_ON(skb_headroom(skb) < 3)) {
dev_kfree_skb(skb);
@@ -1688,7 +1689,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
/* manage reordering buffer according to requested */
/* sequence number */
rcu_read_lock();
- ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
+ ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL,
start_seq_num, 1);
rcu_read_unlock();
return RX_DROP_UNUSABLE;
@@ -2293,6 +2294,7 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
struct tid_ampdu_rx *tid_agg_rx,
struct sk_buff *skb,
+ struct ieee80211_rx_status *rxstatus,
u16 mpdu_seq_num,
int bar_req)
{
@@ -2374,6 +2376,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
/* put the frame in the reordering buffer */
tid_agg_rx->reorder_buf[index] = skb;
+ memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
+ sizeof(*rxstatus));
tid_agg_rx->stored_mpdu_num++;
/* release the buffer until next missing frame */
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
@@ -2399,7 +2403,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
}
static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *status)
{
struct ieee80211_hw *hw = &local->hw;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -2448,7 +2453,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
/* according to mpdu sequence number deal with reordering buffer */
mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
- ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
+ ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status,
mpdu_seq_num, 0);
end_reorder:
return ret;
@@ -2512,7 +2517,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
return;
}
- if (!ieee80211_rx_reorder_ampdu(local, skb))
+ if (!ieee80211_rx_reorder_ampdu(local, skb, status))
__ieee80211_rx_handle_packet(hw, skb, status, rate);
rcu_read_unlock();
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index deb4ecec122..959aa8379cc 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -417,6 +417,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_channel* chan = local->hw.conf.channel;
+ bool reconf = false;
u32 reconf_flags = 0;
int new_power_level;
@@ -427,14 +428,38 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
if (!chan)
return -EINVAL;
- if (data->txpower.fixed)
- new_power_level = min(data->txpower.value, chan->max_power);
- else /* Automatic power level setting */
- new_power_level = chan->max_power;
+ /* only change when not disabling */
+ if (!data->txpower.disabled) {
+ if (data->txpower.fixed) {
+ if (data->txpower.value < 0)
+ return -EINVAL;
+ new_power_level = data->txpower.value;
+ /*
+ * Debatable, but we cannot do a fixed power
+ * level above the regulatory constraint.
+ * Use "iwconfig wlan0 txpower 15dBm" instead.
+ */
+ if (new_power_level > chan->max_power)
+ return -EINVAL;
+ } else {
+ /*
+ * Automatic power level setting, max being the value
+ * passed in from userland.
+ */
+ if (data->txpower.value < 0)
+ new_power_level = -1;
+ else
+ new_power_level = data->txpower.value;
+ }
+
+ reconf = true;
- local->user_power_level = new_power_level;
- if (local->hw.conf.power_level != new_power_level)
- reconf_flags |= IEEE80211_CONF_CHANGE_POWER;
+ /*
+ * ieee80211_hw_config() will limit to the channel's
+ * max power and possibly power constraint from AP.
+ */
+ local->user_power_level = new_power_level;
+ }
if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) {
local->hw.conf.radio_enabled = !(data->txpower.disabled);
@@ -442,7 +467,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
ieee80211_led_radio(local, local->hw.conf.radio_enabled);
}
- if (reconf_flags)
+ if (reconf || reconf_flags)
ieee80211_hw_config(local, reconf_flags);
return 0;
@@ -530,7 +555,7 @@ static int ieee80211_ioctl_giwfrag(struct net_device *dev,
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
frag->value = local->fragmentation_threshold;
- frag->disabled = (frag->value >= IEEE80211_MAX_RTS_THRESHOLD);
+ frag->disabled = (frag->value >= IEEE80211_MAX_FRAG_THRESHOLD);
frag->fixed = 1;
return 0;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0ea36e0c8a0..f13fc57e1ec 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -988,7 +988,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
{
struct nf_conntrack_helper *helper;
struct nf_conn_help *help = nfct_help(ct);
- char *helpname;
+ char *helpname = NULL;
int err;
/* don't change helper of sibling connections */
@@ -1231,7 +1231,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
rcu_read_lock();
if (cda[CTA_HELP]) {
- char *helpname;
+ char *helpname = NULL;
err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
if (err < 0)
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index 834c6eb7f48..c0519139679 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -256,13 +256,11 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
{
struct netlbl_af4list *entry;
- entry = netlbl_af4list_search(addr, head);
- if (entry != NULL && entry->addr == addr && entry->mask == mask) {
- netlbl_af4list_remove_entry(entry);
- return entry;
- }
-
- return NULL;
+ entry = netlbl_af4list_search_exact(addr, mask, head);
+ if (entry == NULL)
+ return NULL;
+ netlbl_af4list_remove_entry(entry);
+ return entry;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -299,15 +297,11 @@ struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr,
{
struct netlbl_af6list *entry;
- entry = netlbl_af6list_search(addr, head);
- if (entry != NULL &&
- ipv6_addr_equal(&entry->addr, addr) &&
- ipv6_addr_equal(&entry->mask, mask)) {
- netlbl_af6list_remove_entry(entry);
- return entry;
- }
-
- return NULL;
+ entry = netlbl_af6list_search_exact(addr, mask, head);
+ if (entry == NULL)
+ return NULL;
+ netlbl_af6list_remove_entry(entry);
+ return entry;
}
#endif /* IPv6 */
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4e705f87969..3be0e016ab7 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1084,8 +1084,10 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Build a packet - the conventional user limit is 236 bytes. We can
do ludicrously large NetROM frames but must not overflow */
- if (len > 65536)
- return -EMSGSIZE;
+ if (len > 65536) {
+ err = -EMSGSIZE;
+ goto out;
+ }
SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 74776de523e..f546e81acc4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1758,8 +1758,9 @@ static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
static inline char *alloc_one_pg_vec_page(unsigned long order)
{
- return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
- order);
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;
+
+ return (char *) __get_free_pages(gfp_flags, order);
}
static char **alloc_pg_vec(struct tpacket_req *req, int order)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 0f36e8d59b2..877a7f65f70 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1072,10 +1072,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
unsigned char *asmptr;
int n, size, qbit = 0;
- /* ROSE empty frame has no meaning : don't send */
- if (len == 0)
- return 0;
-
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
@@ -1273,12 +1269,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
skb_reset_transport_header(skb);
copied = skb->len;
- /* ROSE empty frame has no meaning : ignore it */
- if (copied == 0) {
- skb_free_datagram(sk, skb);
- return copied;
- }
-
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 72cf86e3c09..fad596bf32d 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -176,8 +176,10 @@ META_COLLECTOR(var_dev)
META_COLLECTOR(int_vlan_tag)
{
- unsigned short uninitialized_var(tag);
- if (vlan_get_tag(skb, &tag) < 0)
+ unsigned short tag;
+
+ tag = vlan_tx_tag_get(skb);
+ if (!tag && __vlan_get_tag(skb, &tag))
*err = -1;
else
dst->value = tag;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index d876b873484..2b88295cb7b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -280,6 +280,14 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
if (unlikely(!skb))
return NULL;
+#ifdef CONFIG_NET_CLS_ACT
+ /*
+ * If it's at ingress let's pretend the delay is
+ * from the network (tstamp will be updated).
+ */
+ if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
+ skb->tstamp.tv64 = 0;
+#endif
pr_debug("netem_dequeue: return skb=%p\n", skb);
sch->q.qlen--;
return skb;
diff --git a/net/socket.c b/net/socket.c
index 91d0c0254ff..791d71a36a9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -493,8 +493,7 @@ static struct socket *sock_alloc(void)
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- get_cpu_var(sockets_in_use)++;
- put_cpu_var(sockets_in_use);
+ percpu_add(sockets_in_use, 1);
return sock;
}
@@ -536,8 +535,7 @@ void sock_release(struct socket *sock)
if (sock->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
- get_cpu_var(sockets_in_use)--;
- put_cpu_var(sockets_in_use);
+ percpu_sub(sockets_in_use, 1);
if (!sock->file) {
iput(SOCK_INODE(sock));
return;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 9b49a6ab8de..8847add6ca1 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1008,6 +1008,8 @@ svc_process(struct svc_rqst *rqstp)
rqstp->rq_res.tail[0].iov_len = 0;
/* Will be turned off only in gss privacy case: */
rqstp->rq_splice_ok = 1;
+ /* Will be turned off only when NFSv4 Sessions are used */
+ rqstp->rq_usedeferral = 1;
/* Setup reply header */
rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1078,7 +1080,6 @@ svc_process(struct svc_rqst *rqstp)
procp = versp->vs_proc + proc;
if (proc >= versp->vs_nproc || !procp->pc_func)
goto err_bad_proc;
- rqstp->rq_server = serv;
rqstp->rq_procinfo = procp;
/* Syntactic check complete */
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 2819ee093f3..c200d92e57e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -14,6 +14,8 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
+#define SVC_MAX_WAKING 5
+
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -301,6 +303,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
struct svc_pool *pool;
struct svc_rqst *rqstp;
int cpu;
+ int thread_avail;
if (!(xprt->xpt_flags &
((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -312,18 +315,14 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
spin_lock_bh(&pool->sp_lock);
- if (!list_empty(&pool->sp_threads) &&
- !list_empty(&pool->sp_sockets))
- printk(KERN_ERR
- "svc_xprt_enqueue: "
- "threads and transports both waiting??\n");
-
if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
/* Don't enqueue dead transports */
dprintk("svc: transport %p is dead, not enqueued\n", xprt);
goto out_unlock;
}
+ pool->sp_stats.packets++;
+
/* Mark transport as busy. It will remain in this state until
* the provider calls svc_xprt_received. We update XPT_BUSY
* atomically because it also guards against trying to enqueue
@@ -356,7 +355,15 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
}
process:
- if (!list_empty(&pool->sp_threads)) {
+ /* Work out whether threads are available */
+ thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */
+ if (pool->sp_nwaking >= SVC_MAX_WAKING) {
+ /* too many threads are runnable and trying to wake up */
+ thread_avail = 0;
+ pool->sp_stats.overloads_avoided++;
+ }
+
+ if (thread_avail) {
rqstp = list_entry(pool->sp_threads.next,
struct svc_rqst,
rq_list);
@@ -371,11 +378,15 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
svc_xprt_get(xprt);
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+ rqstp->rq_waking = 1;
+ pool->sp_nwaking++;
+ pool->sp_stats.threads_woken++;
BUG_ON(xprt->xpt_pool != pool);
wake_up(&rqstp->rq_wait);
} else {
dprintk("svc: transport %p put into queue\n", xprt);
list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
+ pool->sp_stats.sockets_queued++;
BUG_ON(xprt->xpt_pool != pool);
}
@@ -588,6 +599,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
int pages;
struct xdr_buf *arg;
DECLARE_WAITQUEUE(wait, current);
+ long time_left;
dprintk("svc: server %p waiting for data (to = %ld)\n",
rqstp, timeout);
@@ -636,6 +648,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
return -EINTR;
spin_lock_bh(&pool->sp_lock);
+ if (rqstp->rq_waking) {
+ rqstp->rq_waking = 0;
+ pool->sp_nwaking--;
+ BUG_ON(pool->sp_nwaking < 0);
+ }
xprt = svc_xprt_dequeue(pool);
if (xprt) {
rqstp->rq_xprt = xprt;
@@ -668,12 +685,14 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
add_wait_queue(&rqstp->rq_wait, &wait);
spin_unlock_bh(&pool->sp_lock);
- schedule_timeout(timeout);
+ time_left = schedule_timeout(timeout);
try_to_freeze();
spin_lock_bh(&pool->sp_lock);
remove_wait_queue(&rqstp->rq_wait, &wait);
+ if (!time_left)
+ pool->sp_stats.threads_timedout++;
xprt = rqstp->rq_xprt;
if (!xprt) {
@@ -958,7 +977,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
struct svc_deferred_req *dr;
- if (rqstp->rq_arg.page_len)
+ if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
return NULL; /* if more than a page, give up FIXME */
if (rqstp->rq_deferred) {
dr = rqstp->rq_deferred;
@@ -1112,3 +1131,93 @@ int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);
+
+
+/*----------------------------------------------------------------------------*/
+
+static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
+{
+ unsigned int pidx = (unsigned int)*pos;
+ struct svc_serv *serv = m->private;
+
+ dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
+
+ lock_kernel();
+ /* bump up the pseudo refcount while traversing */
+ svc_get(serv);
+ unlock_kernel();
+
+ if (!pidx)
+ return SEQ_START_TOKEN;
+ return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
+}
+
+static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct svc_pool *pool = p;
+ struct svc_serv *serv = m->private;
+
+ dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
+
+ if (p == SEQ_START_TOKEN) {
+ pool = &serv->sv_pools[0];
+ } else {
+ unsigned int pidx = (pool - &serv->sv_pools[0]);
+ if (pidx < serv->sv_nrpools-1)
+ pool = &serv->sv_pools[pidx+1];
+ else
+ pool = NULL;
+ }
+ ++*pos;
+ return pool;
+}
+
+static void svc_pool_stats_stop(struct seq_file *m, void *p)
+{
+ struct svc_serv *serv = m->private;
+
+ lock_kernel();
+ /* this function really, really should have been called svc_put() */
+ svc_destroy(serv);
+ unlock_kernel();
+}
+
+static int svc_pool_stats_show(struct seq_file *m, void *p)
+{
+ struct svc_pool *pool = p;
+
+ if (p == SEQ_START_TOKEN) {
+ seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
+ return 0;
+ }
+
+ seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
+ pool->sp_id,
+ pool->sp_stats.packets,
+ pool->sp_stats.sockets_queued,
+ pool->sp_stats.threads_woken,
+ pool->sp_stats.overloads_avoided,
+ pool->sp_stats.threads_timedout);
+
+ return 0;
+}
+
+static const struct seq_operations svc_pool_stats_seq_ops = {
+ .start = svc_pool_stats_start,
+ .next = svc_pool_stats_next,
+ .stop = svc_pool_stats_stop,
+ .show = svc_pool_stats_show,
+};
+
+int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
+{
+ int err;
+
+ err = seq_open(file, &svc_pool_stats_seq_ops);
+ if (!err)
+ ((struct seq_file *) file->private_data)->private = serv;
+ return err;
+}
+EXPORT_SYMBOL(svc_pool_stats_open);
+
+/*----------------------------------------------------------------------------*/
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9d504234af4..af3198814c1 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -345,7 +345,6 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
lock_sock(sock->sk);
sock->sk->sk_sndbuf = snd * 2;
sock->sk->sk_rcvbuf = rcv * 2;
- sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
release_sock(sock->sk);
#endif
}
@@ -797,23 +796,6 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
- if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
- /* sndbuf needs to have room for one request
- * per thread, otherwise we can stall even when the
- * network isn't a bottleneck.
- *
- * We count all threads rather than threads in a
- * particular pool, which provides an upper bound
- * on the number of threads which will access the socket.
- *
- * rcvbuf just needs to be able to hold a few requests.
- * Normally they will be removed from the queue
- * as soon a a complete request arrives.
- */
- svc_sock_setbufsize(svsk->sk_sock,
- (serv->sv_nrthreads+3) * serv->sv_max_mesg,
- 3 * serv->sv_max_mesg);
-
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
/* Receive data. If we haven't got the record length yet, get
@@ -1061,15 +1043,6 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
- /* initialise setting must have enough space to
- * receive and respond to one request.
- * svc_tcp_recvfrom will re-adjust if necessary
- */
- svc_sock_setbufsize(svsk->sk_sock,
- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
-
- set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
if (sk->sk_state != TCP_ESTABLISHED)
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1139,8 +1112,14 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
/* Initialize the socket */
if (sock->type == SOCK_DGRAM)
svc_udp_init(svsk, serv);
- else
+ else {
+ /* initialise setting must have enough space to
+ * receive and respond to one request.
+ */
+ svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
+ 4 * serv->sv_max_mesg);
svc_tcp_init(svsk, serv);
+ }
dprintk("svc: svc_setup_socket created %p (inet %p)\n",
svsk, svsk->sk_sk);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d43daa236ef..0a592e4295f 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -90,7 +90,7 @@ struct cfg80211_internal_bss {
struct rb_node rbn;
unsigned long ts;
struct kref ref;
- bool hold;
+ bool hold, ies_allocated;
/* must be last because of priv member */
struct cfg80211_bss pub;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 353e1a4ece8..2456e4ee445 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3334,7 +3334,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!msg)
return;
@@ -3353,7 +3353,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
return;
}
- genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL);
+ genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
return;
nla_put_failure:
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6327e1617ac..6c1993d9990 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2095,11 +2095,12 @@ int set_regdom(const struct ieee80211_regdomain *rd)
/* Caller must hold cfg80211_mutex */
void reg_device_remove(struct wiphy *wiphy)
{
- struct wiphy *request_wiphy;
+ struct wiphy *request_wiphy = NULL;
assert_cfg80211_lock();
- request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
+ if (last_request)
+ request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
kfree(wiphy->regd);
if (!last_request || !request_wiphy)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2a00e362f5f..2ae65b39b52 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -58,6 +58,10 @@ static void bss_release(struct kref *ref)
bss = container_of(ref, struct cfg80211_internal_bss, ref);
if (bss->pub.free_priv)
bss->pub.free_priv(&bss->pub);
+
+ if (bss->ies_allocated)
+ kfree(bss->pub.information_elements);
+
kfree(bss);
}
@@ -360,19 +364,41 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
found = rb_find_bss(dev, res);
- if (found && overwrite) {
- list_replace(&found->list, &res->list);
- rb_replace_node(&found->rbn, &res->rbn,
- &dev->bss_tree);
- kref_put(&found->ref, bss_release);
- found = res;
- } else if (found) {
+ if (found) {
kref_get(&found->ref);
found->pub.beacon_interval = res->pub.beacon_interval;
found->pub.tsf = res->pub.tsf;
found->pub.signal = res->pub.signal;
found->pub.capability = res->pub.capability;
found->ts = res->ts;
+
+ /* overwrite IEs */
+ if (overwrite) {
+ size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
+ size_t ielen = res->pub.len_information_elements;
+
+ if (ksize(found) >= used + ielen) {
+ memcpy(found->pub.information_elements,
+ res->pub.information_elements, ielen);
+ found->pub.len_information_elements = ielen;
+ } else {
+ u8 *ies = found->pub.information_elements;
+
+ if (found->ies_allocated) {
+ if (ksize(ies) < ielen)
+ ies = krealloc(ies, ielen,
+ GFP_ATOMIC);
+ } else
+ ies = kmalloc(ielen, GFP_ATOMIC);
+
+ if (ies) {
+ memcpy(ies, res->pub.information_elements, ielen);
+ found->ies_allocated = true;
+ found->pub.information_elements = ies;
+ }
+ }
+ }
+
kref_put(&res->ref, bss_release);
} else {
/* this "consumes" the reference */