From 5e739d1752aca4e8f3e794d431503bfca3162df4 Mon Sep 17 00:00:00 2001
From: Vlad Yasevich
Date: Thu, 21 Aug 2008 03:34:25 -0700
Subject: sctp: fix potential panics in the SCTP-AUTH API.

All of the SCTP-AUTH socket options could cause a panic
if the extension is disabled and the API is invoked.

Additionally, there were some assumptions that certain
pointers would always be valid which may not always be
the case.

This patch hardens the API and addresses all of the crash
scenarios.

Signed-off-by: Vlad Yasevich
Signed-off-by: David S. Miller
---
 net/sctp/endpointola.c |  4 +--
 net/sctp/socket.c      | 85 ++++++++++++++++++++++++++++++++++++++------------
 2 files changed, 67 insertions(+), 22 deletions(-)

diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e39a0cdef18..4c8d9f45ce0 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -103,6 +103,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,

 		/* Initialize the CHUNKS parameter */
 		auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
+		auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));

 		/* If the Add-IP functionality is enabled, we must
 		 * authenticate, ASCONF and ASCONF-ACK chunks
@@ -110,8 +111,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 		if (sctp_addip_enable) {
 			auth_chunks->chunks[0] = SCTP_CID_ASCONF;
 			auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
-			auth_chunks->param_hdr.length =
-					htons(sizeof(sctp_paramhdr_t) + 2);
+			auth_chunks->param_hdr.length += htons(2);
 		}
 	}

diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index dbb79adf8f3..bb5c9ef1304 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3055,6 +3055,9 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
 {
 	struct sctp_authchunk val;

+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authchunk))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3085,6 +3088,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 	struct sctp_hmacalgo *hmacs;
 	int err;

+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen < sizeof(struct sctp_hmacalgo))
 		return -EINVAL;

@@ -3123,6 +3129,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 	struct sctp_association *asoc;
 	int ret;

+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen <= sizeof(struct sctp_authkey))
 		return -EINVAL;

@@ -3160,6 +3169,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;

+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3185,6 +3197,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;

+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -5197,19 +5212,29 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 				      char __user *optval, int __user *optlen)
 {
+	struct sctp_hmacalgo __user *p = (void __user *)optval;
 	struct sctp_hmac_algo_param *hmacs;
-	__u16 param_len;
+	__u16 data_len = 0;
+	u32 num_idents;
+
+	if (!sctp_auth_enable)
+		return -EACCES;

 	hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
-	param_len = ntohs(hmacs->param_hdr.length);
+	data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);

-	if (len < param_len)
+	if (len < sizeof(struct sctp_hmacalgo) + data_len)
 		return -EINVAL;
+
+	len = sizeof(struct sctp_hmacalgo) + data_len;
+	num_idents = data_len / sizeof(u16);
+
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (copy_to_user(optval, hmacs->hmac_ids, len))
+	if (put_user(num_idents, &p->shmac_num_idents))
+		return -EFAULT;
+	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
 		return -EFAULT;
-
 	return 0;
 }

@@ -5219,6 +5244,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;

+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (len < sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
@@ -5233,6 +5261,12 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	else
 		val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;

+	len = sizeof(struct sctp_authkeyid);
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+
 	return 0;
 }

@@ -5243,13 +5277,16 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32    num_chunks;
+	u32    num_chunks = 0;
 	char __user *to;

-	if (len <= sizeof(struct sctp_authchunks))
+	if (!sctp_auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
 		return -EINVAL;

-	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
 		return -EFAULT;

 	to = p->gauth_chunks;
@@ -5258,20 +5295,21 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 		return -EINVAL;

 	ch = asoc->peer.peer_chunks;
+	if (!ch)
+		goto num;

 	/* See if the user provided enough room for all the data */
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
 	if (len < num_chunks)
 		return -EINVAL;

-	len = num_chunks;
-	if (put_user(len, optlen))
+	if (copy_to_user(to, ch->chunks, num_chunks))
 		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
+	if (put_user(len, optlen))
 		return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
-	if (copy_to_user(to, ch->chunks, len))
-		return -EFAULT;
-
 	return 0;
 }

@@ -5282,13 +5320,16 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32    num_chunks;
+	u32    num_chunks = 0;
 	char __user *to;

-	if (len <= sizeof(struct sctp_authchunks))
+	if (!sctp_auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
 		return -EINVAL;

-	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
 		return -EFAULT;

 	to = p->gauth_chunks;
@@ -5301,17 +5342,21 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	else
 		ch = sctp_sk(sk)->ep->auth_chunk_list;

+	if (!ch)
+		goto num;
+
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
-	if (len < num_chunks)
+	if (len < sizeof(struct sctp_authchunks) + num_chunks)
 		return -EINVAL;

-	len = num_chunks;
+	if (copy_to_user(to, ch->chunks, num_chunks))
+		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
 	if (put_user(len, optlen))
 		return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
-	if (copy_to_user(to, ch->chunks, len))
-		return -EFAULT;

 	return 0;
 }
--
cgit v1.2.3
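
The user-visible effect of the hardening above is that every SCTP-AUTH socket
option now fails cleanly with EACCES when net.sctp.auth_enable is 0, instead
of dereferencing uninitialized endpoint state. A minimal userspace sketch of
the new behavior (assuming a kernel with SCTP support and the lksctp headers;
the 64-byte chunk buffer is an arbitrary illustration):

	/* Query the local auth chunk list; with this patch the call fails
	 * with EACCES when SCTP-AUTH is disabled rather than oopsing.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	int main(void)
	{
		char buf[sizeof(struct sctp_authchunks) + 64];
		struct sctp_authchunks *auth = (struct sctp_authchunks *)buf;
		socklen_t len = sizeof(buf);
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

		if (fd < 0) {
			perror("socket");
			return 1;
		}
		memset(buf, 0, sizeof(buf));
		if (getsockopt(fd, IPPROTO_SCTP, SCTP_LOCAL_AUTH_CHUNKS,
			       auth, &len) < 0) {
			if (errno == EACCES)
				fprintf(stderr, "SCTP-AUTH is disabled\n");
			else
				perror("getsockopt");
			close(fd);
			return 1;
		}
		printf("%u chunk(s) flagged for authentication\n",
		       auth->gauth_number_of_chunks);
		close(fd);
		return 0;
	}
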
From 2540e0511ea17e25831be543cdf9381e6209950d Mon Sep 17 00:00:00 2001
From: Jarek Poplawski
Date: Thu, 21 Aug 2008 05:11:14 -0700
Subject: pkt_sched: Fix qdisc_watchdog() vs. dev_deactivate() race

dev_deactivate() can race with qdisc_watchdog() or another timer
calling netif_schedule() after dev_queue_deactivate(), letting a
deactivated qdisc be rescheduled. We prevent this by checking
aliveness before scheduling the timer. Since during deactivation the
root qdisc is available only as qdisc_sleeping, an additional
accessor qdisc_root_sleeping() is created.

With feedback from Herbert Xu

Signed-off-by: Jarek Poplawski
Signed-off-by: David S. Miller
---
 include/net/sch_generic.h | 5 +++++
 net/sched/sch_api.c       | 4 ++++
 net/sched/sch_cbq.c       | 4 ++++
 3 files changed, 13 insertions(+)

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 84d25f2e618..b1d2cfea89c 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -193,6 +193,11 @@ static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
 	return qdisc->dev_queue->qdisc;
 }

+static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+{
+	return qdisc->dev_queue->qdisc_sleeping;
+}
+
 /* The qdisc root lock is a mechanism by which to top level
  * of a qdisc tree can be locked from any qdisc node in the
  * forest. This allows changing the configuration of some
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ef0efeca635..45f442d7de4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -444,6 +444,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 {
 	ktime_t time;

+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(wd->qdisc)->state))
+		return;
+
 	wd->qdisc->flags |= TCQ_F_THROTTLED;
 	time = ktime_set(0, 0);
 	time = ktime_add_ns(time, PSCHED_US2NS(expires));
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 47ef492c4ff..8fa90d68ec6 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -521,6 +521,10 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	psched_tdiff_t delay = cl->undertime - q->now;

+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(cl->qdisc)->state))
+		return;
+
 	if (!cl->delayed) {
 		psched_time_t sched = q->now;
 		ktime_t expires;
--
cgit v1.2.3
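
The fix above is an instance of a general pattern: a self-rearming timer must
re-check the owner's liveness, under the same state the teardown path writes,
before arming itself again. A compilable userspace analogue of that pattern
(a sketch with invented names, not kernel code):

	#include <stdatomic.h>

	struct watchdog {
		atomic_bool deactivated;	/* set by the teardown path */
		void (*arm)(struct watchdog *wd, long expires_ns);
	};

	static void watchdog_schedule(struct watchdog *wd, long expires_ns)
	{
		/* Mirrors qdisc_watchdog_schedule(): refuse to arm once
		 * the owner is shutting down, so teardown never races
		 * with a freshly scheduled timer.
		 */
		if (atomic_load(&wd->deactivated))
			return;
		wd->arm(wd, expires_ns);
	}
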
From f6e0b239a2657ea8cb67f0d83d0bfdbfd19a481b Mon Sep 17 00:00:00 2001
From: Jarek Poplawski
Date: Fri, 22 Aug 2008 03:24:05 -0700
Subject: pkt_sched: Fix qdisc list locking

Since some qdiscs call qdisc_tree_decrease_qlen() (and so
qdisc_lookup()) without rtnl_lock(), adding to and deleting from a
qdisc list needs additional locking. This patch adds a global
spinlock, qdisc_list_lock, and wrapper functions for modifying the
list. It is considered a temporary solution until hfsc_dequeue(),
netem_dequeue() and tbf_dequeue() (or qdisc_tree_decrease_qlen())
are redone.

With feedback from Herbert Xu and David S. Miller.

Signed-off-by: Jarek Poplawski
Acked-by: Herbert Xu
Signed-off-by: David S. Miller
---
 include/net/pkt_sched.h |  1 +
 net/sched/sch_api.c     | 44 +++++++++++++++++++++++++++++++++++++++-----
 net/sched/sch_generic.c |  5 ++---
 3 files changed, 42 insertions(+), 8 deletions(-)

diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 853fe83d9f3..b786a5b0925 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -78,6 +78,7 @@ extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,

 extern int register_qdisc(struct Qdisc_ops *qops);
 extern int unregister_qdisc(struct Qdisc_ops *qops);
+extern void qdisc_list_del(struct Qdisc *q);
 extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
 extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 45f442d7de4..e7fb9e0d21b 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -199,19 +199,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 	return NULL;
 }

+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_del(&q->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	unsigned int i;
+	struct Qdisc *q;
+
+	spin_lock_bh(&qdisc_list_lock);

 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
+		struct Qdisc *txq_root = txq->qdisc_sleeping;

 		q = qdisc_match_from_root(txq_root, handle);
 		if (q)
-			return q;
+			goto unlock;
 	}
-	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+	spin_unlock_bh(&qdisc_list_lock);
+
+	return q;
 }

 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -810,8 +844,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 			goto err_out3;
 		}
 	}
-	if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
-		list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
+
+	qdisc_list_add(sch);

 	return sch;
 }
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c3ed4d44fc1..5f0ade7806a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -526,10 +526,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;

-	if (qdisc->parent)
-		list_del(&qdisc->list);
-
 #ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
 	qdisc_put_stab(qdisc->stab);
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
--
cgit v1.2.3
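
The wrappers above put every list mutation behind one small dedicated
spinlock, so readers that cannot take the big configuration lock (rtnl_lock()
in the kernel) still see a consistent list. A userspace analogue of the same
discipline, using a pthread mutex in place of the bh-spinlock (a sketch with
invented names):

	#include <pthread.h>
	#include <stddef.h>

	struct node {
		unsigned int handle;
		struct node *next;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *list_head;

	static void node_list_add(struct node *n)
	{
		pthread_mutex_lock(&list_lock);
		n->next = list_head;
		list_head = n;
		pthread_mutex_unlock(&list_lock);
	}

	static void node_list_del(struct node *n)
	{
		struct node **pp;

		pthread_mutex_lock(&list_lock);
		for (pp = &list_head; *pp; pp = &(*pp)->next) {
			if (*pp == n) {
				*pp = n->next;
				break;
			}
		}
		pthread_mutex_unlock(&list_lock);
	}

	static struct node *node_lookup(unsigned int handle)
	{
		struct node *n;

		pthread_mutex_lock(&list_lock);	/* same lock as the writers */
		for (n = list_head; n; n = n->next)
			if (n->handle == handle)
				break;
		pthread_mutex_unlock(&list_lock);
		return n;
	}
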
From fdc0bde90a689b9145f2b6f271c03f4c99d09667 Mon Sep 17 00:00:00 2001
From: "Denis V. Lunev"
Date: Sat, 23 Aug 2008 04:43:33 -0700
Subject: icmp: icmp_sk() should not use smp_processor_id() in preemptible code

Pass the namespace into icmp_xmit_lock(), obtain the socket inside,
and return it to the caller.

Thanks Alexey Dobriyan for this report:

Steps to reproduce:

	CONFIG_PREEMPT=y
	CONFIG_DEBUG_PREEMPT=y
	tracepath

BUG: using smp_processor_id() in preemptible [00000000] code: tracepath/3205
caller is icmp_sk+0x15/0x30
Pid: 3205, comm: tracepath Not tainted 2.6.27-rc4 #1

Call Trace:
 [] debug_smp_processor_id+0xe4/0xf0
 [] icmp_sk+0x15/0x30
 [] icmp_send+0x4b/0x3f0
 [] ? trace_hardirqs_on_caller+0xd5/0x160
 [] ? trace_hardirqs_on+0xd/0x10
 [] ? local_bh_enable_ip+0x95/0x110
 [] ? _spin_unlock_bh+0x39/0x40
 [] ? mark_held_locks+0x4c/0x90
 [] ? trace_hardirqs_on+0xd/0x10
 [] ? trace_hardirqs_on_caller+0xd5/0x160
 [] ip_fragment+0x8d4/0x900
 [] ? ip_finish_output2+0x0/0x290
 [] ? ip_finish_output+0x0/0x60
 [] ? dst_output+0x0/0x10
 [] ip_finish_output+0x4c/0x60
 [] ip_output+0xa3/0xf0
 [] ip_local_out+0x20/0x30
 [] ip_push_pending_frames+0x27f/0x400
 [] udp_push_pending_frames+0x233/0x3d0
 [] udp_sendmsg+0x321/0x6f0
 [] inet_sendmsg+0x45/0x80
 [] sock_sendmsg+0xdf/0x110
 [] ? autoremove_wake_function+0x0/0x40
 [] ? validate_chain+0x415/0x1010
 [] ? __do_fault+0x140/0x450
 [] ? __lock_acquire+0x260/0x590
 [] ? sockfd_lookup_light+0x45/0x80
 [] sys_sendto+0xea/0x120
 [] ? _spin_unlock_irqrestore+0x42/0x80
 [] ? __up_read+0x4c/0xb0
 [] ? up_read+0x26/0x30
 [] system_call_fastpath+0x16/0x1b

icmp6_sk() is similar.

Signed-off-by: Denis V. Lunev
Signed-off-by: David S. Miller
---
 net/ipv4/icmp.c | 22 ++++++++++++++--------
 net/ipv6/icmp.c | 23 ++++++++++++-----------
 2 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 860558633b2..55c355e6323 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
 	return net->ipv4.icmp_sk[smp_processor_id()];
 }

-static inline int icmp_xmit_lock(struct sock *sk)
+static inline struct sock *icmp_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();

+	sk = icmp_sk(net);
+
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path signals a
 		 * dst_link_failure() for an outgoing ICMP packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }

 static inline void icmp_xmit_unlock(struct sock *sk)
@@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb->rtable;
 	struct net *net = dev_net(rt->u.dst.dev);
-	struct sock *sk = icmp_sk(net);
-	struct inet_sock *inet = inet_sk(sk);
+	struct sock *sk;
+	struct inet_sock *inet;
 	__be32 daddr;

 	if (ip_options_echo(&icmp_param->replyopts, skb))
 		return;

-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	inet = inet_sk(sk);

 	icmp_param->data.icmph.checksum = 0;

@@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	if (!rt)
 		goto out;
 	net = dev_net(rt->u.dst.dev);
-	sk = icmp_sk(net);

 	/*
 	 *	Find the original header. It is expected to be valid, of course.
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		}
 	}

-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;

 	/*
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index abedf95fdf2..b3157a0cc15 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = {
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };

-static __inline__ int icmpv6_xmit_lock(struct sock *sk)
+static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
+	sk = icmpv6_sk(net);

 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path (f.e. SIT or
 		 * ip6ip6 tunnel) signals dst_link_failure() for an
 		 * outgoing ICMP6 packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }

 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
@@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
 	fl.fl_icmp_code = code;
 	security_skb_classify_flow(skb, &fl);

-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);

 	if (!icmpv6_xrlim_allow(sk, type, &fl))
 		goto out;
@@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
 	security_skb_classify_flow(skb, &fl);

-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);

 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
 		fl.oif = np->mcast_oif;
--
cgit v1.2.3
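
The crux of the fix above: smp_processor_id() is only meaningful once
migration is impossible, so the per-CPU socket lookup moves inside the
helper, after local_bh_disable(), and the helper returns the locked socket
(or NULL) instead of taking it as an argument. A loose userspace analogue of
that API shape, with invented names (user space cannot actually pin itself
the way local_bh_disable() does, so this only illustrates the shape):

	#define _GNU_SOURCE
	#include <pthread.h>
	#include <sched.h>
	#include <stddef.h>

	#define NR_SLOTS 64

	struct slot {
		pthread_mutex_t lock;	/* zero-initialized static storage;
					 * on glibc this matches
					 * PTHREAD_MUTEX_INITIALIZER */
		long counter;
	};

	static struct slot slots[NR_SLOTS];

	static struct slot *slot_xmit_lock(void)
	{
		/* Lookup and locking happen in one place, so no caller can
		 * sample the CPU id early and use it late.
		 */
		struct slot *s = &slots[sched_getcpu() % NR_SLOTS];

		if (pthread_mutex_trylock(&s->lock) != 0)
			return NULL;	/* caller bails out, like sk == NULL */
		return s;
	}

	static void slot_xmit_unlock(struct slot *s)
	{
		pthread_mutex_unlock(&s->lock);
	}
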
From f410a1fba7afa79d2992620e874a343fdba28332 Mon Sep 17 00:00:00 2001
From: Stephen Hemminger
Date: Sat, 23 Aug 2008 05:16:46 -0700
Subject: ipv6: protocol for address routes

This fixes a problem spotted with zebra, though it is not necessarily
a kernel problem. With IPv6, when an address is added to an interface,
Zebra creates a duplicate RIB entry: one as a connected route, and the
other as a kernel route.

When an address is added to an interface, the RTM_NEWADDR message
causes Zebra to create a connected route. In IPv4, when an address is
added to an interface, an RTM_NEWROUTE message is sent to user space
with the protocol RTPROT_KERNEL. Zebra ignores these messages because
it already has the connected route.

The problem is that the route created in IPv6 has route protocol ==
RTPROT_BOOT. Was this a design decision or a bug? This fixes it. The
same patch applies to both net-2.6 and stable.

Signed-off-by: Stephen Hemminger
Signed-off-by: David S. Miller
---
 net/ipv6/addrconf.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e2d3b7580b7..7b6a584b62d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1688,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
 		.fc_dst_len = plen,
 		.fc_flags = RTF_UP | flags,
 		.fc_nlinfo.nl_net = dev_net(dev),
+		.fc_protocol = RTPROT_KERNEL,
 	};

 	ipv6_addr_copy(&cfg.fc_dst, pfx);
--
cgit v1.2.3
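
With fc_protocol set as above, the IPv6 prefix route installed on address
addition is tagged RTPROT_KERNEL, so iproute2 reports it as "proto kernel"
and routing daemons such as Zebra can filter it the same way they already do
for IPv4. Illustrative output (the prefix is a documentation example and the
exact columns vary by iproute2 version):

	$ ip -6 route show dev eth0
	2001:db8::/64  proto kernel  metric 256
	fe80::/64  proto kernel  metric 256
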