2 * Internet Control Message Protocol (ICMPv6)
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
10 * Based on net/ipv4/icmp.c
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
23 * Andi Kleen : exception handling
24 * Andi Kleen add rate limits. never reply to a icmp.
25 * add more length checks and other fixes.
26 * yoshfuji : ensure to send parameter problem for
28 * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit.
30 * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support
31 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
39 #include <linux/kernel.h>
40 #include <linux/sockios.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/netfilter.h>
47 #include <linux/sysctl.h>
50 #include <linux/inet.h>
51 #include <linux/netdevice.h>
52 #include <linux/icmpv6.h>
58 #include <net/ip6_checksum.h>
59 #include <net/protocol.h>
61 #include <net/rawv6.h>
62 #include <net/transp_v6.h>
63 #include <net/ip6_route.h>
64 #include <net/addrconf.h>
68 #include <asm/uaccess.h>
69 #include <asm/system.h>
71 DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
72 EXPORT_SYMBOL(icmpv6_statistics);
73 DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
74 EXPORT_SYMBOL(icmpv6msg_statistics);
77 * The ICMP socket(s). This is the most convenient way to flow control
78 * our ICMP output as well as maintain a clean interface throughout
79 * all layers. All Socketless IP sends will soon be gone.
81 * On SMP we have one ICMP socket per-cpu.
/*
 * Return the ICMPv6 control socket for the current CPU in @net.
 * NOTE(review): this extract is missing interior lines (original line
 * numbering is non-contiguous); the function braces are not visible here.
 */
83 static inline struct sock *icmpv6_sk(struct net *net)
85 return net->ipv6.icmp_sk[smp_processor_id()];
88 static int icmpv6_rcv(struct sk_buff *skb);
/*
 * Protocol handler registered for IPPROTO_ICMPV6: all inbound ICMPv6
 * packets are dispatched to icmpv6_rcv(). NOPOLICY/FINAL flags are set
 * so xfrm policy checks are handled inside the receive path itself.
 * NOTE(review): closing brace of this initializer is missing from extract.
 */
90 static struct inet6_protocol icmpv6_protocol = {
91 .handler = icmpv6_rcv,
92 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
/*
 * Try to take the per-CPU ICMPv6 socket lock without spinning.
 * Uses spin_trylock because the caller may already be inside the
 * output path (see comment below); returns non-zero on failure
 * (presumably — the return statements are missing from this extract).
 */
95 static __inline__ int icmpv6_xmit_lock(struct sock *sk)
99 if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
100 /* This can happen if the output path (f.e. SIT or
101 * ip6ip6 tunnel) signals dst_link_failure() for an
102 * outgoing ICMP6 packet.
/* Release the per-CPU ICMPv6 socket lock taken by icmpv6_xmit_lock(). */
110 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
112 spin_unlock_bh(&sk->sk_lock.slock);
116 * Slightly more convenient version of icmpv6_send.
/*
 * Convenience wrapper: send an ICMPv6 Parameter Problem error for @skb
 * with the given @code and problem pointer @pos.
 */
118 void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
120 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
125 * Figure out, may we reply to this packet with icmp error.
127 * We do not reply, if:
128 * - it was icmp error message.
129 * - it is truncated, so that it is known, that protocol is ICMPV6
130 * (i.e. in the middle of some exthdr)
/*
 * Decide whether @skb may NOT be answered with an ICMPv6 error:
 * walks past the extension headers and checks whether the inner
 * protocol is itself ICMPv6 carrying an error (non-informational)
 * message. Several statements (len checks, return paths) are missing
 * from this extract — NOTE(review): do not rely on this fragment alone.
 */
135 static int is_ineligible(struct sk_buff *skb)
137 int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
138 int len = skb->len - ptr;
139 __u8 nexthdr = ipv6_hdr(skb)->nexthdr;
144 ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
147 if (nexthdr == IPPROTO_ICMPV6) {
/* Pull just the icmp6_type byte without linearizing the skb. */
149 tp = skb_header_pointer(skb,
150 ptr+offsetof(struct icmp6hdr, icmp6_type),
151 sizeof(_type), &_type);
/* Error messages have the INFOMSG bit clear. */
153 !(*tp & ICMPV6_INFOMSG_MASK))
160 * Check the ICMP output rate limit
/*
 * Rate-limit check for outgoing ICMPv6 messages. Informational
 * messages and PKT_TOOBIG (PMTU discovery) are never limited; other
 * types are token-bucket limited via xrlim_allow() against the output
 * route, with the timeout scaled by destination prefix width.
 * NOTE(review): the closing return/dst_release lines are not visible
 * in this extract.
 */
162 static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
165 struct dst_entry *dst;
166 struct net *net = sk->sk_net;
169 /* Informational messages are not limited. */
170 if (type & ICMPV6_INFOMSG_MASK)
173 /* Do not limit pmtu discovery, it would break it. */
174 if (type == ICMPV6_PKT_TOOBIG)
178 * Look up the output route.
179 * XXX: perhaps the expire for routing entries cloned by
180 * this lookup should be more aggressive (not longer than timeout).
182 dst = ip6_route_output(net, sk, fl);
184 IP6_INC_STATS(ip6_dst_idev(dst),
185 IPSTATS_MIB_OUTNOROUTES);
186 } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
189 struct rt6_info *rt = (struct rt6_info *)dst;
/* Base timeout comes from the per-net icmpv6 sysctl. */
190 int tmo = net->ipv6.sysctl.icmpv6_time;
192 /* Give more bandwidth to wider prefixes. */
193 if (rt->rt6i_dst.plen < 128)
194 tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
196 res = xrlim_allow(dst, tmo);
203 * an inline helper for the "simple" if statement below
204 * checks if parameter problem report is caused by an
205 * unrecognized IPv6 option that has the Option Type
206 * highest-order two bits set to 10
/*
 * Return non-zero when the option byte at @offset has its two
 * highest-order bits equal to 10 (an unrecognized option that must
 * still be reported even for multicast destinations, per RFC).
 * NOTE(review): the NULL check on 'op' is missing from this extract.
 */
209 static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
213 offset += skb_network_offset(skb);
214 op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
217 return (*op & 0xC0) == 0x80;
/*
 * Finalize and transmit the queued ICMPv6 frames on @sk: copy the
 * prepared header @thdr into the first skb, compute the ICMPv6
 * checksum (single-skb fast path vs. walking the whole write queue),
 * then push the pending frames out via ip6_push_pending_frames().
 * NOTE(review): several lines (error return, csum_ipv6_magic argument
 * tails) are missing from this extract.
 */
220 static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
223 struct icmp6hdr *icmp6h;
226 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
229 icmp6h = icmp6_hdr(skb);
230 memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
231 icmp6h->icmp6_cksum = 0;
/* Fast path: one skb queued, checksum computed directly on it. */
233 if (skb_queue_len(&sk->sk_write_queue) == 1) {
234 skb->csum = csum_partial((char *)icmp6h,
235 sizeof(struct icmp6hdr), skb->csum);
236 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
/* Slow path: fold the partial checksums of every queued skb. */
243 skb_queue_walk(&sk->sk_write_queue, skb) {
244 tmp_csum = csum_add(tmp_csum, skb->csum);
247 tmp_csum = csum_partial((char *)icmp6h,
248 sizeof(struct icmp6hdr), tmp_csum);
249 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
254 ip6_push_pending_frames(sk);
/*
 * ip6_append_data() getfrag callback: copy @len bytes of the original
 * (offending) packet into the outgoing ICMPv6 skb while accumulating
 * the checksum. For error messages (non-informational types) the
 * conntrack entry of the original skb is attached to the reply.
 */
265 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
267 struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
268 struct sk_buff *org_skb = msg->skb;
271 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
273 skb->csum = csum_block_add(skb->csum, csum, odd);
274 if (!(msg->type & ICMPV6_INFOMSG_MASK))
275 nf_ct_attach(skb, org_skb);
279 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
/*
 * Mobile IPv6: swap the IPv6 source address with the Home Address
 * option (HAO) found in the destination options header, so the ICMPv6
 * error is built against the home address rather than the care-of
 * address. No-op stub when MIP6 is not configured (see below).
 */
280 static void mip6_addr_swap(struct sk_buff *skb)
282 struct ipv6hdr *iph = ipv6_hdr(skb);
283 struct inet6_skb_parm *opt = IP6CB(skb);
284 struct ipv6_destopt_hao *hao;
289 off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
290 if (likely(off >= 0)) {
291 hao = (struct ipv6_destopt_hao *)
292 (skb_network_header(skb) + off);
/* Three-way swap of saddr and hao->addr through tmp. */
293 ipv6_addr_copy(&tmp, &iph->saddr);
294 ipv6_addr_copy(&iph->saddr, &hao->addr);
295 ipv6_addr_copy(&hao->addr, &tmp);
/* !MIP6 build: address swap is a no-op. */
300 static inline void mip6_addr_swap(struct sk_buff *skb) {}
304 * Send an ICMP message in response to a packet in error
/*
 * Build and transmit an ICMPv6 error message (@type/@code/@info) in
 * response to the offending packet @skb, subject to the RFC 2463
 * eligibility rules (no errors about multicast/anycast/unspecified
 * sources, no errors about ICMP errors) and to the xrlim rate limit.
 * NOTE(review): this extract is missing many interior lines (labels,
 * gotos, variable declarations such as fl/sk/err/hlimit/tclass);
 * comments below describe only what is visible.
 */
306 void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
307 struct net_device *dev)
309 struct net *net = skb->dev->nd_net;
310 struct inet6_dev *idev = NULL;
311 struct ipv6hdr *hdr = ipv6_hdr(skb);
313 struct ipv6_pinfo *np;
314 struct in6_addr *saddr = NULL;
315 struct dst_entry *dst;
316 struct dst_entry *dst2;
317 struct icmp6hdr tmp_hdr;
320 struct icmpv6_msg msg;
/* Sanity: the IPv6 header must lie fully inside the skb head. */
327 if ((u8 *)hdr < skb->head ||
328 (skb->network_header + sizeof(*hdr)) > skb->tail)
332 * Make sure we respect the rules
333 * i.e. RFC 1885 2.4(e)
334 * Rule (e.1) is enforced by not using icmpv6_send
335 * in any code that processes icmp errors.
337 addr_type = ipv6_addr_type(&hdr->daddr);
339 if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
/*
 * Multicast destination or non-host delivery: only PKT_TOOBIG and
 * PARAMPROB for an unrecognized option (bits 10) may be answered.
 */
346 if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
347 if (type != ICMPV6_PKT_TOOBIG &&
348 !(type == ICMPV6_PARAMPROB &&
349 code == ICMPV6_UNK_OPTION &&
350 (opt_unrec(skb, info))))
356 addr_type = ipv6_addr_type(&hdr->saddr);
/* Link-local source: pin the reply to the receiving interface. */
362 if (addr_type & IPV6_ADDR_LINKLOCAL)
363 iif = skb->dev->ifindex;
366 * Must not send error if the source does not uniquely
367 * identify a single node (RFC2463 Section 2.4).
368 * We check unspecified / multicast addresses here,
369 * and anycast addresses will be checked later.
371 if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
372 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
377 * Never answer an ICMP error packet.
379 if (is_ineligible(skb)) {
380 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
/* Build the flow: reply goes back to the offender's source address. */
386 memset(&fl, 0, sizeof(fl));
387 fl.proto = IPPROTO_ICMPV6;
388 ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
390 ipv6_addr_copy(&fl.fl6_src, saddr);
392 fl.fl_icmp_type = type;
393 fl.fl_icmp_code = code;
394 security_skb_classify_flow(skb, &fl);
/* Per-CPU socket lock; bail out if the output path already holds it. */
399 if (icmpv6_xmit_lock(sk))
402 if (!icmpv6_xrlim_allow(sk, type, &fl))
405 tmp_hdr.icmp6_type = type;
406 tmp_hdr.icmp6_code = code;
407 tmp_hdr.icmp6_cksum = 0;
408 tmp_hdr.icmp6_pointer = htonl(info);
410 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
411 fl.oif = np->mcast_oif;
413 err = ip6_dst_lookup(sk, &dst, &fl);
418 * We won't send icmp if the destination is known
421 if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
422 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
423 goto out_dst_release;
426 /* No need to clone since we're just using its address. */
429 err = xfrm_lookup(&dst, &fl, sk, 0);
/*
 * xfrm relookup on the reverse flow; dst2 carries the ICMP-specific
 * policy result. NOTE(review): the merge of dst/dst2 and the error
 * paths are missing from this extract.
 */
442 if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
445 if (ip6_dst_lookup(sk, &dst2, &fl))
448 err = xfrm_lookup(&dst2, &fl, sk, XFRM_LOOKUP_ICMP);
449 if (err == -ENOENT) {
/* Select hop limit: socket setting, route metric, or device default. */
462 if (ipv6_addr_is_multicast(&fl.fl6_dst))
463 hlimit = np->mcast_hops;
465 hlimit = np->hop_limit;
467 hlimit = dst_metric(dst, RTAX_HOPLIMIT);
469 hlimit = ipv6_get_hoplimit(dst->dev);
476 msg.offset = skb_network_offset(skb);
/* Clamp payload so the error fits in the IPv6 minimum MTU. */
479 len = skb->len - msg.offset;
480 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
482 LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
483 goto out_dst_release;
486 idev = in6_dev_get(skb->dev);
488 err = ip6_append_data(sk, icmpv6_getfrag, &msg,
489 len + sizeof(struct icmp6hdr),
490 sizeof(struct icmp6hdr),
491 hlimit, tclass, NULL, &fl, (struct rt6_info*)dst,
/* On append failure the partially built frames are discarded. */
494 ip6_flush_pending_frames(sk);
497 err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
500 if (likely(idev != NULL))
505 icmpv6_xmit_unlock(sk);
508 EXPORT_SYMBOL(icmpv6_send);
/*
 * Answer an ICMPv6 Echo Request in @skb with an Echo Reply: the reply
 * reuses the request's ICMP header (type rewritten), swaps the
 * addresses, and is built through ip6_append_data() /
 * icmpv6_push_pending_frames() like icmpv6_send().
 * NOTE(review): declarations of fl/sk/err/hlimit/tclass and several
 * goto labels are missing from this extract.
 */
510 static void icmpv6_echo_reply(struct sk_buff *skb)
512 struct net *net = skb->dev->nd_net;
514 struct inet6_dev *idev;
515 struct ipv6_pinfo *np;
516 struct in6_addr *saddr = NULL;
517 struct icmp6hdr *icmph = icmp6_hdr(skb);
518 struct icmp6hdr tmp_hdr;
520 struct icmpv6_msg msg;
521 struct dst_entry *dst;
/* Reply source is the request's destination address. */
526 saddr = &ipv6_hdr(skb)->daddr;
528 if (!ipv6_unicast_destination(skb))
531 memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
532 tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
534 memset(&fl, 0, sizeof(fl));
535 fl.proto = IPPROTO_ICMPV6;
536 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
538 ipv6_addr_copy(&fl.fl6_src, saddr);
539 fl.oif = skb->dev->ifindex;
540 fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
541 security_skb_classify_flow(skb, &fl);
546 if (icmpv6_xmit_lock(sk))
549 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
550 fl.oif = np->mcast_oif;
552 err = ip6_dst_lookup(sk, &dst, &fl);
555 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
/* Select hop limit: socket setting, route metric, or device default. */
558 if (ipv6_addr_is_multicast(&fl.fl6_dst))
559 hlimit = np->mcast_hops;
561 hlimit = np->hop_limit;
563 hlimit = dst_metric(dst, RTAX_HOPLIMIT);
565 hlimit = ipv6_get_hoplimit(dst->dev);
571 idev = in6_dev_get(skb->dev);
575 msg.type = ICMPV6_ECHO_REPLY;
577 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
578 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
579 (struct rt6_info*)dst, MSG_DONTWAIT);
/* On append failure the partially built frames are discarded. */
582 ip6_flush_pending_frames(sk);
585 err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
588 if (likely(idev != NULL))
592 icmpv6_xmit_unlock(sk);
/*
 * Deliver a received ICMPv6 error to the upper-layer protocol that the
 * embedded (offending) packet belonged to: skip the inner IPv6 header
 * and extension headers, then call the registered inet6_protocol
 * err_handler and raw socket error delivery for that next-header.
 * NOTE(review): some return paths and the rcu lock/unlock lines are
 * missing from this extract.
 */
595 static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
597 struct inet6_protocol *ipprot;
602 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
605 nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
606 if (ipv6_ext_hdr(nexthdr)) {
607 /* now skip over extension headers */
608 inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
612 inner_offset = sizeof(struct ipv6hdr);
615 /* Check in header including 8 bytes of inner protocol header. */
616 if (!pskb_may_pull(skb, inner_offset+8))
619 /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
620 Without this we will not able f.e. to make source routed
622 Corresponding argument (opt) to notifiers is already added.
/* Hash into the inet6_protos table by next-header value. */
626 hash = nexthdr & (MAX_INET_PROTOS - 1);
629 ipprot = rcu_dereference(inet6_protos[hash]);
630 if (ipprot && ipprot->err_handler)
631 ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
/* Raw sockets bound to this protocol also get the error. */
634 raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
638 * Handle icmp messages
/*
 * Main ICMPv6 receive handler (registered via icmpv6_protocol):
 * performs xfrm policy checks, verifies the ICMPv6 checksum, then
 * dispatches by message type — echo request/reply, PKT_TOOBIG (PMTU
 * update), error notifications, NDISC, and MLD messages.
 * NOTE(review): many interior lines (goto labels, 'break's, the
 * switch(type) opening, ndisc_rcv call, kfree_skb/return paths) are
 * missing from this extract; comments describe only visible code.
 */
641 static int icmpv6_rcv(struct sk_buff *skb)
643 struct net_device *dev = skb->dev;
644 struct inet6_dev *idev = __in6_dev_get(dev);
645 struct in6_addr *saddr, *daddr;
646 struct ipv6hdr *orig_hdr;
647 struct icmp6hdr *hdr;
650 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
653 if (!(skb->sp && skb->sp->xvec[skb->sp->len - 1]->props.flags &
657 if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr)))
/* Temporarily re-point the network header for the reverse check. */
660 nh = skb_network_offset(skb);
661 skb_set_network_header(skb, sizeof(*hdr));
663 if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
666 skb_set_network_header(skb, nh);
669 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);
671 saddr = &ipv6_hdr(skb)->saddr;
672 daddr = &ipv6_hdr(skb)->daddr;
674 /* Perform checksum. */
675 switch (skb->ip_summed) {
676 case CHECKSUM_COMPLETE:
677 if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
/* Hardware did not verify: fold the pseudo-header and recompute. */
682 skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
684 if (__skb_checksum_complete(skb)) {
685 LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
686 NIP6(*saddr), NIP6(*daddr));
691 if (!pskb_pull(skb, sizeof(*hdr)))
694 hdr = icmp6_hdr(skb);
696 type = hdr->icmp6_type;
698 ICMP6MSGIN_INC_STATS_BH(idev, type);
701 case ICMPV6_ECHO_REQUEST:
702 icmpv6_echo_reply(skb);
705 case ICMPV6_ECHO_REPLY:
706 /* we couldn't care less */
709 case ICMPV6_PKT_TOOBIG:
710 /* BUGGG_FUTURE: if packet contains rthdr, we cannot update
711 standard destination cache. Seems, only "advanced"
712 destination cache will allow to solve this problem
/* Re-fetch hdr: pskb_may_pull may have reallocated skb data. */
715 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
717 hdr = icmp6_hdr(skb);
718 orig_hdr = (struct ipv6hdr *) (hdr + 1);
719 rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
720 ntohl(hdr->icmp6_mtu));
723 * Drop through to notify
726 case ICMPV6_DEST_UNREACH:
727 case ICMPV6_TIME_EXCEED:
728 case ICMPV6_PARAMPROB:
729 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
732 case NDISC_ROUTER_SOLICITATION:
733 case NDISC_ROUTER_ADVERTISEMENT:
734 case NDISC_NEIGHBOUR_SOLICITATION:
735 case NDISC_NEIGHBOUR_ADVERTISEMENT:
740 case ICMPV6_MGM_QUERY:
741 igmp6_event_query(skb);
744 case ICMPV6_MGM_REPORT:
745 igmp6_event_report(skb);
/* Known but unhandled types: fall through to the default path. */
748 case ICMPV6_MGM_REDUCTION:
749 case ICMPV6_NI_QUERY:
750 case ICMPV6_NI_REPLY:
751 case ICMPV6_MLD2_REPORT:
752 case ICMPV6_DHAAD_REQUEST:
753 case ICMPV6_DHAAD_REPLY:
754 case ICMPV6_MOBILE_PREFIX_SOL:
755 case ICMPV6_MOBILE_PREFIX_ADV:
759 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
762 if (type & ICMPV6_INFOMSG_MASK)
766 * error of unknown type.
767 * must pass to upper level
770 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
777 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
/*
 * Initialize a flowi for an ICMPv6 message of @type from @saddr to
 * @daddr and classify it for LSM security; used by NDISC/MLD senders.
 * NOTE(review): the 'type' and 'oif' parameter lines are missing from
 * this extract.
 */
783 void icmpv6_flow_init(struct sock *sk, struct flowi *fl,
785 const struct in6_addr *saddr,
786 const struct in6_addr *daddr,
789 memset(fl, 0, sizeof(*fl));
790 ipv6_addr_copy(&fl->fl6_src, saddr);
791 ipv6_addr_copy(&fl->fl6_dst, daddr);
792 fl->proto = IPPROTO_ICMPV6;
793 fl->fl_icmp_type = type;
794 fl->fl_icmp_code = 0;
796 security_sk_classify_flow(sk, fl);
800 * Special lock-class for __icmpv6_sk:
802 static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
/*
 * Per-net init: allocate the icmp_sk array and create one kernel raw
 * ICMPv6 socket per possible CPU, each with GFP_ATOMIC allocation and
 * a dedicated dst-lock lockdep class. On any failure, sockets created
 * so far are released and the array freed.
 * NOTE(review): several lines (sndbuf assignment head, error labels,
 * return statements) are missing from this extract.
 */
804 static int __net_init icmpv6_sk_init(struct net *net)
810 kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
811 if (net->ipv6.icmp_sk == NULL)
814 for_each_possible_cpu(i) {
816 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
820 "Failed to initialize the ICMP6 control socket "
826 net->ipv6.icmp_sk[i] = sk = sock->sk;
827 sk_change_net(sk, net);
/* ICMP replies are sent from softirq context: allocations must be atomic. */
829 sk->sk_allocation = GFP_ATOMIC;
831 * Split off their lock-class, because sk->sk_dst_lock
832 * gets used from softirqs, which is safe for
833 * __icmpv6_sk (because those never get directly used
834 * via userspace syscalls), but unsafe for normal sockets.
836 lockdep_set_class(&sk->sk_dst_lock,
837 &icmpv6_socket_sk_dst_lock_key);
839 /* Enough space for 2 64K ICMP packets, including
840 * sk_buff struct overhead.
843 (2 * ((64 * 1024) + sizeof(struct sk_buff)));
845 sk->sk_prot->unhash(sk);
/* Error rollback: release the sockets created before the failure. */
850 for (j = 0; j < i; j++)
851 sk_release_kernel(net->ipv6.icmp_sk[j]);
852 kfree(net->ipv6.icmp_sk);
/* Per-net teardown: release every per-CPU ICMPv6 socket and the array. */
856 static void __net_exit icmpv6_sk_exit(struct net *net)
860 for_each_possible_cpu(i) {
861 sk_release_kernel(net->ipv6.icmp_sk[i]);
863 kfree(net->ipv6.icmp_sk);
/* Pernet ops tying icmpv6_sk_init/exit to network-namespace lifetime. */
866 static struct pernet_operations icmpv6_sk_ops = {
867 .init = icmpv6_sk_init,
868 .exit = icmpv6_sk_exit,
/*
 * Module init: register the pernet socket subsystem, then the ICMPv6
 * protocol handler; unregisters the subsystem if protocol registration
 * fails. NOTE(review): return statements are missing from this extract.
 */
871 int __init icmpv6_init(void)
875 err = register_pernet_subsys(&icmpv6_sk_ops);
880 if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
885 printk(KERN_ERR "Failed to register ICMP6 protocol\n");
886 unregister_pernet_subsys(&icmpv6_sk_ops);
/* Module teardown: reverse of icmpv6_init(). */
890 void icmpv6_cleanup(void)
892 unregister_pernet_subsys(&icmpv6_sk_ops);
893 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
/*
 * Table mapping ICMPV6_DEST_UNREACH codes to errno values and a
 * fatal/non-fatal flag, indexed by code (see icmpv6_err_convert()).
 * NOTE(review): struct members and most initializer entries are
 * missing from this extract.
 */
897 static const struct icmp6_err {
905 { /* ADM_PROHIBITED */
909 { /* Was NOT_NEIGHBOUR, now reserved */
/*
 * Translate an ICMPv6 error (@type/@code) into an errno stored in
 * *@err; the return value indicates whether the error is fatal for
 * the connection. DEST_UNREACH codes are looked up in tab_unreach.
 * NOTE(review): default errno assignment, switch statement opening,
 * break/return lines are missing from this extract.
 */
923 int icmpv6_err_convert(int type, int code, int *err)
930 case ICMPV6_DEST_UNREACH:
/* Only codes up to PORT_UNREACH have table entries. */
932 if (code <= ICMPV6_PORT_UNREACH) {
933 *err = tab_unreach[code].err;
934 fatal = tab_unreach[code].fatal;
938 case ICMPV6_PKT_TOOBIG:
942 case ICMPV6_PARAMPROB:
947 case ICMPV6_TIME_EXCEED:
955 EXPORT_SYMBOL(icmpv6_err_convert);
/*
 * Sysctl template for net.ipv6.icmp (currently just "ratelimit");
 * copied per-net by ipv6_icmp_sysctl_init() with .data repointed to
 * the namespace's own icmpv6_time.
 * NOTE(review): sentinel entry and some fields missing from extract.
 */
958 ctl_table ipv6_icmp_table_template[] = {
960 .ctl_name = NET_IPV6_ICMP_RATELIMIT,
961 .procname = "ratelimit",
962 .data = &init_net.ipv6.sysctl.icmpv6_time,
963 .maxlen = sizeof(int),
965 .proc_handler = &proc_dointvec
970 struct ctl_table *ipv6_icmp_sysctl_init(struct net *net)
972 struct ctl_table *table;
974 table = kmemdup(ipv6_icmp_table_template,
975 sizeof(ipv6_icmp_table_template),
979 table[0].data = &net->ipv6.sysctl.icmpv6_time;