2 * Internet Control Message Protocol (ICMPv6)
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
10 * Based on net/ipv4/icmp.c
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
23 * Andi Kleen : exception handling
24 * Andi Kleen add rate limits. never reply to a icmp.
25 * add more length checks and other fixes.
26 * yoshfuji : ensure to sent parameter problem for
28 * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit.
30 * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support
31 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
39 #include <linux/kernel.h>
40 #include <linux/sockios.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/netfilter.h>
47 #include <linux/sysctl.h>
50 #include <linux/inet.h>
51 #include <linux/netdevice.h>
52 #include <linux/icmpv6.h>
58 #include <net/ip6_checksum.h>
59 #include <net/protocol.h>
61 #include <net/rawv6.h>
62 #include <net/transp_v6.h>
63 #include <net/ip6_route.h>
64 #include <net/addrconf.h>
67 #include <asm/uaccess.h>
68 #include <asm/system.h>
/* Global ICMPv6 SNMP (MIB) counters, exported for other IPv6 modules. */
70 DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
71 EXPORT_SYMBOL(icmpv6_statistics);
74 * The ICMP socket(s). This is the most convenient way to flow control
75 * our ICMP output as well as maintain a clean interface throughout
76 * all layers. All Socketless IP sends will soon be gone.
78 * On SMP we have one ICMP socket per-cpu.
/* Per-CPU kernel control socket used to transmit ICMPv6 packets;
 * the icmpv6_socket macro resolves to the current CPU's entry. */
80 static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL;
81 #define icmpv6_socket __get_cpu_var(__icmpv6_socket)
/* Forward declaration: ICMPv6 input handler registered below. */
83 static int icmpv6_rcv(struct sk_buff **pskb);
/* Protocol descriptor hooking icmpv6_rcv into IPv6 input dispatch.
 * INET6_PROTO_FINAL marks this as a terminal (non-extension) header. */
85 static struct inet6_protocol icmpv6_protocol = {
86 .handler = icmpv6_rcv,
87 .flags = INET6_PROTO_FINAL,
/*
 * Take the per-CPU ICMPv6 socket lock before transmitting.
 * Uses spin_trylock (not spin_lock) because the transmit path can
 * re-enter this code, as the comment inside explains.
 * NOTE(review): the failure-return path is elided in this view.
 */
90 static __inline__ int icmpv6_xmit_lock(void)
94 if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) {
95 /* This can happen if the output path (f.e. SIT or
96 * ip6ip6 tunnel) signals dst_link_failure() for an
97 * outgoing ICMP6 packet.
/* Counterpart: release the per-CPU ICMPv6 socket lock. */
105 static __inline__ void icmpv6_xmit_unlock(void)
107 spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
/* Convenience wrapper: send an ICMPv6 Parameter Problem for @skb with
 * the given @code and problem pointer @pos, via the receiving device. */
111 * Slightly more convenient version of icmpv6_send.
113 void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
115 icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
/*
 * Decide whether @skb may NOT be answered with an ICMPv6 error:
 * returns nonzero if the packet is itself an ICMPv6 error message, or
 * is truncated inside its extension-header chain. Used to enforce the
 * "never reply to an ICMP error" rule.
 */
120 * Figure out, may we reply to this packet with icmp error.
122 * We do not reply, if:
123 * - it was icmp error message.
124 * - it is truncated, so that it is known, that protocol is ICMPV6
125 * (i.e. in the middle of some exthdr)
130 static int is_ineligible(struct sk_buff *skb)
/* Offset of the first byte past the fixed IPv6 header. */
132 int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
133 int len = skb->len - ptr;
134 __u8 nexthdr = ipv6_hdr(skb)->nexthdr;
/* Walk past any extension headers to find the upper-layer protocol. */
139 ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
142 if (nexthdr == IPPROTO_ICMPV6) {
/* Pull the ICMPv6 type byte without assuming linear skb data. */
144 tp = skb_header_pointer(skb,
145 ptr+offsetof(struct icmp6hdr, icmp6_type),
146 sizeof(_type), &_type);
/* A clear INFOMSG bit means this is an error message -> ineligible. */
148 !(*tp & ICMPV6_INFOMSG_MASK))
/* Minimum interval between ICMPv6 error transmissions (jiffies),
 * tunable via the "ratelimit" sysctl declared at the end of the file. */
154 static int sysctl_icmpv6_time __read_mostly = 1*HZ;
/*
 * Rate-limit check for outgoing ICMPv6 messages: returns nonzero if a
 * message of @type may be sent now along the route described by fl.
 * Informational messages and PMTU discovery are never limited.
 */
157 * Check the ICMP output rate limit
159 static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
162 struct dst_entry *dst;
165 /* Informational messages are not limited. */
166 if (type & ICMPV6_INFOMSG_MASK)
169 /* Do not limit pmtu discovery, it would break it. */
170 if (type == ICMPV6_PKT_TOOBIG)
174 * Look up the output route.
175 * XXX: perhaps the expire for routing entries cloned by
176 * this lookup should be more aggressive (not longer than timeout).
178 dst = ip6_route_output(sk, fl);
/* Route lookup failed: count it, no further limiting decision. */
180 IP6_INC_STATS(ip6_dst_idev(dst),
181 IPSTATS_MIB_OUTNOROUTES);
/* Loopback destinations are handled separately (branch body elided). */
182 } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
185 struct rt6_info *rt = (struct rt6_info *)dst;
186 int tmo = sysctl_icmpv6_time;
188 /* Give more bandwidth to wider prefixes. */
189 if (rt->rt6i_dst.plen < 128)
190 tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
/* Token-bucket style check on the route entry with the scaled timeout. */
192 res = xrlim_allow(dst, tmo);
/*
 * Return nonzero if the option byte at @offset in @skb has its two
 * highest-order bits equal to binary 10 (0x80), i.e. an unrecognized
 * option for which RFC rules require a Parameter Problem report even
 * to a multicast destination.
 */
199 * an inline helper for the "simple" if statement below
200 * checks if parameter problem report is caused by an
201 * unrecognized IPv6 option that has the Option Type
202 * highest-order two bits set to 10
205 static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
/* Offset is relative to the network header. */
209 offset += skb_network_offset(skb);
210 op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
213 return (*op & 0xC0) == 0x80;
/*
 * Finalize and transmit the ICMPv6 message queued on @sk's write queue
 * by ip6_append_data(): copy the prepared header @thdr into place,
 * compute the ICMPv6 checksum (single-skb fast path vs. multi-fragment
 * walk), then push the pending frames out.
 * NOTE(review): several lines (declarations, else branch open, return)
 * are elided in this view.
 */
216 static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
219 struct icmp6hdr *icmp6h;
222 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
/* The ICMPv6 header sits at the transport-header offset of the skb. */
225 icmp6h = (struct icmp6hdr*) skb->h.raw;
226 memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
/* Checksum field must be zero while the checksum is computed. */
227 icmp6h->icmp6_cksum = 0;
229 if (skb_queue_len(&sk->sk_write_queue) == 1) {
/* Single-buffer case: fold the header into the existing data csum. */
230 skb->csum = csum_partial((char *)icmp6h,
231 sizeof(struct icmp6hdr), skb->csum);
232 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
/* Multi-fragment case: accumulate each fragment's partial checksum. */
239 skb_queue_walk(&sk->sk_write_queue, skb) {
240 tmp_csum = csum_add(tmp_csum, skb->csum);
243 tmp_csum = csum_partial((char *)icmp6h,
244 sizeof(struct icmp6hdr), tmp_csum);
245 icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
250 ip6_push_pending_frames(sk);
/*
 * ip6_append_data() fragment callback: copy @len bytes of the original
 * packet (starting at msg->offset + @offset) into the outgoing skb
 * while accumulating the checksum. For error messages (not info
 * messages) the conntrack entry of the original skb is attached.
 */
261 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
263 struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
264 struct sk_buff *org_skb = msg->skb;
267 csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
269 skb->csum = csum_block_add(skb->csum, csum, odd);
/* Only ICMPv6 errors inherit the original packet's conntrack. */
270 if (!(msg->type & ICMPV6_INFOMSG_MASK))
271 nf_ct_attach(skb, org_skb);
/*
 * Mobile IPv6 (RFC 3775) helper: if the packet carries a Home Address
 * destination option (HAO), swap the IPv6 source address with the home
 * address so the ICMPv6 error is built against the home address.
 * Compiled to a no-op stub when CONFIG_IPV6_MIP6 is not set.
 */
275 #ifdef CONFIG_IPV6_MIP6
276 static void mip6_addr_swap(struct sk_buff *skb)
278 struct ipv6hdr *iph = ipv6_hdr(skb);
279 struct inet6_skb_parm *opt = IP6CB(skb);
280 struct ipv6_destopt_hao *hao;
/* Locate the HAO TLV within the destination options header. */
285 off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
286 if (likely(off >= 0)) {
287 hao = (struct ipv6_destopt_hao *)
288 (skb_network_header(skb) + off);
/* Three-way swap of iph->saddr and hao->addr via tmp. */
289 ipv6_addr_copy(&tmp, &iph->saddr);
290 ipv6_addr_copy(&iph->saddr, &hao->addr);
291 ipv6_addr_copy(&hao->addr, &tmp);
296 static inline void mip6_addr_swap(struct sk_buff *skb) {}
/*
 * icmpv6_send - build and transmit an ICMPv6 error message of
 * @type/@code in response to the errored packet @skb; @info fills the
 * type-specific 32-bit field (e.g. MTU or parameter pointer).
 * Enforces the RFC 1885/2463 eligibility rules (no replies for
 * multicast/anycast/unspecified sources, never reply to an ICMP
 * error), applies rate limiting, then uses ip6_append_data() +
 * icmpv6_push_pending_frames() to emit the message.
 * NOTE(review): many original lines are elided in this view; comments
 * describe only the visible statements.
 */
300 * Send an ICMP message in response to a packet in error
302 void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
303 struct net_device *dev)
305 struct inet6_dev *idev = NULL;
306 struct ipv6hdr *hdr = ipv6_hdr(skb);
308 struct ipv6_pinfo *np;
309 struct in6_addr *saddr = NULL;
310 struct dst_entry *dst;
311 struct icmp6hdr tmp_hdr;
313 struct icmpv6_msg msg;
/* Sanity: the claimed IPv6 header must lie within the skb's data. */
320 if ((u8*)hdr < skb->head || (u8*)(hdr+1) > skb->tail)
324 * Make sure we respect the rules
325 * i.e. RFC 1885 2.4(e)
326 * Rule (e.1) is enforced by not using icmpv6_send
327 * in any code that processes icmp errors.
329 addr_type = ipv6_addr_type(&hdr->daddr);
/* Packet addressed to one of our own addresses? */
331 if (ipv6_chk_addr(&hdr->daddr, skb->dev, 0))
/* Multicast / non-host destinations: only PKT_TOOBIG or a
 * parameter problem for an unrecognized option may be answered. */
338 if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
339 if (type != ICMPV6_PKT_TOOBIG &&
340 !(type == ICMPV6_PARAMPROB &&
341 code == ICMPV6_UNK_OPTION &&
342 (opt_unrec(skb, info))))
348 addr_type = ipv6_addr_type(&hdr->saddr);
/* For link-local sources pin the reply to the arrival interface. */
354 if (addr_type & IPV6_ADDR_LINKLOCAL)
355 iif = skb->dev->ifindex;
358 * Must not send error if the source does not uniquely
359 * identify a single node (RFC2463 Section 2.4).
360 * We check unspecified / multicast addresses here,
361 * and anycast addresses will be checked later.
363 if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
364 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
369 * Never answer to a ICMP packet.
371 if (is_ineligible(skb)) {
372 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
/* Build the flow: reply goes back to the offending packet's source. */
378 memset(&fl, 0, sizeof(fl));
379 fl.proto = IPPROTO_ICMPV6;
380 ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
382 ipv6_addr_copy(&fl.fl6_src, saddr);
384 fl.fl_icmp_type = type;
385 fl.fl_icmp_code = code;
386 security_skb_classify_flow(skb, &fl);
/* Nonzero return means the trylock failed (re-entry); give up. */
388 if (icmpv6_xmit_lock())
391 sk = icmpv6_socket->sk;
394 if (!icmpv6_xrlim_allow(sk, type, &fl))
/* Prepare the ICMPv6 header; checksum is filled in at push time. */
397 tmp_hdr.icmp6_type = type;
398 tmp_hdr.icmp6_code = code;
399 tmp_hdr.icmp6_cksum = 0;
400 tmp_hdr.icmp6_pointer = htonl(info);
402 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
403 fl.oif = np->mcast_oif;
405 err = ip6_dst_lookup(sk, &dst, &fl);
410 * We won't send icmp if the destination is known
/* Anycast sources cannot uniquely identify a node (RFC 2463 2.4). */
413 if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
414 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
415 goto out_dst_release;
/* Pass the route through IPsec policy lookup. */
418 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
/* Select hop limit: socket setting, route metric, or device default. */
421 if (ipv6_addr_is_multicast(&fl.fl6_dst))
422 hlimit = np->mcast_hops;
424 hlimit = np->hop_limit;
426 hlimit = dst_metric(dst, RTAX_HOPLIMIT);
428 hlimit = ipv6_get_hoplimit(dst->dev);
435 msg.offset = skb_network_offset(skb);
/* Quote as much of the offending packet as fits in the minimum MTU. */
438 len = skb->len - msg.offset;
439 len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
441 LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
442 goto out_dst_release;
445 idev = in6_dev_get(skb->dev);
447 err = ip6_append_data(sk, icmpv6_getfrag, &msg,
448 len + sizeof(struct icmp6hdr),
449 sizeof(struct icmp6hdr),
450 hlimit, tclass, NULL, &fl, (struct rt6_info*)dst,
/* On append failure, discard whatever was queued. */
453 ip6_flush_pending_frames(sk);
456 err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
/* Per-type output statistics (contiguous ICMPv6 error type range). */
458 if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
459 ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
460 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
463 if (likely(idev != NULL))
468 icmpv6_xmit_unlock();
471 EXPORT_SYMBOL(icmpv6_send);
/*
 * Answer an ICMPv6 Echo Request in @skb with an Echo Reply: the reply
 * reuses the request header (type rewritten), swaps source/destination
 * and is sent through the per-CPU ICMPv6 socket via ip6_append_data().
 * NOTE(review): several original lines are elided in this view.
 */
473 static void icmpv6_echo_reply(struct sk_buff *skb)
476 struct inet6_dev *idev;
477 struct ipv6_pinfo *np;
478 struct in6_addr *saddr = NULL;
479 struct icmp6hdr *icmph = (struct icmp6hdr *) skb->h.raw;
480 struct icmp6hdr tmp_hdr;
482 struct icmpv6_msg msg;
483 struct dst_entry *dst;
/* Reply from the address the request was sent to. */
488 saddr = &ipv6_hdr(skb)->daddr;
490 if (!ipv6_unicast_destination(skb))
/* Copy the request header; only the type changes for the reply. */
493 memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
494 tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
496 memset(&fl, 0, sizeof(fl));
497 fl.proto = IPPROTO_ICMPV6;
498 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
500 ipv6_addr_copy(&fl.fl6_src, saddr);
501 fl.oif = skb->dev->ifindex;
502 fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
503 security_skb_classify_flow(skb, &fl);
505 if (icmpv6_xmit_lock())
508 sk = icmpv6_socket->sk;
511 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
512 fl.oif = np->mcast_oif;
514 err = ip6_dst_lookup(sk, &dst, &fl);
517 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
/* Hop limit selection mirrors icmpv6_send(). */
520 if (ipv6_addr_is_multicast(&fl.fl6_dst))
521 hlimit = np->mcast_hops;
523 hlimit = np->hop_limit;
525 hlimit = dst_metric(dst, RTAX_HOPLIMIT);
527 hlimit = ipv6_get_hoplimit(dst->dev);
533 idev = in6_dev_get(skb->dev);
537 msg.type = ICMPV6_ECHO_REPLY;
539 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
540 sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
541 (struct rt6_info*)dst, MSG_DONTWAIT);
/* On append failure, drop the partially built reply. */
544 ip6_flush_pending_frames(sk);
547 err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
549 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES);
550 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS);
553 if (likely(idev != NULL))
557 icmpv6_xmit_unlock();
/*
 * Deliver a received ICMPv6 error to the upper-layer protocol that
 * caused it: parse past the inner packet's extension headers, then
 * invoke the matching inet6 protocol err_handler and notify any raw
 * sockets bound to that protocol.
 */
560 static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
562 struct in6_addr *saddr, *daddr;
563 struct inet6_protocol *ipprot;
/* Need at least the embedded IPv6 header to parse further. */
569 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
572 nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
573 if (ipv6_ext_hdr(nexthdr)) {
574 /* now skip over extension headers */
575 inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
579 inner_offset = sizeof(struct ipv6hdr);
582 /* Checkin header including 8 bytes of inner protocol header. */
583 if (!pskb_may_pull(skb, inner_offset+8))
586 saddr = &ipv6_hdr(skb)->saddr;
587 daddr = &ipv6_hdr(skb)->daddr;
589 /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
590 Without this we will not able f.e. to make source routed
592 Corresponding argument (opt) to notifiers is already added.
/* Hash into the inet6 protocol table by upper-layer protocol number. */
596 hash = nexthdr & (MAX_INET_PROTOS - 1);
599 ipprot = rcu_dereference(inet6_protos[hash]);
600 if (ipprot && ipprot->err_handler)
601 ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
/* Also notify every matching bound raw IPv6 socket. */
604 read_lock(&raw_v6_lock);
605 if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
606 while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
608 rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
612 read_unlock(&raw_v6_lock);
/*
 * icmpv6_rcv - main ICMPv6 input handler (registered via
 * icmpv6_protocol). Verifies the checksum, bumps per-type input
 * statistics, then dispatches on the message type: echo, PMTU
 * discovery, errors passed up via icmpv6_notify(), NDISC and MLD
 * handling, and a default path for unknown types.
 * NOTE(review): switch braces, ndisc_rcv call and some returns are
 * elided in this view.
 */
616 * Handle icmp messages
619 static int icmpv6_rcv(struct sk_buff **pskb)
621 struct sk_buff *skb = *pskb;
622 struct net_device *dev = skb->dev;
623 struct inet6_dev *idev = __in6_dev_get(dev);
624 struct in6_addr *saddr, *daddr;
625 struct ipv6hdr *orig_hdr;
626 struct icmp6hdr *hdr;
629 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);
631 saddr = &ipv6_hdr(skb)->saddr;
632 daddr = &ipv6_hdr(skb)->daddr;
634 /* Perform checksum. */
635 switch (skb->ip_summed) {
636 case CHECKSUM_COMPLETE:
/* Hardware-computed checksum: just fold in the pseudo-header. */
637 if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
/* Software path: seed with the pseudo-header, verify the rest. */
642 skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
644 if (__skb_checksum_complete(skb)) {
645 LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
646 NIP6(*saddr), NIP6(*daddr));
651 if (!pskb_pull(skb, sizeof(struct icmp6hdr)))
654 hdr = (struct icmp6hdr *) skb->h.raw;
656 type = hdr->icmp6_type;
/* Per-type input statistics over the contiguous type ranges. */
658 if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB)
659 ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH);
660 else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT)
661 ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST);
664 case ICMPV6_ECHO_REQUEST:
665 icmpv6_echo_reply(skb);
668 case ICMPV6_ECHO_REPLY:
669 /* we couldn't care less */
672 case ICMPV6_PKT_TOOBIG:
673 /* BUGGG_FUTURE: if packet contains rthdr, we cannot update
674 standard destination cache. Seems, only "advanced"
675 destination cache will allow to solve this problem
/* Need the embedded IPv6 header of the too-big packet. */
678 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
680 hdr = (struct icmp6hdr *) skb->h.raw;
681 orig_hdr = (struct ipv6hdr *) (hdr + 1);
682 rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
683 ntohl(hdr->icmp6_mtu));
686 * Drop through to notify
689 case ICMPV6_DEST_UNREACH:
690 case ICMPV6_TIME_EXCEED:
691 case ICMPV6_PARAMPROB:
692 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
695 case NDISC_ROUTER_SOLICITATION:
696 case NDISC_ROUTER_ADVERTISEMENT:
697 case NDISC_NEIGHBOUR_SOLICITATION:
698 case NDISC_NEIGHBOUR_ADVERTISEMENT:
703 case ICMPV6_MGM_QUERY:
704 igmp6_event_query(skb);
707 case ICMPV6_MGM_REPORT:
708 igmp6_event_report(skb);
/* Types known but intentionally ignored here. */
711 case ICMPV6_MGM_REDUCTION:
712 case ICMPV6_NI_QUERY:
713 case ICMPV6_NI_REPLY:
714 case ICMPV6_MLD2_REPORT:
715 case ICMPV6_DHAAD_REQUEST:
716 case ICMPV6_DHAAD_REPLY:
717 case ICMPV6_MOBILE_PREFIX_SOL:
718 case ICMPV6_MOBILE_PREFIX_ADV:
722 LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
/* Unknown informational messages are silently discarded ... */
725 if (type & ICMPV6_INFOMSG_MASK)
729 * error of unknown type.
730 * must pass to upper level
733 icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
739 ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
745 * Special lock-class for __icmpv6_socket:
747 static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
/*
 * Module init: create one kernel ICMPv6 raw socket per possible CPU,
 * configure each for atomic-context transmission, and register the
 * ICMPv6 protocol handler. On failure, sockets created so far are
 * released (cleanup loop at the end).
 */
749 int __init icmpv6_init(struct net_proto_family *ops)
754 for_each_possible_cpu(i) {
755 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
756 &per_cpu(__icmpv6_socket, i));
759 "Failed to initialize the ICMP6 control socket "
765 sk = per_cpu(__icmpv6_socket, i)->sk;
/* Transmissions happen in softirq context: no sleeping allocations. */
766 sk->sk_allocation = GFP_ATOMIC;
768 * Split off their lock-class, because sk->sk_dst_lock
769 * gets used from softirqs, which is safe for
770 * __icmpv6_socket (because those never get directly used
771 * via userspace syscalls), but unsafe for normal sockets.
773 lockdep_set_class(&sk->sk_dst_lock,
774 &icmpv6_socket_sk_dst_lock_key);
776 /* Enough space for 2 64K ICMP packets, including
777 * sk_buff struct overhead.
780 (2 * ((64 * 1024) + sizeof(struct sk_buff)));
/* Kernel-internal socket: keep it out of the protocol hash tables. */
782 sk->sk_prot->unhash(sk);
786 if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) {
787 printk(KERN_ERR "Failed to register ICMP6 protocol\n");
/* Error path: release the sockets created before the failure. */
795 for (j = 0; j < i; j++) {
796 if (!cpu_possible(j))
798 sock_release(per_cpu(__icmpv6_socket, j));
/* Module teardown: release every per-CPU control socket and
 * unregister the ICMPv6 protocol handler. */
804 void icmpv6_cleanup(void)
808 for_each_possible_cpu(i) {
809 sock_release(per_cpu(__icmpv6_socket, i));
811 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
/* Table mapping ICMPV6_DEST_UNREACH codes to (errno, fatal) pairs;
 * indexed by code in icmpv6_err_convert() below.
 * NOTE(review): most table entries are elided in this view. */
814 static const struct icmp6_err {
822 { /* ADM_PROHIBITED */
826 { /* Was NOT_NEIGHBOUR, now reserved */
/*
 * Translate a received ICMPv6 (type, code) pair into an errno stored
 * in *err; the return value (fatal) tells the caller whether the error
 * is hard (connection should be aborted) or soft.
 */
840 int icmpv6_err_convert(int type, int code, int *err)
847 case ICMPV6_DEST_UNREACH:
/* Only codes covered by tab_unreach are translated via the table. */
849 if (code <= ICMPV6_PORT_UNREACH) {
850 *err = tab_unreach[code].err;
851 fatal = tab_unreach[code].fatal;
855 case ICMPV6_PKT_TOOBIG:
859 case ICMPV6_PARAMPROB:
864 case ICMPV6_TIME_EXCEED:
872 EXPORT_SYMBOL(icmpv6_err_convert);
875 ctl_table ipv6_icmp_table[] = {
877 .ctl_name = NET_IPV6_ICMP_RATELIMIT,
878 .procname = "ratelimit",
879 .data = &sysctl_icmpv6_time,
880 .maxlen = sizeof(int),
882 .proc_handler = &proc_dointvec