3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support the IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
19 * to a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
65 #include <asm/uaccess.h>
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
73 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
75 static void tcp_v6_send_check(struct sock *sk, int len,
78 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80 static struct inet_connection_sock_af_ops ipv6_mapped;
81 static struct inet_connection_sock_af_ops ipv6_specific;
82 #ifdef CONFIG_TCP_MD5SIG
83 static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
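/*
 * Hash a bound, non-closed socket into the TCP lookup tables.  Sockets
 * whose af_ops have been switched to the v4-mapped set are hashed through
 * the IPv4 path; native IPv6 sockets go into the inet6 hash tables.
 */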
87 static void tcp_v6_hash(struct sock *sk)
89 if (sk->sk_state != TCP_CLOSE) {
90 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
100 static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
101 struct in6_addr *saddr,
102 struct in6_addr *daddr,
105 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
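/*
 * Initial sequence numbers are derived from the connection 4-tuple via
 * secure_tcpv6_sequence_number(), which mixes in secret state so that
 * off-path attackers cannot easily predict them.
 */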
108 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
110 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
111 ipv6_hdr(skb)->saddr.s6_addr32,
113 tcp_hdr(skb)->source);
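/*
 * Active open.  Validates the destination sockaddr, resolves any flow
 * label, falls back to tcp_v4_connect() for v4-mapped destinations,
 * performs the route/xfrm lookup, binds a local port via
 * inet6_hash_connect() and finally sends the SYN through tcp_connect().
 */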
116 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
119 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
120 struct inet_sock *inet = inet_sk(sk);
121 struct inet_connection_sock *icsk = inet_csk(sk);
122 struct ipv6_pinfo *np = inet6_sk(sk);
123 struct tcp_sock *tp = tcp_sk(sk);
124 struct in6_addr *saddr = NULL, *final_p = NULL, final;
126 struct dst_entry *dst;
130 if (addr_len < SIN6_LEN_RFC2133)
133 if (usin->sin6_family != AF_INET6)
134 return -EAFNOSUPPORT;
136 memset(&fl, 0, sizeof(fl));
139 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
140 IP6_ECN_flow_init(fl.fl6_flowlabel);
141 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
142 struct ip6_flowlabel *flowlabel;
143 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
144 if (flowlabel == NULL)
146 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
147 fl6_sock_release(flowlabel);
152 * connect() to INADDR_ANY means loopback (BSD'ism).
155 if (ipv6_addr_any(&usin->sin6_addr))
156 usin->sin6_addr.s6_addr[15] = 0x1;
158 addr_type = ipv6_addr_type(&usin->sin6_addr);
160 if (addr_type & IPV6_ADDR_MULTICAST)
163 if (addr_type&IPV6_ADDR_LINKLOCAL) {
164 if (addr_len >= sizeof(struct sockaddr_in6) &&
165 usin->sin6_scope_id) {
166 /* If interface is set while binding, indices must coincide. */
169 if (sk->sk_bound_dev_if &&
170 sk->sk_bound_dev_if != usin->sin6_scope_id)
173 sk->sk_bound_dev_if = usin->sin6_scope_id;
176 /* Connect to link-local address requires an interface */
177 if (!sk->sk_bound_dev_if)
181 if (tp->rx_opt.ts_recent_stamp &&
182 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
183 tp->rx_opt.ts_recent = 0;
184 tp->rx_opt.ts_recent_stamp = 0;
188 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
189 np->flow_label = fl.fl6_flowlabel;
195 if (addr_type == IPV6_ADDR_MAPPED) {
196 u32 exthdrlen = icsk->icsk_ext_hdr_len;
197 struct sockaddr_in sin;
199 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
201 if (__ipv6_only_sock(sk))
204 sin.sin_family = AF_INET;
205 sin.sin_port = usin->sin6_port;
206 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
208 icsk->icsk_af_ops = &ipv6_mapped;
209 sk->sk_backlog_rcv = tcp_v4_do_rcv;
210 #ifdef CONFIG_TCP_MD5SIG
211 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
214 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
217 icsk->icsk_ext_hdr_len = exthdrlen;
218 icsk->icsk_af_ops = &ipv6_specific;
219 sk->sk_backlog_rcv = tcp_v6_do_rcv;
220 #ifdef CONFIG_TCP_MD5SIG
221 tp->af_specific = &tcp_sock_ipv6_specific;
225 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
227 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
234 if (!ipv6_addr_any(&np->rcv_saddr))
235 saddr = &np->rcv_saddr;
237 fl.proto = IPPROTO_TCP;
238 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
239 ipv6_addr_copy(&fl.fl6_src,
240 (saddr ? saddr : &np->saddr));
241 fl.oif = sk->sk_bound_dev_if;
242 fl.fl_ip_dport = usin->sin6_port;
243 fl.fl_ip_sport = inet->sport;
245 if (np->opt && np->opt->srcrt) {
246 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
247 ipv6_addr_copy(&final, &fl.fl6_dst);
248 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
252 security_sk_classify_flow(sk, &fl);
254 err = ip6_dst_lookup(sk, &dst, &fl);
258 ipv6_addr_copy(&fl.fl6_dst, final_p);
260 if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
262 err = ip6_dst_blackhole(sk, &dst, &fl);
269 ipv6_addr_copy(&np->rcv_saddr, saddr);
272 /* set the source address */
273 ipv6_addr_copy(&np->saddr, saddr);
274 inet->rcv_saddr = LOOPBACK4_IPV6;
276 sk->sk_gso_type = SKB_GSO_TCPV6;
277 __ip6_dst_store(sk, dst, NULL, NULL);
279 icsk->icsk_ext_hdr_len = 0;
281 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
284 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
286 inet->dport = usin->sin6_port;
288 tcp_set_state(sk, TCP_SYN_SENT);
289 err = inet6_hash_connect(&tcp_death_row, sk);
294 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
299 err = tcp_connect(sk);
306 tcp_set_state(sk, TCP_CLOSE);
310 sk->sk_route_caps = 0;
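/*
 * ICMPv6 error handler.  Locates the socket the errored segment belonged
 * to, handles PKT_TOOBIG by re-syncing the MSS to the new path MTU, and
 * propagates other errors either to the socket itself (when np->recverr
 * is set) or to sk_err_soft when they cannot be reported immediately.
 */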
314 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
315 int type, int code, int offset, __be32 info)
317 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
318 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
319 struct ipv6_pinfo *np;
325 sk = inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, &hdr->daddr,
326 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
329 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
333 if (sk->sk_state == TCP_TIME_WAIT) {
334 inet_twsk_put(inet_twsk(sk));
339 if (sock_owned_by_user(sk))
340 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
342 if (sk->sk_state == TCP_CLOSE)
346 seq = ntohl(th->seq);
347 if (sk->sk_state != TCP_LISTEN &&
348 !between(seq, tp->snd_una, tp->snd_nxt)) {
349 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
355 if (type == ICMPV6_PKT_TOOBIG) {
356 struct dst_entry *dst = NULL;
358 if (sock_owned_by_user(sk))
360 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
363 /* icmp should have updated the destination cache entry */
364 dst = __sk_dst_check(sk, np->dst_cookie);
367 struct inet_sock *inet = inet_sk(sk);
370 /* BUGGG_FUTURE: Again, it is not clear how
371 to handle the rthdr case. Ignore this complexity for now. */
374 memset(&fl, 0, sizeof(fl));
375 fl.proto = IPPROTO_TCP;
376 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
377 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
378 fl.oif = sk->sk_bound_dev_if;
379 fl.fl_ip_dport = inet->dport;
380 fl.fl_ip_sport = inet->sport;
381 security_skb_classify_flow(skb, &fl);
383 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
384 sk->sk_err_soft = -err;
388 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
389 sk->sk_err_soft = -err;
396 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
397 tcp_sync_mss(sk, dst_mtu(dst));
398 tcp_simple_retransmit(sk);
399 } /* else let the usual retransmit timer handle it */
404 icmpv6_err_convert(type, code, &err);
406 /* Might be for a request_sock */
407 switch (sk->sk_state) {
408 struct request_sock *req, **prev;
410 if (sock_owned_by_user(sk))
413 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
414 &hdr->saddr, inet6_iif(skb));
418 /* ICMPs are not backlogged, hence we cannot get
419 * an established socket here.
421 BUG_TRAP(req->sk == NULL);
423 if (seq != tcp_rsk(req)->snt_isn) {
424 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
428 inet_csk_reqsk_queue_drop(sk, req, prev);
432 case TCP_SYN_RECV: /* Cannot happen.
433 It can, if SYNs are crossed. --ANK */
434 if (!sock_owned_by_user(sk)) {
436 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
440 sk->sk_err_soft = err;
444 if (!sock_owned_by_user(sk) && np->recverr) {
446 sk->sk_error_report(sk);
448 sk->sk_err_soft = err;
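/*
 * Build and transmit a SYN|ACK for a pending request_sock.  Because no
 * full socket exists yet, the flow is described from the request itself
 * and a fresh route/xfrm lookup is done for every (re)transmission.
 */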
456 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
458 struct inet6_request_sock *treq = inet6_rsk(req);
459 struct ipv6_pinfo *np = inet6_sk(sk);
460 struct sk_buff * skb;
461 struct ipv6_txoptions *opt = NULL;
462 struct in6_addr * final_p = NULL, final;
464 struct dst_entry *dst;
467 memset(&fl, 0, sizeof(fl));
468 fl.proto = IPPROTO_TCP;
469 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
470 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
471 fl.fl6_flowlabel = 0;
473 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
474 fl.fl_ip_sport = inet_sk(sk)->sport;
475 security_req_classify_flow(req, &fl);
478 if (opt && opt->srcrt) {
479 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
480 ipv6_addr_copy(&final, &fl.fl6_dst);
481 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
485 err = ip6_dst_lookup(sk, &dst, &fl);
489 ipv6_addr_copy(&fl.fl6_dst, final_p);
490 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
493 skb = tcp_make_synack(sk, dst, req);
495 struct tcphdr *th = tcp_hdr(skb);
497 th->check = tcp_v6_check(th, skb->len,
498 &treq->loc_addr, &treq->rmt_addr,
499 csum_partial((char *)th, skb->len, skb->csum));
501 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
502 err = ip6_xmit(sk, skb, &fl, opt, 0);
503 err = net_xmit_eval(err);
507 if (opt && opt != np->opt)
508 sock_kfree_s(sk, opt, opt->tot_len);
513 static inline void syn_flood_warning(struct sk_buff *skb)
515 #ifdef CONFIG_SYN_COOKIES
516 if (sysctl_tcp_syncookies)
518 "TCPv6: Possible SYN flooding on port %d. "
519 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
523 "TCPv6: Possible SYN flooding on port %d. "
524 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
527 static void tcp_v6_reqsk_destructor(struct request_sock *req)
529 if (inet6_rsk(req)->pktopts)
530 kfree_skb(inet6_rsk(req)->pktopts);
533 #ifdef CONFIG_TCP_MD5SIG
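/*
 * TCP MD5 signature (RFC 2385) support: per-destination keys are kept in
 * a small array hanging off tp->md5sig_info and searched linearly, which
 * is adequate for the handful of BGP-style peers the option is meant for.
 */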
534 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
535 struct in6_addr *addr)
537 struct tcp_sock *tp = tcp_sk(sk);
542 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
545 for (i = 0; i < tp->md5sig_info->entries6; i++) {
546 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
547 return &tp->md5sig_info->keys6[i].base;
552 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
553 struct sock *addr_sk)
555 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
558 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
559 struct request_sock *req)
561 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
564 static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
565 char *newkey, u8 newkeylen)
567 /* Add key to the list */
568 struct tcp_md5sig_key *key;
569 struct tcp_sock *tp = tcp_sk(sk);
570 struct tcp6_md5sig_key *keys;
572 key = tcp_v6_md5_do_lookup(sk, peer);
574 /* modify existing entry - just update that one */
577 key->keylen = newkeylen;
579 /* reallocate new list if current one is full. */
580 if (!tp->md5sig_info) {
581 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
582 if (!tp->md5sig_info) {
586 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
588 if (tcp_alloc_md5sig_pool() == NULL) {
592 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
593 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
594 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
597 tcp_free_md5sig_pool();
602 if (tp->md5sig_info->entries6)
603 memmove(keys, tp->md5sig_info->keys6,
604 (sizeof (tp->md5sig_info->keys6[0]) *
605 tp->md5sig_info->entries6));
607 kfree(tp->md5sig_info->keys6);
608 tp->md5sig_info->keys6 = keys;
609 tp->md5sig_info->alloced6++;
612 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
614 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
615 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
617 tp->md5sig_info->entries6++;
622 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
623 u8 *newkey, __u8 newkeylen)
625 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
629 static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
631 struct tcp_sock *tp = tcp_sk(sk);
634 for (i = 0; i < tp->md5sig_info->entries6; i++) {
635 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
637 kfree(tp->md5sig_info->keys6[i].base.key);
638 tp->md5sig_info->entries6--;
640 if (tp->md5sig_info->entries6 == 0) {
641 kfree(tp->md5sig_info->keys6);
642 tp->md5sig_info->keys6 = NULL;
643 tp->md5sig_info->alloced6 = 0;
645 /* shrink the database */
646 if (tp->md5sig_info->entries6 != i)
647 memmove(&tp->md5sig_info->keys6[i],
648 &tp->md5sig_info->keys6[i+1],
649 (tp->md5sig_info->entries6 - i)
650 * sizeof (tp->md5sig_info->keys6[0]));
652 tcp_free_md5sig_pool();
659 static void tcp_v6_clear_md5_list (struct sock *sk)
661 struct tcp_sock *tp = tcp_sk(sk);
664 if (tp->md5sig_info->entries6) {
665 for (i = 0; i < tp->md5sig_info->entries6; i++)
666 kfree(tp->md5sig_info->keys6[i].base.key);
667 tp->md5sig_info->entries6 = 0;
668 tcp_free_md5sig_pool();
671 kfree(tp->md5sig_info->keys6);
672 tp->md5sig_info->keys6 = NULL;
673 tp->md5sig_info->alloced6 = 0;
675 if (tp->md5sig_info->entries4) {
676 for (i = 0; i < tp->md5sig_info->entries4; i++)
677 kfree(tp->md5sig_info->keys4[i].base.key);
678 tp->md5sig_info->entries4 = 0;
679 tcp_free_md5sig_pool();
682 kfree(tp->md5sig_info->keys4);
683 tp->md5sig_info->keys4 = NULL;
684 tp->md5sig_info->alloced4 = 0;
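/*
 * setsockopt(TCP_MD5SIG) handler: copies the user request and adds or
 * deletes a key, dispatching to the IPv4 helpers when the peer address
 * is a v4-mapped IPv6 address.
 */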
687 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
690 struct tcp_md5sig cmd;
691 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
694 if (optlen < sizeof(cmd))
697 if (copy_from_user(&cmd, optval, sizeof(cmd)))
700 if (sin6->sin6_family != AF_INET6)
703 if (!cmd.tcpm_keylen) {
704 if (!tcp_sk(sk)->md5sig_info)
706 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
707 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
708 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
711 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
714 if (!tcp_sk(sk)->md5sig_info) {
715 struct tcp_sock *tp = tcp_sk(sk);
716 struct tcp_md5sig_info *p;
718 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
723 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
726 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
729 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
730 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
731 newkey, cmd.tcpm_keylen);
733 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
736 static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
737 struct in6_addr *saddr,
738 struct in6_addr *daddr,
739 struct tcphdr *th, unsigned int tcplen)
741 struct tcp_md5sig_pool *hp;
742 struct tcp6_pseudohdr *bp;
745 hp = tcp_get_md5sig_pool();
747 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
748 goto clear_hash_noput;
751 bp = &hp->md5_blk.ip6;
753 /* 1. TCP pseudo-header (RFC2460) */
754 ipv6_addr_copy(&bp->saddr, saddr);
755 ipv6_addr_copy(&bp->daddr, daddr);
756 bp->len = htonl(tcplen);
757 bp->protocol = htonl(IPPROTO_TCP);
759 err = tcp_calc_md5_hash(md5_hash, key, sizeof(*bp),
765 /* Free up the crypto pool */
766 tcp_put_md5sig_pool();
770 tcp_put_md5sig_pool();
772 memset(md5_hash, 0, 16);
776 static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
778 struct dst_entry *dst,
779 struct request_sock *req,
780 struct tcphdr *th, unsigned int tcplen)
782 struct in6_addr *saddr, *daddr;
785 saddr = &inet6_sk(sk)->saddr;
786 daddr = &inet6_sk(sk)->daddr;
788 saddr = &inet6_rsk(req)->loc_addr;
789 daddr = &inet6_rsk(req)->rmt_addr;
791 return tcp_v6_do_calc_md5_hash(md5_hash, key,
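/*
 * Verify the TCP MD5 signature option on an incoming segment.  The
 * segment is dropped if a key is configured but no option is present,
 * if an option is present without a configured key, or if the computed
 * digest does not match the one carried in the header.
 */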
796 static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
798 __u8 *hash_location = NULL;
799 struct tcp_md5sig_key *hash_expected;
800 struct ipv6hdr *ip6h = ipv6_hdr(skb);
801 struct tcphdr *th = tcp_hdr(skb);
805 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
806 hash_location = tcp_parse_md5sig_option(th);
808 /* do we have a hash as expected? */
809 if (!hash_expected) {
812 if (net_ratelimit()) {
813 printk(KERN_INFO "MD5 Hash NOT expected but found "
814 "(" NIP6_FMT ", %u)->"
815 "(" NIP6_FMT ", %u)\n",
816 NIP6(ip6h->saddr), ntohs(th->source),
817 NIP6(ip6h->daddr), ntohs(th->dest));
822 if (!hash_location) {
823 if (net_ratelimit()) {
824 printk(KERN_INFO "MD5 Hash expected but NOT found "
825 "(" NIP6_FMT ", %u)->"
826 "(" NIP6_FMT ", %u)\n",
827 NIP6(ip6h->saddr), ntohs(th->source),
828 NIP6(ip6h->daddr), ntohs(th->dest));
833 /* check the signature */
834 genhash = tcp_v6_do_calc_md5_hash(newhash,
836 &ip6h->saddr, &ip6h->daddr,
838 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
839 if (net_ratelimit()) {
840 printk(KERN_INFO "MD5 Hash %s for "
841 "(" NIP6_FMT ", %u)->"
842 "(" NIP6_FMT ", %u)\n",
843 genhash ? "failed" : "mismatch",
844 NIP6(ip6h->saddr), ntohs(th->source),
845 NIP6(ip6h->daddr), ntohs(th->dest));
853 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
855 .obj_size = sizeof(struct tcp6_request_sock),
856 .rtx_syn_ack = tcp_v6_send_synack,
857 .send_ack = tcp_v6_reqsk_send_ack,
858 .destructor = tcp_v6_reqsk_destructor,
859 .send_reset = tcp_v6_send_reset
862 #ifdef CONFIG_TCP_MD5SIG
863 static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
864 .md5_lookup = tcp_v6_reqsk_md5_lookup,
868 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
869 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
870 .twsk_unique = tcp_twsk_unique,
871 .twsk_destructor = tcp_twsk_destructor,
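/*
 * Fill in the TCP checksum on transmit.  With CHECKSUM_PARTIAL only the
 * pseudo-header sum is stored and the device (or checksum helpers)
 * complete it from csum_start/csum_offset; otherwise the full checksum
 * is computed in software here.
 */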
874 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
876 struct ipv6_pinfo *np = inet6_sk(sk);
877 struct tcphdr *th = tcp_hdr(skb);
879 if (skb->ip_summed == CHECKSUM_PARTIAL) {
880 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
881 skb->csum_start = skb_transport_header(skb) - skb->head;
882 skb->csum_offset = offsetof(struct tcphdr, check);
884 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
885 csum_partial((char *)th, th->doff<<2,
890 static int tcp_v6_gso_send_check(struct sk_buff *skb)
892 struct ipv6hdr *ipv6h;
895 if (!pskb_may_pull(skb, sizeof(*th)))
898 ipv6h = ipv6_hdr(skb);
902 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
904 skb->csum_start = skb_transport_header(skb) - skb->head;
905 skb->csum_offset = offsetof(struct tcphdr, check);
906 skb->ip_summed = CHECKSUM_PARTIAL;
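/*
 * Send a RST in response to a bad segment.  The reply is built on a
 * per-namespace control socket, mirrors the sequence numbers of the
 * offending segment, and may carry an MD5 signature option when a key
 * is configured for the peer.
 */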
910 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
912 struct tcphdr *th = tcp_hdr(skb), *t1;
913 struct sk_buff *buff;
915 struct net *net = dev_net(skb->dst->dev);
916 struct sock *ctl_sk = net->ipv6.tcp_sk;
917 unsigned int tot_len = sizeof(*th);
918 #ifdef CONFIG_TCP_MD5SIG
919 struct tcp_md5sig_key *key;
925 if (!ipv6_unicast_destination(skb))
928 #ifdef CONFIG_TCP_MD5SIG
930 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
935 tot_len += TCPOLEN_MD5SIG_ALIGNED;
939 * We need to grab some memory, and put together an RST,
940 * and then put it into the queue to be sent.
943 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
948 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
950 t1 = (struct tcphdr *) skb_push(buff, tot_len);
952 /* Swap the send and the receive. */
953 memset(t1, 0, sizeof(*t1));
954 t1->dest = th->source;
955 t1->source = th->dest;
956 t1->doff = tot_len / 4;
960 t1->seq = th->ack_seq;
963 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
964 + skb->len - (th->doff<<2));
967 #ifdef CONFIG_TCP_MD5SIG
969 __be32 *opt = (__be32*)(t1 + 1);
970 opt[0] = htonl((TCPOPT_NOP << 24) |
972 (TCPOPT_MD5SIG << 8) |
974 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
975 &ipv6_hdr(skb)->daddr,
976 &ipv6_hdr(skb)->saddr,
981 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
983 memset(&fl, 0, sizeof(fl));
984 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
985 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
987 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
988 sizeof(*t1), IPPROTO_TCP,
991 fl.proto = IPPROTO_TCP;
992 fl.oif = inet6_iif(skb);
993 fl.fl_ip_dport = t1->dest;
994 fl.fl_ip_sport = t1->source;
995 security_skb_classify_flow(skb, &fl);
997 /* Pass a socket to ip6_dst_lookup even though this is an RST;
998 * the underlying function uses it to retrieve the network namespace. */
1001 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1003 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1004 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1005 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1006 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
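/*
 * Stateless ACK used by TIME_WAIT sockets (tcp_v6_timewait_ack) and by
 * request sockets (tcp_v6_reqsk_send_ack).  Like the RST path it is sent
 * from the control socket and may include timestamp and MD5 options.
 */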
1014 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1015 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
1017 struct tcphdr *th = tcp_hdr(skb), *t1;
1018 struct sk_buff *buff;
1020 struct net *net = dev_net(skb->dev);
1021 struct sock *ctl_sk = net->ipv6.tcp_sk;
1022 unsigned int tot_len = sizeof(struct tcphdr);
1024 #ifdef CONFIG_TCP_MD5SIG
1025 struct tcp_md5sig_key *key;
1026 struct tcp_md5sig_key tw_key;
1029 #ifdef CONFIG_TCP_MD5SIG
1030 if (!tw && skb->sk) {
1031 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
1032 } else if (tw && tw->tw_md5_keylen) {
1033 tw_key.key = tw->tw_md5_key;
1034 tw_key.keylen = tw->tw_md5_keylen;
1042 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1043 #ifdef CONFIG_TCP_MD5SIG
1045 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1048 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1053 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1055 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1057 /* Swap the send and the receive. */
1058 memset(t1, 0, sizeof(*t1));
1059 t1->dest = th->source;
1060 t1->source = th->dest;
1061 t1->doff = tot_len/4;
1062 t1->seq = htonl(seq);
1063 t1->ack_seq = htonl(ack);
1065 t1->window = htons(win);
1067 topt = (__be32 *)(t1 + 1);
1070 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1071 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1072 *topt++ = htonl(tcp_time_stamp);
1076 #ifdef CONFIG_TCP_MD5SIG
1078 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1079 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1080 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1081 &ipv6_hdr(skb)->daddr,
1082 &ipv6_hdr(skb)->saddr,
1087 buff->csum = csum_partial((char *)t1, tot_len, 0);
1089 memset(&fl, 0, sizeof(fl));
1090 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1091 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1093 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1094 tot_len, IPPROTO_TCP,
1097 fl.proto = IPPROTO_TCP;
1098 fl.oif = inet6_iif(skb);
1099 fl.fl_ip_dport = t1->dest;
1100 fl.fl_ip_sport = t1->source;
1101 security_skb_classify_flow(skb, &fl);
1103 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1104 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1105 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1106 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1114 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1116 struct inet_timewait_sock *tw = inet_twsk(sk);
1117 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1119 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1120 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1121 tcptw->tw_ts_recent);
1126 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1128 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
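/*
 * For a segment arriving on a listening socket, look for a matching
 * connection request or an already established socket created from one;
 * fall back to SYN-cookie validation for bare ACKs.  The caller continues
 * processing with whatever socket is returned.
 */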
1132 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1134 struct request_sock *req, **prev;
1135 const struct tcphdr *th = tcp_hdr(skb);
1138 /* Find possible connection requests. */
1139 req = inet6_csk_search_req(sk, &prev, th->source,
1140 &ipv6_hdr(skb)->saddr,
1141 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1143 return tcp_check_req(sk, skb, req, prev);
1145 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1146 &ipv6_hdr(skb)->saddr, th->source,
1147 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1150 if (nsk->sk_state != TCP_TIME_WAIT) {
1154 inet_twsk_put(inet_twsk(nsk));
1158 #ifdef CONFIG_SYN_COOKIES
1159 if (!th->rst && !th->syn && th->ack)
1160 sk = cookie_v6_check(sk, skb);
1165 /* FIXME: this is substantially similar to the ipv4 code.
1166 * Can some kind of merge be done? -- erics
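/*
 * Handle an incoming SYN on a listening socket: allocate a request_sock,
 * parse the TCP options, record (or cookie-encode) the initial sequence
 * number, send the SYN|ACK and queue the request until the final ACK
 * arrives.
 */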
1168 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1170 struct inet6_request_sock *treq;
1171 struct ipv6_pinfo *np = inet6_sk(sk);
1172 struct tcp_options_received tmp_opt;
1173 struct tcp_sock *tp = tcp_sk(sk);
1174 struct request_sock *req = NULL;
1175 __u32 isn = TCP_SKB_CB(skb)->when;
1176 #ifdef CONFIG_SYN_COOKIES
1177 int want_cookie = 0;
1179 #define want_cookie 0
1182 if (skb->protocol == htons(ETH_P_IP))
1183 return tcp_v4_conn_request(sk, skb);
1185 if (!ipv6_unicast_destination(skb))
1188 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1189 if (net_ratelimit())
1190 syn_flood_warning(skb);
1191 #ifdef CONFIG_SYN_COOKIES
1192 if (sysctl_tcp_syncookies)
1199 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1202 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1206 #ifdef CONFIG_TCP_MD5SIG
1207 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1210 tcp_clear_options(&tmp_opt);
1211 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1212 tmp_opt.user_mss = tp->rx_opt.user_mss;
1214 tcp_parse_options(skb, &tmp_opt, 0);
1216 if (want_cookie && !tmp_opt.saw_tstamp)
1217 tcp_clear_options(&tmp_opt);
1219 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1220 tcp_openreq_init(req, &tmp_opt, skb);
1222 treq = inet6_rsk(req);
1223 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1224 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1225 treq->pktopts = NULL;
1227 TCP_ECN_create_request(req, tcp_hdr(skb));
1230 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1231 req->cookie_ts = tmp_opt.tstamp_ok;
1233 if (ipv6_opt_accepted(sk, skb) ||
1234 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1235 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1236 atomic_inc(&skb->users);
1237 treq->pktopts = skb;
1239 treq->iif = sk->sk_bound_dev_if;
1241 /* So that link locals have meaning */
1242 if (!sk->sk_bound_dev_if &&
1243 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1244 treq->iif = inet6_iif(skb);
1246 isn = tcp_v6_init_sequence(skb);
1249 tcp_rsk(req)->snt_isn = isn;
1251 security_inet_conn_request(sk, skb, req);
1253 if (tcp_v6_send_synack(sk, req))
1257 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1265 return 0; /* don't send reset */
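/*
 * Create the child socket once the three-way handshake completes.  For a
 * v4-mapped peer the work is delegated to tcp_v4_syn_recv_sock() and the
 * child is then switched to the mapped af_ops; for native IPv6 a route is
 * looked up, the child is created and its IPv6 state (addresses, options,
 * pktoptions, MD5 key) is filled in before it is hashed.
 */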
1268 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1269 struct request_sock *req,
1270 struct dst_entry *dst)
1272 struct inet6_request_sock *treq = inet6_rsk(req);
1273 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1274 struct tcp6_sock *newtcp6sk;
1275 struct inet_sock *newinet;
1276 struct tcp_sock *newtp;
1278 struct ipv6_txoptions *opt;
1279 #ifdef CONFIG_TCP_MD5SIG
1280 struct tcp_md5sig_key *key;
1283 if (skb->protocol == htons(ETH_P_IP)) {
1288 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1293 newtcp6sk = (struct tcp6_sock *)newsk;
1294 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1296 newinet = inet_sk(newsk);
1297 newnp = inet6_sk(newsk);
1298 newtp = tcp_sk(newsk);
1300 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1302 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1305 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1308 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1310 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1311 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1312 #ifdef CONFIG_TCP_MD5SIG
1313 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1316 newnp->pktoptions = NULL;
1318 newnp->mcast_oif = inet6_iif(skb);
1319 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1322 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1323 * here, tcp_create_openreq_child now does this for us, see the comment in
1324 * that function for the gory details. -acme
1327 /* This is a tricky place. Until this moment IPv4 tcp
1328 worked with IPv6 icsk.icsk_af_ops. Sync it now. */
1331 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1338 if (sk_acceptq_is_full(sk))
1342 struct in6_addr *final_p = NULL, final;
1345 memset(&fl, 0, sizeof(fl));
1346 fl.proto = IPPROTO_TCP;
1347 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1348 if (opt && opt->srcrt) {
1349 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1350 ipv6_addr_copy(&final, &fl.fl6_dst);
1351 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1354 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1355 fl.oif = sk->sk_bound_dev_if;
1356 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1357 fl.fl_ip_sport = inet_sk(sk)->sport;
1358 security_req_classify_flow(req, &fl);
1360 if (ip6_dst_lookup(sk, &dst, &fl))
1364 ipv6_addr_copy(&fl.fl6_dst, final_p);
1366 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1370 newsk = tcp_create_openreq_child(sk, req, skb);
1375 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1376 * count here, tcp_create_openreq_child now does this for us, see the
1377 * comment in that function for the gory details. -acme
1380 newsk->sk_gso_type = SKB_GSO_TCPV6;
1381 __ip6_dst_store(newsk, dst, NULL, NULL);
1383 newtcp6sk = (struct tcp6_sock *)newsk;
1384 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1386 newtp = tcp_sk(newsk);
1387 newinet = inet_sk(newsk);
1388 newnp = inet6_sk(newsk);
1390 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1392 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1393 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1394 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1395 newsk->sk_bound_dev_if = treq->iif;
1397 /* Now IPv6 options...
1399 First: no IPv4 options.
1401 newinet->opt = NULL;
1402 newnp->ipv6_fl_list = NULL;
1405 newnp->rxopt.all = np->rxopt.all;
1407 /* Clone pktoptions received with SYN */
1408 newnp->pktoptions = NULL;
1409 if (treq->pktopts != NULL) {
1410 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1411 kfree_skb(treq->pktopts);
1412 treq->pktopts = NULL;
1413 if (newnp->pktoptions)
1414 skb_set_owner_r(newnp->pktoptions, newsk);
1417 newnp->mcast_oif = inet6_iif(skb);
1418 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1420 /* Clone native IPv6 options from the listening socket (if any).
1422 Yes, keeping a reference count would be much cleverer,
1423 but we do one more thing here: reattach optmem to newsk. */
1427 newnp->opt = ipv6_dup_options(newsk, opt);
1429 sock_kfree_s(sk, opt, opt->tot_len);
1432 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1434 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1435 newnp->opt->opt_flen);
1437 tcp_mtup_init(newsk);
1438 tcp_sync_mss(newsk, dst_mtu(dst));
1439 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1440 tcp_initialize_rcv_mss(newsk);
1442 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1444 #ifdef CONFIG_TCP_MD5SIG
1445 /* Copy over the MD5 key from the original socket */
1446 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1447 /* We're using one, so create a matching key
1448 * on the newsk structure. If we fail to get
1449 * memory, then we end up not copying the key across. */
1452 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1454 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1455 newkey, key->keylen);
1459 __inet6_hash(newsk);
1460 __inet_inherit_port(sk, newsk);
1465 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1467 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1468 if (opt && opt != np->opt)
1469 sock_kfree_s(sk, opt, opt->tot_len);
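/*
 * Validate the receive checksum.  A CHECKSUM_COMPLETE skb can be verified
 * against the pseudo-header immediately; otherwise the pseudo-header sum
 * is seeded into skb->csum and short packets are checksummed right away,
 * leaving longer ones to be completed later during the copy to user space.
 */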
1474 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1476 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1477 if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
1478 &ipv6_hdr(skb)->daddr, skb->csum)) {
1479 skb->ip_summed = CHECKSUM_UNNECESSARY;
1484 skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
1485 &ipv6_hdr(skb)->saddr,
1486 &ipv6_hdr(skb)->daddr, 0));
1488 if (skb->len <= 76) {
1489 return __skb_checksum_complete(skb);
1494 /* The socket must have its spinlock held when we get here.
1497 * We have a potential double-lock case here, so even when
1498 * doing backlog processing we use the BH locking scheme.
1499 * This is because we cannot sleep with the original spinlock held. */
1502 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1504 struct ipv6_pinfo *np = inet6_sk(sk);
1505 struct tcp_sock *tp;
1506 struct sk_buff *opt_skb = NULL;
1508 /* Imagine: the socket is IPv6. An IPv4 packet arrives,
1509 goes to the IPv4 receive handler and is backlogged.
1510 From the backlog it always goes here. Kerboom...
1511 Fortunately, tcp_rcv_established and rcv_established
1512 handle them correctly, but that is not the case with
1513 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK */
1516 if (skb->protocol == htons(ETH_P_IP))
1517 return tcp_v4_do_rcv(sk, skb);
1519 #ifdef CONFIG_TCP_MD5SIG
1520 if (tcp_v6_inbound_md5_hash (sk, skb))
1524 if (sk_filter(sk, skb))
1528 * socket locking is here for SMP purposes as backlog rcv
1529 * is currently called with bh processing disabled.
1532 /* Do Stevens' IPV6_PKTOPTIONS.
1534 Yes, guys, this is the only place in our code where we
1535 can do it without affecting IPv4.
1536 The rest of the code is protocol independent,
1537 and I do not like the idea of uglifying IPv4.
1539 Actually, the whole idea behind IPV6_PKTOPTIONS
1540 does not look very well thought out. For now we latch
1541 the options received in the last packet enqueued
1542 by tcp. Feel free to propose a better solution. */
1546 opt_skb = skb_clone(skb, GFP_ATOMIC);
1548 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1549 TCP_CHECK_TIMER(sk);
1550 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1552 TCP_CHECK_TIMER(sk);
1554 goto ipv6_pktoptions;
1558 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1561 if (sk->sk_state == TCP_LISTEN) {
1562 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1567 * Queue it on the new socket if the new socket is active,
1568 * otherwise we just short-circuit this and continue with the new socket.
1572 if (tcp_child_process(sk, nsk, skb))
1575 __kfree_skb(opt_skb);
1580 TCP_CHECK_TIMER(sk);
1581 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1583 TCP_CHECK_TIMER(sk);
1585 goto ipv6_pktoptions;
1589 tcp_v6_send_reset(sk, skb);
1592 __kfree_skb(opt_skb);
1596 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1601 /* You may ask, what is this?
1603 1. skb was enqueued by tcp.
1604 2. skb is added to the tail of the read queue, rather than out of order.
1605 3. The socket is not in a passive state.
1606 4. Finally, it really contains options that the user wants to receive. */
1609 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1610 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1611 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1612 np->mcast_oif = inet6_iif(opt_skb);
1613 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1614 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1615 if (ipv6_opt_accepted(sk, opt_skb)) {
1616 skb_set_owner_r(opt_skb, sk);
1617 opt_skb = xchg(&np->pktoptions, opt_skb);
1619 __kfree_skb(opt_skb);
1620 opt_skb = xchg(&np->pktoptions, NULL);
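/*
 * Main receive entry point registered with the IPv6 stack.  Validates the
 * TCP header, fills in the skb control block, looks up the owning socket
 * and then either processes the segment directly, prequeues it for a
 * pending reader, or appends it to the backlog when the socket is owned
 * by user context.  TIME_WAIT sockets and unmatched segments are handled
 * at the bottom via tcp_timewait_state_process() and tcp_v6_send_reset().
 */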
1629 static int tcp_v6_rcv(struct sk_buff *skb)
1635 if (skb->pkt_type != PACKET_HOST)
1639 * Count it even if it's bad.
1641 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1643 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1648 if (th->doff < sizeof(struct tcphdr)/4)
1650 if (!pskb_may_pull(skb, th->doff*4))
1653 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1657 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1658 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1659 skb->len - th->doff*4);
1660 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1661 TCP_SKB_CB(skb)->when = 0;
1662 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1663 TCP_SKB_CB(skb)->sacked = 0;
1665 sk = __inet6_lookup(dev_net(skb->dev), &tcp_hashinfo,
1666 &ipv6_hdr(skb)->saddr, th->source,
1667 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1674 if (sk->sk_state == TCP_TIME_WAIT)
1677 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1678 goto discard_and_relse;
1680 if (sk_filter(sk, skb))
1681 goto discard_and_relse;
1685 bh_lock_sock_nested(sk);
1687 if (!sock_owned_by_user(sk)) {
1688 #ifdef CONFIG_NET_DMA
1689 struct tcp_sock *tp = tcp_sk(sk);
1690 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1691 tp->ucopy.dma_chan = get_softnet_dma();
1692 if (tp->ucopy.dma_chan)
1693 ret = tcp_v6_do_rcv(sk, skb);
1697 if (!tcp_prequeue(sk, skb))
1698 ret = tcp_v6_do_rcv(sk, skb);
1701 sk_add_backlog(sk, skb);
1705 return ret ? -1 : 0;
1708 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1711 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1713 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1715 tcp_v6_send_reset(NULL, skb);
1732 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1733 inet_twsk_put(inet_twsk(sk));
1737 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1738 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1739 inet_twsk_put(inet_twsk(sk));
1743 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1748 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1749 &ipv6_hdr(skb)->daddr,
1750 ntohs(th->dest), inet6_iif(skb));
1752 struct inet_timewait_sock *tw = inet_twsk(sk);
1753 inet_twsk_deschedule(tw, &tcp_death_row);
1758 /* Fall through to ACK */
1761 tcp_v6_timewait_ack(sk, skb);
1765 case TCP_TW_SUCCESS:;
1770 static int tcp_v6_remember_stamp(struct sock *sk)
1772 /* Alas, not yet... */
1776 static struct inet_connection_sock_af_ops ipv6_specific = {
1777 .queue_xmit = inet6_csk_xmit,
1778 .send_check = tcp_v6_send_check,
1779 .rebuild_header = inet6_sk_rebuild_header,
1780 .conn_request = tcp_v6_conn_request,
1781 .syn_recv_sock = tcp_v6_syn_recv_sock,
1782 .remember_stamp = tcp_v6_remember_stamp,
1783 .net_header_len = sizeof(struct ipv6hdr),
1784 .setsockopt = ipv6_setsockopt,
1785 .getsockopt = ipv6_getsockopt,
1786 .addr2sockaddr = inet6_csk_addr2sockaddr,
1787 .sockaddr_len = sizeof(struct sockaddr_in6),
1788 .bind_conflict = inet6_csk_bind_conflict,
1789 #ifdef CONFIG_COMPAT
1790 .compat_setsockopt = compat_ipv6_setsockopt,
1791 .compat_getsockopt = compat_ipv6_getsockopt,
1795 #ifdef CONFIG_TCP_MD5SIG
1796 static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1797 .md5_lookup = tcp_v6_md5_lookup,
1798 .calc_md5_hash = tcp_v6_calc_md5_hash,
1799 .md5_add = tcp_v6_md5_add_func,
1800 .md5_parse = tcp_v6_parse_md5_keys,
1805 * TCP over IPv4 via INET6 API
1808 static struct inet_connection_sock_af_ops ipv6_mapped = {
1809 .queue_xmit = ip_queue_xmit,
1810 .send_check = tcp_v4_send_check,
1811 .rebuild_header = inet_sk_rebuild_header,
1812 .conn_request = tcp_v6_conn_request,
1813 .syn_recv_sock = tcp_v6_syn_recv_sock,
1814 .remember_stamp = tcp_v4_remember_stamp,
1815 .net_header_len = sizeof(struct iphdr),
1816 .setsockopt = ipv6_setsockopt,
1817 .getsockopt = ipv6_getsockopt,
1818 .addr2sockaddr = inet6_csk_addr2sockaddr,
1819 .sockaddr_len = sizeof(struct sockaddr_in6),
1820 .bind_conflict = inet6_csk_bind_conflict,
1821 #ifdef CONFIG_COMPAT
1822 .compat_setsockopt = compat_ipv6_setsockopt,
1823 .compat_getsockopt = compat_ipv6_getsockopt,
1827 #ifdef CONFIG_TCP_MD5SIG
1828 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1829 .md5_lookup = tcp_v4_md5_lookup,
1830 .calc_md5_hash = tcp_v4_calc_md5_hash,
1831 .md5_add = tcp_v6_md5_add_func,
1832 .md5_parse = tcp_v6_parse_md5_keys,
1836 /* NOTE: A lot of things are set to zero explicitly by the call to
1837 * sk_alloc(), so they need not be done here. */
1839 static int tcp_v6_init_sock(struct sock *sk)
1841 struct inet_connection_sock *icsk = inet_csk(sk);
1842 struct tcp_sock *tp = tcp_sk(sk);
1844 skb_queue_head_init(&tp->out_of_order_queue);
1845 tcp_init_xmit_timers(sk);
1846 tcp_prequeue_init(tp);
1848 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1849 tp->mdev = TCP_TIMEOUT_INIT;
1851 /* So many TCP implementations out there (incorrectly) count the
1852 * initial SYN frame in their delayed-ACK and congestion control
1853 * algorithms that we must have the following bandaid to talk
1854 * efficiently to them. -DaveM
1858 /* See draft-stevens-tcpca-spec-01 for discussion of the
1859 * initialization of these values.
1861 tp->snd_ssthresh = 0x7fffffff;
1862 tp->snd_cwnd_clamp = ~0;
1863 tp->mss_cache = 536;
1865 tp->reordering = sysctl_tcp_reordering;
1867 sk->sk_state = TCP_CLOSE;
1869 icsk->icsk_af_ops = &ipv6_specific;
1870 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1871 icsk->icsk_sync_mss = tcp_sync_mss;
1872 sk->sk_write_space = sk_stream_write_space;
1873 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1875 #ifdef CONFIG_TCP_MD5SIG
1876 tp->af_specific = &tcp_sock_ipv6_specific;
1879 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1880 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1882 atomic_inc(&tcp_sockets_allocated);
1887 static int tcp_v6_destroy_sock(struct sock *sk)
1889 #ifdef CONFIG_TCP_MD5SIG
1890 /* Clean up the MD5 key list */
1891 if (tcp_sk(sk)->md5sig_info)
1892 tcp_v6_clear_md5_list(sk);
1894 tcp_v4_destroy_sock(sk);
1895 return inet6_destroy_sock(sk);
1898 #ifdef CONFIG_PROC_FS
1899 /* Proc filesystem TCPv6 sock list dumping. */
1900 static void get_openreq6(struct seq_file *seq,
1901 struct sock *sk, struct request_sock *req, int i, int uid)
1903 int ttd = req->expires - jiffies;
1904 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1905 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1911 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1912 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1914 src->s6_addr32[0], src->s6_addr32[1],
1915 src->s6_addr32[2], src->s6_addr32[3],
1916 ntohs(inet_sk(sk)->sport),
1917 dest->s6_addr32[0], dest->s6_addr32[1],
1918 dest->s6_addr32[2], dest->s6_addr32[3],
1919 ntohs(inet_rsk(req)->rmt_port),
1921 0,0, /* could print option size, but that is af dependent. */
1922 1, /* timers active (only the expire timer) */
1923 jiffies_to_clock_t(ttd),
1926 0, /* non standard timer */
1927 0, /* open_requests have no inode */
1931 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1933 struct in6_addr *dest, *src;
1936 unsigned long timer_expires;
1937 struct inet_sock *inet = inet_sk(sp);
1938 struct tcp_sock *tp = tcp_sk(sp);
1939 const struct inet_connection_sock *icsk = inet_csk(sp);
1940 struct ipv6_pinfo *np = inet6_sk(sp);
1943 src = &np->rcv_saddr;
1944 destp = ntohs(inet->dport);
1945 srcp = ntohs(inet->sport);
1947 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1949 timer_expires = icsk->icsk_timeout;
1950 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1952 timer_expires = icsk->icsk_timeout;
1953 } else if (timer_pending(&sp->sk_timer)) {
1955 timer_expires = sp->sk_timer.expires;
1958 timer_expires = jiffies;
1962 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1963 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
1965 src->s6_addr32[0], src->s6_addr32[1],
1966 src->s6_addr32[2], src->s6_addr32[3], srcp,
1967 dest->s6_addr32[0], dest->s6_addr32[1],
1968 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1970 tp->write_seq-tp->snd_una,
1971 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1973 jiffies_to_clock_t(timer_expires - jiffies),
1974 icsk->icsk_retransmits,
1976 icsk->icsk_probes_out,
1978 atomic_read(&sp->sk_refcnt), sp,
1981 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1982 tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
1986 static void get_timewait6_sock(struct seq_file *seq,
1987 struct inet_timewait_sock *tw, int i)
1989 struct in6_addr *dest, *src;
1991 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1992 int ttd = tw->tw_ttd - jiffies;
1997 dest = &tw6->tw_v6_daddr;
1998 src = &tw6->tw_v6_rcv_saddr;
1999 destp = ntohs(tw->tw_dport);
2000 srcp = ntohs(tw->tw_sport);
2003 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2004 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2006 src->s6_addr32[0], src->s6_addr32[1],
2007 src->s6_addr32[2], src->s6_addr32[3], srcp,
2008 dest->s6_addr32[0], dest->s6_addr32[1],
2009 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2010 tw->tw_substate, 0, 0,
2011 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2012 atomic_read(&tw->tw_refcnt), tw);
2015 static int tcp6_seq_show(struct seq_file *seq, void *v)
2017 struct tcp_iter_state *st;
2019 if (v == SEQ_START_TOKEN) {
2024 "st tx_queue rx_queue tr tm->when retrnsmt"
2025 " uid timeout inode\n");
2030 switch (st->state) {
2031 case TCP_SEQ_STATE_LISTENING:
2032 case TCP_SEQ_STATE_ESTABLISHED:
2033 get_tcp6_sock(seq, v, st->num);
2035 case TCP_SEQ_STATE_OPENREQ:
2036 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2038 case TCP_SEQ_STATE_TIME_WAIT:
2039 get_timewait6_sock(seq, v, st->num);
2046 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2050 .owner = THIS_MODULE,
2053 .show = tcp6_seq_show,
2057 int tcp6_proc_init(struct net *net)
2059 return tcp_proc_register(net, &tcp6_seq_afinfo);
2062 void tcp6_proc_exit(struct net *net)
2064 tcp_proc_unregister(net, &tcp6_seq_afinfo);
2068 struct proto tcpv6_prot = {
2070 .owner = THIS_MODULE,
2072 .connect = tcp_v6_connect,
2073 .disconnect = tcp_disconnect,
2074 .accept = inet_csk_accept,
2076 .init = tcp_v6_init_sock,
2077 .destroy = tcp_v6_destroy_sock,
2078 .shutdown = tcp_shutdown,
2079 .setsockopt = tcp_setsockopt,
2080 .getsockopt = tcp_getsockopt,
2081 .recvmsg = tcp_recvmsg,
2082 .backlog_rcv = tcp_v6_do_rcv,
2083 .hash = tcp_v6_hash,
2084 .unhash = inet_unhash,
2085 .get_port = inet_csk_get_port,
2086 .enter_memory_pressure = tcp_enter_memory_pressure,
2087 .sockets_allocated = &tcp_sockets_allocated,
2088 .memory_allocated = &tcp_memory_allocated,
2089 .memory_pressure = &tcp_memory_pressure,
2090 .orphan_count = &tcp_orphan_count,
2091 .sysctl_mem = sysctl_tcp_mem,
2092 .sysctl_wmem = sysctl_tcp_wmem,
2093 .sysctl_rmem = sysctl_tcp_rmem,
2094 .max_header = MAX_TCP_HEADER,
2095 .obj_size = sizeof(struct tcp6_sock),
2096 .twsk_prot = &tcp6_timewait_sock_ops,
2097 .rsk_prot = &tcp6_request_sock_ops,
2098 .h.hashinfo = &tcp_hashinfo,
2099 #ifdef CONFIG_COMPAT
2100 .compat_setsockopt = compat_tcp_setsockopt,
2101 .compat_getsockopt = compat_tcp_getsockopt,
2105 static struct inet6_protocol tcpv6_protocol = {
2106 .handler = tcp_v6_rcv,
2107 .err_handler = tcp_v6_err,
2108 .gso_send_check = tcp_v6_gso_send_check,
2109 .gso_segment = tcp_tso_segment,
2110 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2113 static struct inet_protosw tcpv6_protosw = {
2114 .type = SOCK_STREAM,
2115 .protocol = IPPROTO_TCP,
2116 .prot = &tcpv6_prot,
2117 .ops = &inet6_stream_ops,
2120 .flags = INET_PROTOSW_PERMANENT |
2124 static int tcpv6_net_init(struct net *net)
2126 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2127 SOCK_RAW, IPPROTO_TCP, net);
2130 static void tcpv6_net_exit(struct net *net)
2132 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2135 static struct pernet_operations tcpv6_net_ops = {
2136 .init = tcpv6_net_init,
2137 .exit = tcpv6_net_exit,
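/*
 * Registration: hook tcp_v6_rcv()/tcp_v6_err() into the IPv6 protocol
 * table, register the SOCK_STREAM protosw, and create the per-namespace
 * control socket used for RSTs and stateless ACKs, unwinding in reverse
 * order on failure.
 */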
2140 int __init tcpv6_init(void)
2144 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2148 /* register inet6 protocol */
2149 ret = inet6_register_protosw(&tcpv6_protosw);
2151 goto out_tcpv6_protocol;
2153 ret = register_pernet_subsys(&tcpv6_net_ops);
2155 goto out_tcpv6_protosw;
2160 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2162 inet6_unregister_protosw(&tcpv6_protosw);
2166 void tcpv6_exit(void)
2168 unregister_pernet_subsys(&tcpv6_net_ops);
2169 inet6_unregister_protosw(&tcpv6_protosw);
2170 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);