3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
59 #include <net/addrconf.h>
61 #include <net/dsfield.h>
62 #include <net/timewait_sock.h>
64 #include <asm/uaccess.h>
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
69 /* Socket used for sending RSTs and ACKs */
70 static struct socket *tcp6_socket;
72 static void tcp_v6_send_reset(struct sk_buff *skb);
73 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
74 static void tcp_v6_send_check(struct sock *sk, int len,
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79 static struct inet_connection_sock_af_ops ipv6_mapped;
80 static struct inet_connection_sock_af_ops ipv6_specific;
/* Bind @sk to local port @snum (0 = pick a free port) in the shared TCP
 * bind hash table; inet6_csk_bind_conflict arbitrates v4/v6 reuse conflicts. */
82 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
84 return inet_csk_get_port(&tcp_hashinfo, sk, snum,
85 inet6_csk_bind_conflict);
/* Insert a non-CLOSED socket into the TCP hash tables.  Sockets whose af_ops
 * is ipv6_mapped (v4-mapped connections) take the branch below; the elided
 * lines presumably delegate to the IPv4 hash path — TODO confirm against
 * the full source. */
88 static void tcp_v6_hash(struct sock *sk)
90 if (sk->sk_state != TCP_CLOSE) {
91 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
96 __inet6_hash(&tcp_hashinfo, sk);
/* Compute the TCP checksum over the IPv6 pseudo-header (saddr, daddr,
 * length, IPPROTO_TCP) folded with partial checksum @base. */
101 static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
102 struct in6_addr *saddr,
103 struct in6_addr *daddr,
106 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
/* Choose the initial sequence number for an incoming connection,
 * keying off the addresses/ports of @skb.  Native IPv6 packets use the
 * IPv6-flavoured secure ISN generator; otherwise (a v4-mapped packet)
 * fall back to the IPv4 generator. */
109 static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
111 if (skb->protocol == htons(ETH_P_IPV6)) {
112 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
113 skb->nh.ipv6h->saddr.s6_addr32,
117 return secure_tcp_sequence_number(skb->nh.iph->daddr,
/* Active open: connect an IPv6 TCP socket to @uaddr.
 *
 * Validates the sockaddr, resolves flow labels and link-local scope,
 * handles the v4-mapped special case by delegating to tcp_v4_connect(),
 * performs route + xfrm lookup, binds a local port via
 * inet6_hash_connect(), picks a secure ISN and sends the SYN.
 *
 * NOTE(review): this listing is truncated — error labels, closing braces
 * and several statements are missing; verify against the full source. */
124 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
127 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
128 struct inet_sock *inet = inet_sk(sk);
129 struct inet_connection_sock *icsk = inet_csk(sk);
130 struct ipv6_pinfo *np = inet6_sk(sk);
131 struct tcp_sock *tp = tcp_sk(sk);
132 struct in6_addr *saddr = NULL, *final_p = NULL, final;
134 struct dst_entry *dst;
138 if (addr_len < SIN6_LEN_RFC2133)
141 if (usin->sin6_family != AF_INET6)
142 return(-EAFNOSUPPORT);
144 memset(&fl, 0, sizeof(fl));
/* Honour any flow label in sin6_flowinfo; a non-zero label must map to
 * a flow-label lease owned by this socket. */
147 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
148 IP6_ECN_flow_init(fl.fl6_flowlabel);
149 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
150 struct ip6_flowlabel *flowlabel;
151 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
152 if (flowlabel == NULL)
154 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
155 fl6_sock_release(flowlabel);
160 * connect() to INADDR_ANY means loopback (BSD'ism).
163 if(ipv6_addr_any(&usin->sin6_addr))
164 usin->sin6_addr.s6_addr[15] = 0x1;
166 addr_type = ipv6_addr_type(&usin->sin6_addr);
168 if(addr_type & IPV6_ADDR_MULTICAST)
/* Link-local destinations need a definite interface: either the scope id
 * supplied by the caller or an already-bound device. */
171 if (addr_type&IPV6_ADDR_LINKLOCAL) {
172 if (addr_len >= sizeof(struct sockaddr_in6) &&
173 usin->sin6_scope_id) {
174 /* If interface is set while binding, indices
177 if (sk->sk_bound_dev_if &&
178 sk->sk_bound_dev_if != usin->sin6_scope_id)
181 sk->sk_bound_dev_if = usin->sin6_scope_id;
184 /* Connect to link-local address requires an interface */
185 if (!sk->sk_bound_dev_if)
/* Reconnecting to a different peer invalidates cached timestamps. */
189 if (tp->rx_opt.ts_recent_stamp &&
190 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
191 tp->rx_opt.ts_recent = 0;
192 tp->rx_opt.ts_recent_stamp = 0;
196 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
197 np->flow_label = fl.fl6_flowlabel;
/* v4-mapped destination (::ffff:a.b.c.d): hand the connection to the
 * IPv4 stack, temporarily swapping in the ipv6_mapped af_ops; on failure
 * the elided lines restore the native IPv6 ops. */
203 if (addr_type == IPV6_ADDR_MAPPED) {
204 u32 exthdrlen = icsk->icsk_ext_hdr_len;
205 struct sockaddr_in sin;
207 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
209 if (__ipv6_only_sock(sk))
212 sin.sin_family = AF_INET;
213 sin.sin_port = usin->sin6_port;
214 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
216 icsk->icsk_af_ops = &ipv6_mapped;
217 sk->sk_backlog_rcv = tcp_v4_do_rcv;
219 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
222 icsk->icsk_ext_hdr_len = exthdrlen;
223 icsk->icsk_af_ops = &ipv6_specific;
224 sk->sk_backlog_rcv = tcp_v6_do_rcv;
227 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
229 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
236 if (!ipv6_addr_any(&np->rcv_saddr))
237 saddr = &np->rcv_saddr;
/* Build the flow descriptor for route lookup. */
239 fl.proto = IPPROTO_TCP;
240 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
241 ipv6_addr_copy(&fl.fl6_src,
242 (saddr ? saddr : &np->saddr));
243 fl.oif = sk->sk_bound_dev_if;
244 fl.fl_ip_dport = usin->sin6_port;
245 fl.fl_ip_sport = inet->sport;
/* With a type-0 routing header, route towards the first hop but remember
 * the real final destination for after the lookup. */
247 if (np->opt && np->opt->srcrt) {
248 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
249 ipv6_addr_copy(&final, &fl.fl6_dst);
250 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
254 security_sk_classify_flow(sk, &fl);
256 err = ip6_dst_lookup(sk, &dst, &fl);
260 ipv6_addr_copy(&fl.fl6_dst, final_p);
262 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
267 ipv6_addr_copy(&np->rcv_saddr, saddr);
270 /* set the source address */
271 ipv6_addr_copy(&np->saddr, saddr);
272 inet->rcv_saddr = LOOPBACK4_IPV6;
274 sk->sk_gso_type = SKB_GSO_TCPV6;
275 __ip6_dst_store(sk, dst, NULL);
277 icsk->icsk_ext_hdr_len = 0;
279 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
282 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
284 inet->dport = usin->sin6_port;
286 tcp_set_state(sk, TCP_SYN_SENT);
287 err = inet6_hash_connect(&tcp_death_row, sk);
292 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
297 err = tcp_connect(sk);
/* Failure path: back out to CLOSE and clear route capabilities. */
304 tcp_set_state(sk, TCP_CLOSE);
308 sk->sk_route_caps = 0;
/* ICMPv6 error handler for TCP: invoked when an ICMPv6 error quoting one
 * of our segments arrives.  Locates the owning socket, validates the quoted
 * sequence number, handles PKT_TOOBIG by re-routing and syncing the MSS,
 * and converts other errors for delivery to the socket / request sock.
 *
 * NOTE(review): listing is truncated (labels, braces, some statements
 * missing); verify details against the full source. */
312 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
313 int type, int code, int offset, __u32 info)
315 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
316 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
317 struct ipv6_pinfo *np;
323 sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
324 th->source, skb->dev->ifindex);
327 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
331 if (sk->sk_state == TCP_TIME_WAIT) {
332 inet_twsk_put((struct inet_timewait_sock *)sk);
/* If the socket is locked by a user context we only count the drop;
 * ICMP errors are not backlogged. */
337 if (sock_owned_by_user(sk))
338 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
340 if (sk->sk_state == TCP_CLOSE)
/* The quoted sequence must fall inside the current send window. */
344 seq = ntohl(th->seq);
345 if (sk->sk_state != TCP_LISTEN &&
346 !between(seq, tp->snd_una, tp->snd_nxt)) {
347 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
/* Path-MTU discovery: refresh the route and shrink the MSS. */
353 if (type == ICMPV6_PKT_TOOBIG) {
354 struct dst_entry *dst = NULL;
356 if (sock_owned_by_user(sk))
358 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
361 /* icmp should have updated the destination cache entry */
362 dst = __sk_dst_check(sk, np->dst_cookie);
365 struct inet_sock *inet = inet_sk(sk);
368 /* BUGGG_FUTURE: Again, it is not clear how
369 to handle rthdr case. Ignore this complexity
372 memset(&fl, 0, sizeof(fl));
373 fl.proto = IPPROTO_TCP;
374 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
375 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
376 fl.oif = sk->sk_bound_dev_if;
377 fl.fl_ip_dport = inet->dport;
378 fl.fl_ip_sport = inet->sport;
379 security_skb_classify_flow(skb, &fl);
381 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
382 sk->sk_err_soft = -err;
386 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
387 sk->sk_err_soft = -err;
394 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
395 tcp_sync_mss(sk, dst_mtu(dst));
396 tcp_simple_retransmit(sk);
397 } /* else let the usual retransmit timer handle it */
402 icmpv6_err_convert(type, code, &err);
404 /* Might be for an request_sock */
405 switch (sk->sk_state) {
406 struct request_sock *req, **prev;
408 if (sock_owned_by_user(sk))
411 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
412 &hdr->saddr, inet6_iif(skb));
416 /* ICMPs are not backlogged, hence we cannot get
417 * an established socket here.
419 BUG_TRAP(req->sk == NULL);
421 if (seq != tcp_rsk(req)->snt_isn) {
422 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
/* Hard error for an embryonic connection: drop the request sock. */
426 inet_csk_reqsk_queue_drop(sk, req, prev);
430 case TCP_SYN_RECV: /* Cannot happen.
431 It can, it SYNs are crossed. --ANK */
432 if (!sock_owned_by_user(sk)) {
434 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
438 sk->sk_err_soft = err;
442 if (!sock_owned_by_user(sk) && np->recverr) {
444 sk->sk_error_report(sk);
446 sk->sk_err_soft = err;
/* Build and transmit a SYN+ACK for request sock @req.  Resolves a route
 * (honouring any reflected source route when np->rxopt.bits.osrcrt == 2),
 * lets tcp_make_synack() build the segment, checksums it and sends it
 * with ip6_xmit().  Returns 0 on success / NET_XMIT_CN. */
454 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
455 struct dst_entry *dst)
457 struct inet6_request_sock *treq = inet6_rsk(req);
458 struct ipv6_pinfo *np = inet6_sk(sk);
459 struct sk_buff * skb;
460 struct ipv6_txoptions *opt = NULL;
461 struct in6_addr * final_p = NULL, final;
465 memset(&fl, 0, sizeof(fl));
466 fl.proto = IPPROTO_TCP;
467 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
468 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
469 fl.fl6_flowlabel = 0;
471 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
472 fl.fl_ip_sport = inet_sk(sk)->sport;
473 security_sk_classify_flow(sk, &fl);
/* Source-routing echo: invert the routing header received with the SYN. */
478 np->rxopt.bits.osrcrt == 2 &&
480 struct sk_buff *pktopts = treq->pktopts;
481 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
483 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
486 if (opt && opt->srcrt) {
487 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
488 ipv6_addr_copy(&final, &fl.fl6_dst);
489 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
493 err = ip6_dst_lookup(sk, &dst, &fl);
497 ipv6_addr_copy(&fl.fl6_dst, final_p);
498 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
502 skb = tcp_make_synack(sk, dst, req);
504 struct tcphdr *th = skb->h.th;
506 th->check = tcp_v6_check(th, skb->len,
507 &treq->loc_addr, &treq->rmt_addr,
508 csum_partial((char *)th, skb->len, skb->csum));
510 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
511 err = ip6_xmit(sk, skb, &fl, opt, 0);
512 if (err == NET_XMIT_CN)
/* Only free options we allocated here, never the socket's own np->opt. */
517 if (opt && opt != np->opt)
518 sock_kfree_s(sk, opt, opt->tot_len);
/* Release the pktoptions skb (cloned SYN) held by a request sock. */
523 static void tcp_v6_reqsk_destructor(struct request_sock *req)
525 if (inet6_rsk(req)->pktopts)
526 kfree_skb(inet6_rsk(req)->pktopts);
/* Request-sock operations for IPv6 TCP: how to (re)send the SYN+ACK,
 * ACK or RST for an embryonic connection, and how to destroy it. */
529 static struct request_sock_ops tcp6_request_sock_ops = {
531 .obj_size = sizeof(struct tcp6_request_sock),
532 .rtx_syn_ack = tcp_v6_send_synack,
533 .send_ack = tcp_v6_reqsk_send_ack,
534 .destructor = tcp_v6_reqsk_destructor,
535 .send_reset = tcp_v6_send_reset
/* TIME_WAIT sock operations: object size and timestamp-based uniqueness
 * check used when reusing a TIME_WAIT port pair. */
538 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
539 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
540 .twsk_unique = tcp_twsk_unique,
/* Fill in the TCP checksum of an outgoing segment.  With hardware
 * checksum offload (CHECKSUM_HW) only the pseudo-header sum is stored and
 * skb->csum records where the device must write the final checksum;
 * otherwise compute the full checksum in software. */
543 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
545 struct ipv6_pinfo *np = inet6_sk(sk);
546 struct tcphdr *th = skb->h.th;
548 if (skb->ip_summed == CHECKSUM_HW) {
549 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
550 skb->csum = offsetof(struct tcphdr, check);
552 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
553 csum_partial((char *)th, th->doff<<2,
/* Prepare the checksum fields of a GSO super-packet: seed th->check with
 * the pseudo-header sum and mark the skb for hardware completion. */
558 static int tcp_v6_gso_send_check(struct sk_buff *skb)
560 struct ipv6hdr *ipv6h;
563 if (!pskb_may_pull(skb, sizeof(*th)))
566 ipv6h = skb->nh.ipv6h;
570 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
572 skb->csum = offsetof(struct tcphdr, check);
573 skb->ip_summed = CHECKSUM_HW;
/* Send a RST in response to @skb (no socket context).  Builds a minimal
 * TCP header with source/destination swapped, derives seq/ack from the
 * offending segment per RFC 793, and transmits via the module-level
 * tcp6_socket.  RSTs are never sent to non-unicast destinations. */
577 static void tcp_v6_send_reset(struct sk_buff *skb)
579 struct tcphdr *th = skb->h.th, *t1;
580 struct sk_buff *buff;
/* Never reply to a RST or to a non-unicast destination (elided check
 * for th->rst presumably sits in the missing lines — TODO confirm). */
586 if (!ipv6_unicast_destination(skb))
590 * We need to grab some memory, and put together an RST,
591 * and then put it into the queue to be sent.
594 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
599 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
601 t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));
603 /* Swap the send and the receive. */
604 memset(t1, 0, sizeof(*t1));
605 t1->dest = th->source;
606 t1->source = th->dest;
607 t1->doff = sizeof(*t1)/4;
/* If the offending segment carried an ACK, our RST uses its ack_seq;
 * otherwise ACK the segment's data (SYN/FIN count as one octet each). */
611 t1->seq = th->ack_seq;
614 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
615 + skb->len - (th->doff<<2));
618 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
620 memset(&fl, 0, sizeof(fl));
621 ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
622 ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
624 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
625 sizeof(*t1), IPPROTO_TCP,
628 fl.proto = IPPROTO_TCP;
629 fl.oif = inet6_iif(skb);
630 fl.fl_ip_dport = t1->dest;
631 fl.fl_ip_sport = t1->source;
632 security_skb_classify_flow(skb, &fl);
634 /* sk = NULL, but it is safe for now. RST socket required. */
635 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
637 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
638 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
639 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
640 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
/* Send a bare ACK (no socket context) with the given @seq/@ack/@win and,
 * when @ts is non-zero, a TCP timestamp option.  Used for TIME_WAIT and
 * request-sock ACKs; transmitted via the module-level tcp6_socket. */
648 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
650 struct tcphdr *th = skb->h.th, *t1;
651 struct sk_buff *buff;
/* tot_len grows by TCPOLEN_TSTAMP_ALIGNED when ts != 0 in the elided
 * lines — TODO confirm against the full source. */
653 int tot_len = sizeof(struct tcphdr);
658 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
663 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
665 t1 = (struct tcphdr *) skb_push(buff,tot_len);
667 /* Swap the send and the receive. */
668 memset(t1, 0, sizeof(*t1));
669 t1->dest = th->source;
670 t1->source = th->dest;
671 t1->doff = tot_len/4;
672 t1->seq = htonl(seq);
673 t1->ack_seq = htonl(ack);
675 t1->window = htons(win);
/* Timestamp option: NOP NOP TIMESTAMP len, then TSval / TSecr. */
678 u32 *ptr = (u32*)(t1 + 1);
679 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
680 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
681 *ptr++ = htonl(tcp_time_stamp);
685 buff->csum = csum_partial((char *)t1, tot_len, 0);
687 memset(&fl, 0, sizeof(fl));
688 ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
689 ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
691 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
692 tot_len, IPPROTO_TCP,
695 fl.proto = IPPROTO_TCP;
696 fl.oif = inet6_iif(skb);
697 fl.fl_ip_dport = t1->dest;
698 fl.fl_ip_sport = t1->source;
699 security_skb_classify_flow(skb, &fl);
701 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
702 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
703 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
704 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
/* ACK a segment arriving for a TIME_WAIT socket, replaying the final
 * snd_nxt/rcv_nxt, the scaled receive window and the last timestamp. */
712 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
714 struct inet_timewait_sock *tw = inet_twsk(sk);
715 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
717 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
718 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
719 tcptw->tw_ts_recent);
/* ACK on behalf of a request sock: seq = ISN+1, ack = peer ISN+1. */
724 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
726 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
/* For a segment arriving on a LISTEN socket, find what it belongs to:
 * a pending request sock (-> tcp_check_req), an established socket that
 * raced us, or a TIME_WAIT socket.  SYN-cookie support is compiled out. */
730 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
732 struct request_sock *req, **prev;
733 const struct tcphdr *th = skb->h.th;
736 /* Find possible connection requests. */
737 req = inet6_csk_search_req(sk, &prev, th->source,
738 &skb->nh.ipv6h->saddr,
739 &skb->nh.ipv6h->daddr, inet6_iif(skb));
741 return tcp_check_req(sk, skb, req, prev);
743 nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
744 th->source, &skb->nh.ipv6h->daddr,
745 ntohs(th->dest), inet6_iif(skb));
748 if (nsk->sk_state != TCP_TIME_WAIT) {
752 inet_twsk_put((struct inet_timewait_sock *)nsk);
756 #if 0 /*def CONFIG_SYN_COOKIES*/
757 if (!th->rst && !th->syn && th->ack)
758 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
763 /* FIXME: this is substantially similar to the ipv4 code.
764 * Can some kind of merge be done? -- erics
/* Passive open: handle an incoming SYN on a LISTEN socket.  Allocates a
 * request sock, parses TCP options, records the peer addresses and any
 * packet options to echo later, picks an ISN and sends the SYN+ACK. */
766 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
768 struct inet6_request_sock *treq;
769 struct ipv6_pinfo *np = inet6_sk(sk);
770 struct tcp_options_received tmp_opt;
771 struct tcp_sock *tp = tcp_sk(sk);
772 struct request_sock *req = NULL;
773 __u32 isn = TCP_SKB_CB(skb)->when;
/* v4-mapped SYN: delegate entirely to the IPv4 path. */
775 if (skb->protocol == htons(ETH_P_IP))
776 return tcp_v4_conn_request(sk, skb);
778 if (!ipv6_unicast_destination(skb))
782 * There are no SYN attacks on IPv6, yet...
784 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
786 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
/* Accept queue full with young request socks pending: drop. */
790 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
793 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
797 tcp_clear_options(&tmp_opt);
798 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
799 tmp_opt.user_mss = tp->rx_opt.user_mss;
801 tcp_parse_options(skb, &tmp_opt, 0);
803 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
804 tcp_openreq_init(req, &tmp_opt, skb);
806 treq = inet6_rsk(req);
807 ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
808 ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
809 TCP_ECN_create_request(req, skb->h.th);
810 treq->pktopts = NULL;
/* Keep a reference to the SYN skb if the user asked for any rx options
 * that must be echoed/reported on the child socket. */
811 if (ipv6_opt_accepted(sk, skb) ||
812 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
813 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
814 atomic_inc(&skb->users);
817 treq->iif = sk->sk_bound_dev_if;
819 /* So that link locals have meaning */
820 if (!sk->sk_bound_dev_if &&
821 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
822 treq->iif = inet6_iif(skb);
825 isn = tcp_v6_init_sequence(sk,skb);
827 tcp_rsk(req)->snt_isn = isn;
829 if (tcp_v6_send_synack(sk, req, NULL))
832 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
839 return 0; /* don't send reset */
/* Create the child socket when the handshake completes (final ACK of a
 * passive open).  Handles the v4-mapped case by wrapping the IPv4 child
 * with mapped addresses and ipv6_mapped af_ops; for native IPv6, resolves
 * a route, clones the listener via tcp_create_openreq_child(), copies
 * addresses/options from the request sock and hashes the child.
 *
 * NOTE(review): listing truncated — error labels (listen_overflow /
 * drop paths), braces and some statements are elided. */
842 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
843 struct request_sock *req,
844 struct dst_entry *dst)
846 struct inet6_request_sock *treq = inet6_rsk(req);
847 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
848 struct tcp6_sock *newtcp6sk;
849 struct inet_sock *newinet;
850 struct tcp_sock *newtp;
852 struct ipv6_txoptions *opt;
/* v4-mapped branch: let IPv4 create the child, then dress it up with
 * ::ffff:a.b.c.d addresses and the mapped af_ops. */
854 if (skb->protocol == htons(ETH_P_IP)) {
859 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
864 newtcp6sk = (struct tcp6_sock *)newsk;
865 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
867 newinet = inet_sk(newsk);
868 newnp = inet6_sk(newsk);
869 newtp = tcp_sk(newsk);
871 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
873 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
876 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
879 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
881 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
882 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
883 newnp->pktoptions = NULL;
885 newnp->mcast_oif = inet6_iif(skb);
886 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
889 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
890 * here, tcp_create_openreq_child now does this for us, see the comment in
891 * that function for the gory details. -acme
894 /* It is tricky place. Until this moment IPv4 tcp
895 worked with IPv6 icsk.icsk_af_ops.
898 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
/* Native IPv6 path starts here. */
905 if (sk_acceptq_is_full(sk))
908 if (np->rxopt.bits.osrcrt == 2 &&
909 opt == NULL && treq->pktopts) {
910 struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
912 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
/* Route lookup if the caller did not supply a dst. */
916 struct in6_addr *final_p = NULL, final;
919 memset(&fl, 0, sizeof(fl));
920 fl.proto = IPPROTO_TCP;
921 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
922 if (opt && opt->srcrt) {
923 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
924 ipv6_addr_copy(&final, &fl.fl6_dst);
925 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
928 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
929 fl.oif = sk->sk_bound_dev_if;
930 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
931 fl.fl_ip_sport = inet_sk(sk)->sport;
932 security_sk_classify_flow(sk, &fl);
934 if (ip6_dst_lookup(sk, &dst, &fl))
938 ipv6_addr_copy(&fl.fl6_dst, final_p);
940 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
944 newsk = tcp_create_openreq_child(sk, req, skb);
949 * No need to charge this sock to the relevant IPv6 refcnt debug socks
950 * count here, tcp_create_openreq_child now does this for us, see the
951 * comment in that function for the gory details. -acme
954 newsk->sk_gso_type = SKB_GSO_TCPV6;
955 __ip6_dst_store(newsk, dst, NULL);
957 newtcp6sk = (struct tcp6_sock *)newsk;
958 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
960 newtp = tcp_sk(newsk);
961 newinet = inet_sk(newsk);
962 newnp = inet6_sk(newsk);
964 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
966 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
967 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
968 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
969 newsk->sk_bound_dev_if = treq->iif;
971 /* Now IPv6 options...
973 First: no IPv4 options.
978 newnp->rxopt.all = np->rxopt.all;
980 /* Clone pktoptions received with SYN */
981 newnp->pktoptions = NULL;
982 if (treq->pktopts != NULL) {
983 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
984 kfree_skb(treq->pktopts);
985 treq->pktopts = NULL;
986 if (newnp->pktoptions)
987 skb_set_owner_r(newnp->pktoptions, newsk);
990 newnp->mcast_oif = inet6_iif(skb);
991 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
993 /* Clone native IPv6 options from listening socket (if any)
995 Yes, keeping reference count would be much more clever,
996 but we make one more one thing there: reattach optmem
1000 newnp->opt = ipv6_dup_options(newsk, opt);
1002 sock_kfree_s(sk, opt, opt->tot_len);
1005 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1007 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1008 newnp->opt->opt_flen);
1010 tcp_mtup_init(newsk);
1011 tcp_sync_mss(newsk, dst_mtu(dst));
1012 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1013 tcp_initialize_rcv_mss(newsk);
1015 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1017 __inet6_hash(&tcp_hashinfo, newsk);
1018 inet_inherit_port(&tcp_hashinfo, sk, newsk);
/* Error paths: accept-queue overflow / allocation failure counters. */
1023 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1025 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1026 if (opt && opt != np->opt)
1027 sock_kfree_s(sk, opt, opt->tot_len);
/* Validate / prepare the checksum of an incoming segment.  If hardware
 * already summed the packet and it verifies, mark CHECKSUM_UNNECESSARY;
 * otherwise seed skb->csum with the pseudo-header sum, completing small
 * (<= 76 byte) packets immediately in software. */
1032 static int tcp_v6_checksum_init(struct sk_buff *skb)
1034 if (skb->ip_summed == CHECKSUM_HW) {
1035 if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1036 &skb->nh.ipv6h->daddr,skb->csum)) {
1037 skb->ip_summed = CHECKSUM_UNNECESSARY;
1042 skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1043 &skb->nh.ipv6h->daddr, 0);
1045 if (skb->len <= 76) {
1046 return __skb_checksum_complete(skb);
1051 /* The socket must have it's spinlock held when we get
1054 * We have a potential double-lock case here, so even when
1055 * doing backlog processing we use the BH locking scheme.
1056 * This is because we cannot sleep with the original spinlock
/* Per-socket receive path (socket lock held).  Dispatches v4-mapped
 * packets to tcp_v4_do_rcv(), runs the established fast path or the
 * state machine, and implements IPV6_PKTOPTIONS latching of the most
 * recently received ancillary data.
 *
 * NOTE(review): listing truncated — labels (reset/discard/csum_err/
 * ipv6_pktoptions) and braces are elided. */
1059 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1061 struct ipv6_pinfo *np = inet6_sk(sk);
1062 struct tcp_sock *tp;
1063 struct sk_buff *opt_skb = NULL;
1065 /* Imagine: socket is IPv6. IPv4 packet arrives,
1066 goes to IPv4 receive handler and backlogged.
1067 From backlog it always goes here. Kerboom...
1068 Fortunately, tcp_rcv_established and rcv_established
1069 handle them correctly, but it is not case with
1070 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1073 if (skb->protocol == htons(ETH_P_IP))
1074 return tcp_v4_do_rcv(sk, skb);
1076 if (sk_filter(sk, skb, 0))
1080 * socket locking is here for SMP purposes as backlog rcv
1081 * is currently called with bh processing disabled.
1084 /* Do Stevens' IPV6_PKTOPTIONS.
1086 Yes, guys, it is the only place in our code, where we
1087 may make it not affecting IPv4.
1088 The rest of code is protocol independent,
1089 and I do not like idea to uglify IPv4.
1091 Actually, all the idea behind IPV6_PKTOPTIONS
1092 looks not very well thought. For now we latch
1093 options, received in the last packet, enqueued
1094 by tcp. Feel free to propose better solution.
1098 opt_skb = skb_clone(skb, GFP_ATOMIC);
1100 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1101 TCP_CHECK_TIMER(sk);
1102 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1104 TCP_CHECK_TIMER(sk);
1106 goto ipv6_pktoptions;
/* Slow path: verify header length and checksum before state machine. */
1110 if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
1113 if (sk->sk_state == TCP_LISTEN) {
1114 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1119 * Queue it on the new socket if the new socket is active,
1120 * otherwise we just shortcircuit this and continue with
1124 if (tcp_child_process(sk, nsk, skb))
1127 __kfree_skb(opt_skb);
1132 TCP_CHECK_TIMER(sk);
1133 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1135 TCP_CHECK_TIMER(sk);
1137 goto ipv6_pktoptions;
1141 tcp_v6_send_reset(skb);
1144 __kfree_skb(opt_skb);
1148 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1153 /* Do you ask, what is it?
1155 1. skb was enqueued by tcp.
1156 2. skb is added to tail of read queue, rather than out of order.
1157 3. socket is not in passive state.
1158 4. Finally, it really contains options, which user wants to receive.
1161 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1162 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1163 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1164 np->mcast_oif = inet6_iif(opt_skb)
1165 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1166 np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
1167 if (ipv6_opt_accepted(sk, opt_skb)) {
1168 skb_set_owner_r(opt_skb, sk);
1169 opt_skb = xchg(&np->pktoptions, opt_skb);
1171 __kfree_skb(opt_skb);
1172 opt_skb = xchg(&np->pktoptions, NULL);
/* Protocol entry point: every incoming IPv6 TCP segment lands here.
 * Validates header and checksum, fills TCP_SKB_CB, looks up the owning
 * socket, runs xfrm policy and socket filter, then delivers directly,
 * via the prequeue, or onto the backlog depending on socket lock state.
 * Segments with no owner get a RST; the tail handles TIME_WAIT sockets.
 *
 * NOTE(review): listing truncated — labels (no_tcp_socket, discard_it,
 * do_time_wait) and braces are elided. */
1181 static int tcp_v6_rcv(struct sk_buff **pskb)
1183 struct sk_buff *skb = *pskb;
1188 if (skb->pkt_type != PACKET_HOST)
1192 * Count it even if it's bad.
1194 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1196 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1201 if (th->doff < sizeof(struct tcphdr)/4)
1203 if (!pskb_may_pull(skb, th->doff*4))
1206 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
1207 tcp_v6_checksum_init(skb)))
/* Precompute control-block fields used by the TCP state machine. */
1211 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1212 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1213 skb->len - th->doff*4);
1214 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1215 TCP_SKB_CB(skb)->when = 0;
1216 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
1217 TCP_SKB_CB(skb)->sacked = 0;
1219 sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
1220 &skb->nh.ipv6h->daddr, ntohs(th->dest),
1227 if (sk->sk_state == TCP_TIME_WAIT)
1230 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1231 goto discard_and_relse;
1233 if (sk_filter(sk, skb, 0))
1234 goto discard_and_relse;
/* Deliver now if the user is not holding the lock; otherwise backlog. */
1240 if (!sock_owned_by_user(sk)) {
1241 #ifdef CONFIG_NET_DMA
1242 struct tcp_sock *tp = tcp_sk(sk);
1243 if (tp->ucopy.dma_chan)
1244 ret = tcp_v6_do_rcv(sk, skb);
1248 if (!tcp_prequeue(sk, skb))
1249 ret = tcp_v6_do_rcv(sk, skb);
1252 sk_add_backlog(sk, skb);
1256 return ret ? -1 : 0;
/* No owning socket: policy check, then RST if the segment is valid. */
1259 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1262 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1264 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1266 tcp_v6_send_reset(skb);
/* TIME_WAIT handling: re-validate, then let the timewait state machine
 * decide between reviving via a fresh listener (TCP_TW_SYN), ACKing,
 * or resetting. */
1283 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1284 inet_twsk_put((struct inet_timewait_sock *)sk);
1288 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1289 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1290 inet_twsk_put((struct inet_timewait_sock *)sk);
1294 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
1300 sk2 = inet6_lookup_listener(&tcp_hashinfo,
1301 &skb->nh.ipv6h->daddr,
1302 ntohs(th->dest), inet6_iif(skb));
1304 struct inet_timewait_sock *tw = inet_twsk(sk);
1305 inet_twsk_deschedule(tw, &tcp_death_row);
1310 /* Fall through to ACK */
1313 tcp_v6_timewait_ack(sk, skb);
1317 case TCP_TW_SUCCESS:;
/* Peer timestamp caching — unimplemented for IPv6 (no-op stub). */
1322 static int tcp_v6_remember_stamp(struct sock *sk)
1324 /* Alas, not yet... */
/* Address-family operations for native IPv6 TCP sockets. */
1328 static struct inet_connection_sock_af_ops ipv6_specific = {
1329 .queue_xmit = inet6_csk_xmit,
1330 .send_check = tcp_v6_send_check,
1331 .rebuild_header = inet6_sk_rebuild_header,
1332 .conn_request = tcp_v6_conn_request,
1333 .syn_recv_sock = tcp_v6_syn_recv_sock,
1334 .remember_stamp = tcp_v6_remember_stamp,
1335 .net_header_len = sizeof(struct ipv6hdr),
1336 .setsockopt = ipv6_setsockopt,
1337 .getsockopt = ipv6_getsockopt,
1338 .addr2sockaddr = inet6_csk_addr2sockaddr,
1339 .sockaddr_len = sizeof(struct sockaddr_in6),
1340 #ifdef CONFIG_COMPAT
1341 .compat_setsockopt = compat_ipv6_setsockopt,
1342 .compat_getsockopt = compat_ipv6_getsockopt,
1347 * TCP over IPv4 via INET6 API
/* Address-family operations for v4-mapped sockets: IPv4 transmit path
 * (ip_queue_xmit, iphdr length) behind the IPv6 socket API. */
1350 static struct inet_connection_sock_af_ops ipv6_mapped = {
1351 .queue_xmit = ip_queue_xmit,
1352 .send_check = tcp_v4_send_check,
1353 .rebuild_header = inet_sk_rebuild_header,
1354 .conn_request = tcp_v6_conn_request,
1355 .syn_recv_sock = tcp_v6_syn_recv_sock,
1356 .remember_stamp = tcp_v4_remember_stamp,
1357 .net_header_len = sizeof(struct iphdr),
1358 .setsockopt = ipv6_setsockopt,
1359 .getsockopt = ipv6_getsockopt,
1360 .addr2sockaddr = inet6_csk_addr2sockaddr,
1361 .sockaddr_len = sizeof(struct sockaddr_in6),
1362 #ifdef CONFIG_COMPAT
1363 .compat_setsockopt = compat_ipv6_setsockopt,
1364 .compat_getsockopt = compat_ipv6_getsockopt,
1368 /* NOTE: A lot of things set to zero explicitly by call to
1369 * sk_alloc() so need not be done here.
/* Per-socket initialization at socket(2) time: timers, queues, RTO and
 * congestion defaults, af_ops wiring, and buffer sizes from sysctls. */
1371 static int tcp_v6_init_sock(struct sock *sk)
1373 struct inet_connection_sock *icsk = inet_csk(sk);
1374 struct tcp_sock *tp = tcp_sk(sk);
1376 skb_queue_head_init(&tp->out_of_order_queue);
1377 tcp_init_xmit_timers(sk);
1378 tcp_prequeue_init(tp);
1380 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1381 tp->mdev = TCP_TIMEOUT_INIT;
1383 /* So many TCP implementations out there (incorrectly) count the
1384 * initial SYN frame in their delayed-ACK and congestion control
1385 * algorithms that we must have the following bandaid to talk
1386 * efficiently to them. -DaveM
1390 /* See draft-stevens-tcpca-spec-01 for discussion of the
1391 * initialization of these values.
1393 tp->snd_ssthresh = 0x7fffffff;
1394 tp->snd_cwnd_clamp = ~0;
1395 tp->mss_cache = 536;
1397 tp->reordering = sysctl_tcp_reordering;
1399 sk->sk_state = TCP_CLOSE;
1401 icsk->icsk_af_ops = &ipv6_specific;
1402 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1403 icsk->icsk_sync_mss = tcp_sync_mss;
1404 sk->sk_write_space = sk_stream_write_space;
1405 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1407 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1408 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1410 atomic_inc(&tcp_sockets_allocated);
/* Teardown: run the shared IPv4/TCP destructor, then the INET6 one. */
1415 static int tcp_v6_destroy_sock(struct sock *sk)
1417 tcp_v4_destroy_sock(sk);
1418 return inet6_destroy_sock(sk);
1421 /* Proc filesystem TCPv6 sock list dumping. */
/* Format one request sock (SYN_RECV entry) as a /proc/net/tcp6 line. */
1422 static void get_openreq6(struct seq_file *seq,
1423 struct sock *sk, struct request_sock *req, int i, int uid)
/* Remaining time until the SYN+ACK retransmit timer fires. */
1425 int ttd = req->expires - jiffies;
1426 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1427 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1433 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1434 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1436 src->s6_addr32[0], src->s6_addr32[1],
1437 src->s6_addr32[2], src->s6_addr32[3],
1438 ntohs(inet_sk(sk)->sport),
1439 dest->s6_addr32[0], dest->s6_addr32[1],
1440 dest->s6_addr32[2], dest->s6_addr32[3],
1441 ntohs(inet_rsk(req)->rmt_port),
1443 0,0, /* could print option size, but that is af dependent. */
1444 1, /* timers active (only the expire timer) */
1445 jiffies_to_clock_t(ttd),
1448 0, /* non standard timer */
1449 0, /* open_requests have no inode */
1453 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1455 struct in6_addr *dest, *src;
1458 unsigned long timer_expires;
1459 struct inet_sock *inet = inet_sk(sp);
1460 struct tcp_sock *tp = tcp_sk(sp);
1461 const struct inet_connection_sock *icsk = inet_csk(sp);
1462 struct ipv6_pinfo *np = inet6_sk(sp);
1465 src = &np->rcv_saddr;
1466 destp = ntohs(inet->dport);
1467 srcp = ntohs(inet->sport);
1469 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1471 timer_expires = icsk->icsk_timeout;
1472 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1474 timer_expires = icsk->icsk_timeout;
1475 } else if (timer_pending(&sp->sk_timer)) {
1477 timer_expires = sp->sk_timer.expires;
1480 timer_expires = jiffies;
1484 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1485 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
1487 src->s6_addr32[0], src->s6_addr32[1],
1488 src->s6_addr32[2], src->s6_addr32[3], srcp,
1489 dest->s6_addr32[0], dest->s6_addr32[1],
1490 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1492 tp->write_seq-tp->snd_una,
1493 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1495 jiffies_to_clock_t(timer_expires - jiffies),
1496 icsk->icsk_retransmits,
1498 icsk->icsk_probes_out,
1500 atomic_read(&sp->sk_refcnt), sp,
1503 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
1504 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
1508 static void get_timewait6_sock(struct seq_file *seq,
1509 struct inet_timewait_sock *tw, int i)
1511 struct in6_addr *dest, *src;
1513 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1514 int ttd = tw->tw_ttd - jiffies;
1519 dest = &tw6->tw_v6_daddr;
1520 src = &tw6->tw_v6_rcv_saddr;
1521 destp = ntohs(tw->tw_dport);
1522 srcp = ntohs(tw->tw_sport);
1525 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1526 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1528 src->s6_addr32[0], src->s6_addr32[1],
1529 src->s6_addr32[2], src->s6_addr32[3], srcp,
1530 dest->s6_addr32[0], dest->s6_addr32[1],
1531 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1532 tw->tw_substate, 0, 0,
1533 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1534 atomic_read(&tw->tw_refcnt), tw);
1537 #ifdef CONFIG_PROC_FS
1538 static int tcp6_seq_show(struct seq_file *seq, void *v)
1540 struct tcp_iter_state *st;
1542 if (v == SEQ_START_TOKEN) {
1547 "st tx_queue rx_queue tr tm->when retrnsmt"
1548 " uid timeout inode\n");
1553 switch (st->state) {
1554 case TCP_SEQ_STATE_LISTENING:
1555 case TCP_SEQ_STATE_ESTABLISHED:
1556 get_tcp6_sock(seq, v, st->num);
1558 case TCP_SEQ_STATE_OPENREQ:
1559 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1561 case TCP_SEQ_STATE_TIME_WAIT:
1562 get_timewait6_sock(seq, v, st->num);
1569 static struct file_operations tcp6_seq_fops;
1570 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1571 .owner = THIS_MODULE,
1574 .seq_show = tcp6_seq_show,
1575 .seq_fops = &tcp6_seq_fops,
1578 int __init tcp6_proc_init(void)
1580 return tcp_proc_register(&tcp6_seq_afinfo);
1583 void tcp6_proc_exit(void)
1585 tcp_proc_unregister(&tcp6_seq_afinfo);
1589 struct proto tcpv6_prot = {
1591 .owner = THIS_MODULE,
1593 .connect = tcp_v6_connect,
1594 .disconnect = tcp_disconnect,
1595 .accept = inet_csk_accept,
1597 .init = tcp_v6_init_sock,
1598 .destroy = tcp_v6_destroy_sock,
1599 .shutdown = tcp_shutdown,
1600 .setsockopt = tcp_setsockopt,
1601 .getsockopt = tcp_getsockopt,
1602 .sendmsg = tcp_sendmsg,
1603 .recvmsg = tcp_recvmsg,
1604 .backlog_rcv = tcp_v6_do_rcv,
1605 .hash = tcp_v6_hash,
1606 .unhash = tcp_unhash,
1607 .get_port = tcp_v6_get_port,
1608 .enter_memory_pressure = tcp_enter_memory_pressure,
1609 .sockets_allocated = &tcp_sockets_allocated,
1610 .memory_allocated = &tcp_memory_allocated,
1611 .memory_pressure = &tcp_memory_pressure,
1612 .orphan_count = &tcp_orphan_count,
1613 .sysctl_mem = sysctl_tcp_mem,
1614 .sysctl_wmem = sysctl_tcp_wmem,
1615 .sysctl_rmem = sysctl_tcp_rmem,
1616 .max_header = MAX_TCP_HEADER,
1617 .obj_size = sizeof(struct tcp6_sock),
1618 .twsk_prot = &tcp6_timewait_sock_ops,
1619 .rsk_prot = &tcp6_request_sock_ops,
1620 #ifdef CONFIG_COMPAT
1621 .compat_setsockopt = compat_tcp_setsockopt,
1622 .compat_getsockopt = compat_tcp_getsockopt,
1626 static struct inet6_protocol tcpv6_protocol = {
1627 .handler = tcp_v6_rcv,
1628 .err_handler = tcp_v6_err,
1629 .gso_send_check = tcp_v6_gso_send_check,
1630 .gso_segment = tcp_tso_segment,
1631 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1634 static struct inet_protosw tcpv6_protosw = {
1635 .type = SOCK_STREAM,
1636 .protocol = IPPROTO_TCP,
1637 .prot = &tcpv6_prot,
1638 .ops = &inet6_stream_ops,
1641 .flags = INET_PROTOSW_PERMANENT |
1645 void __init tcpv6_init(void)
1647 /* register inet6 protocol */
1648 if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
1649 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
1650 inet6_register_protosw(&tcpv6_protosw);
1652 if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
1654 panic("Failed to create the TCPv6 control socket.\n");