/*
 * linux-2.6: net/ipv6/tcp_ipv6.c
 * tcp md5sig: Share most of hash calculation bits between IPv4 and IPv6.
 */
1 /*
2  *      TCP over IPv6
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9  *
10  *      Based on:
11  *      linux/net/ipv4/tcp.c
12  *      linux/net/ipv4/tcp_input.c
13  *      linux/net/ipv4/tcp_output.c
14  *
15  *      Fixes:
16  *      Hideaki YOSHIFUJI       :       sin6_scope_id support
17  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
18  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
19  *                                      a single port at the same time.
20  *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
21  *
22  *      This program is free software; you can redistribute it and/or
23  *      modify it under the terms of the GNU General Public License
24  *      as published by the Free Software Foundation; either version
25  *      2 of the License, or (at your option) any later version.
26  */
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
35 #include <linux/in.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64
65 #include <asm/uaccess.h>
66
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72
/* Forward declarations for the IPv6 receive/transmit helpers defined below. */
static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void     tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void     tcp_v6_send_check(struct sock *sk, int len,
                                  struct sk_buff *skb);

static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

/* Connection-socket ops: one set for native IPv6, one for sockets that
 * connected to an IPv4-mapped address and use the IPv4 paths. */
static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#endif
86
87 static void tcp_v6_hash(struct sock *sk)
88 {
89         if (sk->sk_state != TCP_CLOSE) {
90                 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
91                         tcp_prot.hash(sk);
92                         return;
93                 }
94                 local_bh_disable();
95                 __inet6_hash(sk);
96                 local_bh_enable();
97         }
98 }
99
/* TCP checksum over the IPv6 pseudo-header plus @base (segment csum). */
static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
                                   struct in6_addr *saddr,
                                   struct in6_addr *daddr,
                                   __wsum base)
{
        return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
107
/* Generate a secure initial sequence number for an incoming IPv6 SYN,
 * keyed on the address/port 4-tuple of the packet. */
static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
        return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
                                            ipv6_hdr(skb)->saddr.s6_addr32,
                                            tcp_hdr(skb)->dest,
                                            tcp_hdr(skb)->source);
}
115
/*
 * Active open (connect()) for TCP over IPv6.
 *
 * Validates the destination sockaddr, handles flow labels and link-local
 * scope ids, diverts v4-mapped destinations to tcp_v4_connect(), then
 * performs the route/xfrm lookup, binds a local port and transmits the SYN.
 * Returns 0 on success or a negative errno.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                          int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p = NULL, final;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
        int err;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return(-EAFNOSUPPORT);

        memset(&fl, 0, sizeof(fl));

        if (np->sndflow) {
                /* Caller supplied flow info; a non-zero flow label must
                 * correspond to a label leased by this socket. */
                fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl.fl6_flowlabel);
                if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                        fl6_sock_release(flowlabel);
                }
        }

        /*
         *      connect() to INADDR_ANY means loopback (BSD'ism).
         */

        if(ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 0x1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if(addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type&IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connect to link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        /* Reconnecting to a different peer: drop stale timestamp state so
         * PAWS does not reject the new connection. */
        if (tp->rx_opt.ts_recent_stamp &&
            !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
                tp->rx_opt.ts_recent = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq = 0;
        }

        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
        np->flow_label = fl.fl6_flowlabel;

        /*
         *      TCP over IPv4
         */

        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                /* Switch the socket over to the IPv4 operation vectors
                 * before attempting the v4 connect. */
                icsk->icsk_af_ops = &ipv6_mapped;
                sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

                if (err) {
                        /* Roll back to the IPv6 vectors on failure. */
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &ipv6_specific;
                        sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                        tp->af_specific = &tcp_sock_ipv6_specific;
#endif
                        goto failure;
                } else {
                        ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->saddr);
                        ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->rcv_saddr);
                }

                return err;
        }

        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;

        fl.proto = IPPROTO_TCP;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src,
                       (saddr ? saddr : &np->saddr));
        fl.oif = sk->sk_bound_dev_if;
        fl.fl_ip_dport = usin->sin6_port;
        fl.fl_ip_sport = inet->sport;

        /* With a source routing header, route towards the first hop but
         * remember the real destination for after the lookup. */
        if (np->opt && np->opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        security_sk_classify_flow(sk, &fl);

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto failure;
        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
                if (err == -EREMOTE)
                        err = ip6_dst_blackhole(sk, &dst, &fl);
                if (err < 0)
                        goto failure;
        }

        if (saddr == NULL) {
                /* Source address was chosen by the routing lookup. */
                saddr = &fl.fl6_src;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }

        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
        inet->rcv_saddr = LOOPBACK4_IPV6;

        sk->sk_gso_type = SKB_GSO_TCPV6;
        __ip6_dst_store(sk, dst, NULL, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (np->opt)
                icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
                                          np->opt->opt_nflen);

        tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

        inet->dport = usin->sin6_port;

        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet6_hash_connect(&tcp_death_row, sk);
        if (err)
                goto late_failure;

        if (!tp->write_seq)
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             np->daddr.s6_addr32,
                                                             inet->sport,
                                                             inet->dport);

        err = tcp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        tcp_set_state(sk, TCP_CLOSE);
        __sk_dst_reset(sk);
failure:
        inet->dport = 0;
        sk->sk_route_caps = 0;
        return err;
}
313
/*
 * ICMPv6 error handler for TCP.
 *
 * Looks up the socket the quoted TCP header belongs to and reacts:
 * PKT_TOOBIG triggers path-MTU adjustment and retransmit; errors for
 * embryonic (request-sock) connections drop the request; otherwise the
 * converted errno is reported to the socket owner.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                int type, int code, int offset, __be32 info)
{
        struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
        const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        struct tcp_sock *tp;
        __u32 seq;

        sk = inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, &hdr->daddr,
                        th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

        if (sk == NULL) {
                ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
                return;
        }

        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }

        bh_lock_sock(sk);
        /* Socket busy in process context; the ICMP is effectively dropped
         * for the paths below that bail out, so count it. */
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        /* Ignore errors quoting a sequence outside our send window. */
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                        goto out;

                /* icmp should have updated the destination cache entry */
                dst = __sk_dst_check(sk, np->dst_cookie);

                if (dst == NULL) {
                        struct inet_sock *inet = inet_sk(sk);
                        struct flowi fl;

                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle rthdr case. Ignore this complexity
                           for now.
                         */
                        memset(&fl, 0, sizeof(fl));
                        fl.proto = IPPROTO_TCP;
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
                        fl.fl_ip_dport = inet->dport;
                        fl.fl_ip_sport = inet->sport;
                        security_skb_classify_flow(skb, &fl);

                        if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                } else
                        dst_hold(dst);

                /* Path MTU shrank: update the MSS and retransmit what no
                 * longer fits. */
                if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                        tcp_sync_mss(sk, dst_mtu(dst));
                        tcp_simple_retransmit(sk);
                } /* else let the usual retransmit timer handle it */
                dst_release(dst);
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        /* Might be for an request_sock */
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
                                           &hdr->saddr, inet6_iif(skb));
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                 * an established socket here.
                 */
                BUG_TRAP(req->sk == NULL);

                /* The error must quote our SYN+ACK's ISN to be believed. */
                if (seq != tcp_rsk(req)->snt_isn) {
                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen.
                               It can, it SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
                        sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */

                        tcp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}
454
455
/*
 * Build and transmit a SYN+ACK for the given request sock.
 * Performs its own route/xfrm lookup from the request's addresses.
 * Returns a net_xmit_eval() result on transmit, or a negative error.
 */
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
{
        struct inet6_request_sock *treq = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff * skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr * final_p = NULL, final;
        struct flowi fl;
        struct dst_entry *dst;
        int err = -1;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_TCP;
        ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
        ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = treq->iif;
        fl.fl_ip_dport = inet_rsk(req)->rmt_port;
        fl.fl_ip_sport = inet_sk(sk)->sport;
        security_req_classify_flow(req, &fl);

        opt = np->opt;
        /* Route via the first hop of a source routing header, keeping the
         * real destination for after the lookup. */
        if (opt && opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto done;
        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);
        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                goto done;

        skb = tcp_make_synack(sk, dst, req);
        if (skb) {
                struct tcphdr *th = tcp_hdr(skb);

                /* Full software checksum: this packet is built by hand. */
                th->check = tcp_v6_check(th, skb->len,
                                         &treq->loc_addr, &treq->rmt_addr,
                                         csum_partial((char *)th, skb->len, skb->csum));

                ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
                err = ip6_xmit(sk, skb, &fl, opt, 0);
                err = net_xmit_eval(err);
        }

done:
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return err;
}
512
/* Log a possible SYN flood on the given port; the message depends on
 * whether SYN cookies are compiled in and enabled. */
static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
        if (sysctl_tcp_syncookies)
                printk(KERN_INFO
                       "TCPv6: Possible SYN flooding on port %d. "
                       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
        else
#endif
                printk(KERN_INFO
                       "TCPv6: Possible SYN flooding on port %d. "
                       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}
526
527 static void tcp_v6_reqsk_destructor(struct request_sock *req)
528 {
529         if (inet6_rsk(req)->pktopts)
530                 kfree_skb(inet6_rsk(req)->pktopts);
531 }
532
533 #ifdef CONFIG_TCP_MD5SIG
534 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
535                                                    struct in6_addr *addr)
536 {
537         struct tcp_sock *tp = tcp_sk(sk);
538         int i;
539
540         BUG_ON(tp == NULL);
541
542         if (!tp->md5sig_info || !tp->md5sig_info->entries6)
543                 return NULL;
544
545         for (i = 0; i < tp->md5sig_info->entries6; i++) {
546                 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
547                         return &tp->md5sig_info->keys6[i].base;
548         }
549         return NULL;
550 }
551
/* MD5 key lookup keyed on the peer address of an established socket. */
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
                                                struct sock *addr_sk)
{
        return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}
557
/* MD5 key lookup keyed on the remote address of a request sock. */
static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
                                                      struct request_sock *req)
{
        return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
563
/*
 * Add or update the MD5 signature key for IPv6 peer @peer.
 *
 * Takes ownership of @newkey: on success it is stored in the key table,
 * on failure it is kfree()d here.  Returns 0 or -ENOMEM.
 */
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
                             char *newkey, u8 newkeylen)
{
        /* Add key to the list */
        struct tcp_md5sig_key *key;
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp6_md5sig_key *keys;

        key = tcp_v6_md5_do_lookup(sk, peer);
        if (key) {
                /* modify existing entry - just update that one */
                kfree(key->key);
                key->key = newkey;
                key->keylen = newkeylen;
        } else {
                /* reallocate new list if current one is full. */
                if (!tp->md5sig_info) {
                        tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
                        if (!tp->md5sig_info) {
                                kfree(newkey);
                                return -ENOMEM;
                        }
                        /* MD5 signing is incompatible with GSO. */
                        sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
                }
                /* Make sure the shared crypto pool exists before storing. */
                if (tcp_alloc_md5sig_pool() == NULL) {
                        kfree(newkey);
                        return -ENOMEM;
                }
                if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
                        /* Grow the key array by one slot. */
                        keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
                                       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

                        if (!keys) {
                                tcp_free_md5sig_pool();
                                kfree(newkey);
                                return -ENOMEM;
                        }

                        if (tp->md5sig_info->entries6)
                                memmove(keys, tp->md5sig_info->keys6,
                                        (sizeof (tp->md5sig_info->keys6[0]) *
                                         tp->md5sig_info->entries6));

                        kfree(tp->md5sig_info->keys6);
                        tp->md5sig_info->keys6 = keys;
                        tp->md5sig_info->alloced6++;
                }

                ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
                               peer);
                tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
                tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

                tp->md5sig_info->entries6++;
        }
        return 0;
}
621
/* tcp_sock_af_ops wrapper: add a key for the peer of @addr_sk. */
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
                               u8 *newkey, __u8 newkeylen)
{
        return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
                                 newkey, newkeylen);
}
628
/*
 * Delete the MD5 key configured for IPv6 peer @peer.
 * Returns 0 on success, -ENOENT if no key exists for that peer.
 */
static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int i;

        for (i = 0; i < tp->md5sig_info->entries6; i++) {
                if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
                        /* Free the key */
                        kfree(tp->md5sig_info->keys6[i].base.key);
                        tp->md5sig_info->entries6--;

                        if (tp->md5sig_info->entries6 == 0) {
                                kfree(tp->md5sig_info->keys6);
                                tp->md5sig_info->keys6 = NULL;
                                tp->md5sig_info->alloced6 = 0;
                        } else {
                                /* shrink the database */
                                if (tp->md5sig_info->entries6 != i)
                                        memmove(&tp->md5sig_info->keys6[i],
                                                &tp->md5sig_info->keys6[i+1],
                                                (tp->md5sig_info->entries6 - i)
                                                * sizeof (tp->md5sig_info->keys6[0]));
                        }
                        /* Drop our reference on the shared crypto pool. */
                        tcp_free_md5sig_pool();
                        return 0;
                }
        }
        return -ENOENT;
}
658
/*
 * Free every MD5 key on this socket — both the IPv6 entries and the
 * IPv4 entries (present when the socket handled v4-mapped peers) — and
 * release the corresponding crypto-pool references.
 */
static void tcp_v6_clear_md5_list (struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int i;

        if (tp->md5sig_info->entries6) {
                for (i = 0; i < tp->md5sig_info->entries6; i++)
                        kfree(tp->md5sig_info->keys6[i].base.key);
                tp->md5sig_info->entries6 = 0;
                tcp_free_md5sig_pool();
        }

        kfree(tp->md5sig_info->keys6);
        tp->md5sig_info->keys6 = NULL;
        tp->md5sig_info->alloced6 = 0;

        if (tp->md5sig_info->entries4) {
                for (i = 0; i < tp->md5sig_info->entries4; i++)
                        kfree(tp->md5sig_info->keys4[i].base.key);
                tp->md5sig_info->entries4 = 0;
                tcp_free_md5sig_pool();
        }

        kfree(tp->md5sig_info->keys4);
        tp->md5sig_info->keys4 = NULL;
        tp->md5sig_info->alloced4 = 0;
}
686
/*
 * setsockopt(TCP_MD5SIG) handler for IPv6 sockets.
 *
 * A zero key length deletes the key for the given address; otherwise the
 * key is copied from userspace and added.  v4-mapped addresses are routed
 * to the IPv4 add/del helpers.  Returns 0 or a negative errno.
 */
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
                                  int optlen)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
        u8 *newkey;

        if (optlen < sizeof(cmd))
                return -EINVAL;

        if (copy_from_user(&cmd, optval, sizeof(cmd)))
                return -EFAULT;

        if (sin6->sin6_family != AF_INET6)
                return -EINVAL;

        if (!cmd.tcpm_keylen) {
                /* Zero length means: delete the key for this address. */
                if (!tcp_sk(sk)->md5sig_info)
                        return -ENOENT;
                if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                        return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
                return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
        }

        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;

        if (!tcp_sk(sk)->md5sig_info) {
                struct tcp_sock *tp = tcp_sk(sk);
                struct tcp_md5sig_info *p;

                p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
                if (!p)
                        return -ENOMEM;

                tp->md5sig_info = p;
                /* MD5 signing is incompatible with GSO. */
                sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
        }

        /* The add helpers take ownership of newkey. */
        newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
        if (!newkey)
                return -ENOMEM;
        if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
                return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
                                         newkey, cmd.tcpm_keylen);
        }
        return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
735
736 static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
737                                    struct in6_addr *saddr,
738                                    struct in6_addr *daddr,
739                                    struct tcphdr *th, unsigned int tcplen)
740 {
741         struct tcp_md5sig_pool *hp;
742         struct tcp6_pseudohdr *bp;
743         int err;
744
745         hp = tcp_get_md5sig_pool();
746         if (!hp) {
747                 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
748                 goto clear_hash_noput;
749         }
750
751         bp = &hp->md5_blk.ip6;
752
753         /* 1. TCP pseudo-header (RFC2460) */
754         ipv6_addr_copy(&bp->saddr, saddr);
755         ipv6_addr_copy(&bp->daddr, daddr);
756         bp->len = htonl(tcplen);
757         bp->protocol = htonl(IPPROTO_TCP);
758
759         err = tcp_calc_md5_hash(md5_hash, key, sizeof(*bp),
760                                 th, tcplen, hp);
761
762         if (err)
763                 goto clear_hash;
764
765         /* Free up the crypto pool */
766         tcp_put_md5sig_pool();
767 out:
768         return 0;
769 clear_hash:
770         tcp_put_md5sig_pool();
771 clear_hash_noput:
772         memset(md5_hash, 0, 16);
773         goto out;
774 }
775
776 static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
777                                 struct sock *sk,
778                                 struct dst_entry *dst,
779                                 struct request_sock *req,
780                                 struct tcphdr *th, unsigned int tcplen)
781 {
782         struct in6_addr *saddr, *daddr;
783
784         if (sk) {
785                 saddr = &inet6_sk(sk)->saddr;
786                 daddr = &inet6_sk(sk)->daddr;
787         } else {
788                 saddr = &inet6_rsk(req)->loc_addr;
789                 daddr = &inet6_rsk(req)->rmt_addr;
790         }
791         return tcp_v6_do_calc_md5_hash(md5_hash, key,
792                                        saddr, daddr,
793                                        th, tcplen);
794 }
795
/*
 * Verify the TCP-MD5 signature of an inbound segment against the key
 * configured for its source address.
 *
 * Returns 0 when the packet should be accepted (no key configured and no
 * option present, or the digest verifies) and 1 when it must be dropped
 * (option/key mismatch in either direction, or digest failure).
 */
static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
        __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        struct ipv6hdr *ip6h = ipv6_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);
        int genhash;
        u8 newhash[16];

        hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
        hash_location = tcp_parse_md5sig_option(th);

        /* do we have a hash as expected? */
        if (!hash_expected) {
                if (!hash_location)
                        return 0;
                if (net_ratelimit()) {
                        printk(KERN_INFO "MD5 Hash NOT expected but found "
                               "(" NIP6_FMT ", %u)->"
                               "(" NIP6_FMT ", %u)\n",
                               NIP6(ip6h->saddr), ntohs(th->source),
                               NIP6(ip6h->daddr), ntohs(th->dest));
                }
                return 1;
        }

        if (!hash_location) {
                if (net_ratelimit()) {
                        printk(KERN_INFO "MD5 Hash expected but NOT found "
                               "(" NIP6_FMT ", %u)->"
                               "(" NIP6_FMT ", %u)\n",
                               NIP6(ip6h->saddr), ntohs(th->source),
                               NIP6(ip6h->daddr), ntohs(th->dest));
                }
                return 1;
        }

        /* check the signature */
        genhash = tcp_v6_do_calc_md5_hash(newhash,
                                          hash_expected,
                                          &ip6h->saddr, &ip6h->daddr,
                                          th, skb->len);
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                if (net_ratelimit()) {
                        printk(KERN_INFO "MD5 Hash %s for "
                               "(" NIP6_FMT ", %u)->"
                               "(" NIP6_FMT ", %u)\n",
                               genhash ? "failed" : "mismatch",
                               NIP6(ip6h->saddr), ntohs(th->source),
                               NIP6(ip6h->daddr), ntohs(th->dest));
                }
                return 1;
        }
        return 0;
}
851 #endif
852
853 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
854         .family         =       AF_INET6,
855         .obj_size       =       sizeof(struct tcp6_request_sock),
856         .rtx_syn_ack    =       tcp_v6_send_synack,
857         .send_ack       =       tcp_v6_reqsk_send_ack,
858         .destructor     =       tcp_v6_reqsk_destructor,
859         .send_reset     =       tcp_v6_send_reset
860 };
861
#ifdef CONFIG_TCP_MD5SIG
/* AF-specific request-sock ops: MD5 key lookup for pending IPv6 requests. */
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
        .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
};
#endif
867
/* Size and teardown hooks for IPv6 TCP sockets entering TIME_WAIT. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
        .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
        .twsk_unique    = tcp_twsk_unique,
        .twsk_destructor= tcp_twsk_destructor,
};
873
874 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
875 {
876         struct ipv6_pinfo *np = inet6_sk(sk);
877         struct tcphdr *th = tcp_hdr(skb);
878
879         if (skb->ip_summed == CHECKSUM_PARTIAL) {
880                 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
881                 skb->csum_start = skb_transport_header(skb) - skb->head;
882                 skb->csum_offset = offsetof(struct tcphdr, check);
883         } else {
884                 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
885                                             csum_partial((char *)th, th->doff<<2,
886                                                          skb->csum));
887         }
888 }
889
890 static int tcp_v6_gso_send_check(struct sk_buff *skb)
891 {
892         struct ipv6hdr *ipv6h;
893         struct tcphdr *th;
894
895         if (!pskb_may_pull(skb, sizeof(*th)))
896                 return -EINVAL;
897
898         ipv6h = ipv6_hdr(skb);
899         th = tcp_hdr(skb);
900
901         th->check = 0;
902         th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
903                                      IPPROTO_TCP, 0);
904         skb->csum_start = skb_transport_header(skb) - skb->head;
905         skb->csum_offset = offsetof(struct tcphdr, check);
906         skb->ip_summed = CHECKSUM_PARTIAL;
907         return 0;
908 }
909
910 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
911 {
912         struct tcphdr *th = tcp_hdr(skb), *t1;
913         struct sk_buff *buff;
914         struct flowi fl;
915         struct net *net = dev_net(skb->dst->dev);
916         struct sock *ctl_sk = net->ipv6.tcp_sk;
917         unsigned int tot_len = sizeof(*th);
918 #ifdef CONFIG_TCP_MD5SIG
919         struct tcp_md5sig_key *key;
920 #endif
921
922         if (th->rst)
923                 return;
924
925         if (!ipv6_unicast_destination(skb))
926                 return;
927
928 #ifdef CONFIG_TCP_MD5SIG
929         if (sk)
930                 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
931         else
932                 key = NULL;
933
934         if (key)
935                 tot_len += TCPOLEN_MD5SIG_ALIGNED;
936 #endif
937
938         /*
939          * We need to grab some memory, and put together an RST,
940          * and then put it into the queue to be sent.
941          */
942
943         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
944                          GFP_ATOMIC);
945         if (buff == NULL)
946                 return;
947
948         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
949
950         t1 = (struct tcphdr *) skb_push(buff, tot_len);
951
952         /* Swap the send and the receive. */
953         memset(t1, 0, sizeof(*t1));
954         t1->dest = th->source;
955         t1->source = th->dest;
956         t1->doff = tot_len / 4;
957         t1->rst = 1;
958
959         if(th->ack) {
960                 t1->seq = th->ack_seq;
961         } else {
962                 t1->ack = 1;
963                 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
964                                     + skb->len - (th->doff<<2));
965         }
966
967 #ifdef CONFIG_TCP_MD5SIG
968         if (key) {
969                 __be32 *opt = (__be32*)(t1 + 1);
970                 opt[0] = htonl((TCPOPT_NOP << 24) |
971                                (TCPOPT_NOP << 16) |
972                                (TCPOPT_MD5SIG << 8) |
973                                TCPOLEN_MD5SIG);
974                 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
975                                         &ipv6_hdr(skb)->daddr,
976                                         &ipv6_hdr(skb)->saddr,
977                                         t1, tot_len);
978         }
979 #endif
980
981         buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
982
983         memset(&fl, 0, sizeof(fl));
984         ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
985         ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
986
987         t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
988                                     sizeof(*t1), IPPROTO_TCP,
989                                     buff->csum);
990
991         fl.proto = IPPROTO_TCP;
992         fl.oif = inet6_iif(skb);
993         fl.fl_ip_dport = t1->dest;
994         fl.fl_ip_sport = t1->source;
995         security_skb_classify_flow(skb, &fl);
996
997         /* Pass a socket to ip6_dst_lookup either it is for RST
998          * Underlying function will use this to retrieve the network
999          * namespace
1000          */
1001         if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1002
1003                 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1004                         ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1005                         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1006                         TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1007                         return;
1008                 }
1009         }
1010
1011         kfree_skb(buff);
1012 }
1013
1014 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1015                             struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
1016 {
1017         struct tcphdr *th = tcp_hdr(skb), *t1;
1018         struct sk_buff *buff;
1019         struct flowi fl;
1020         struct net *net = dev_net(skb->dev);
1021         struct sock *ctl_sk = net->ipv6.tcp_sk;
1022         unsigned int tot_len = sizeof(struct tcphdr);
1023         __be32 *topt;
1024 #ifdef CONFIG_TCP_MD5SIG
1025         struct tcp_md5sig_key *key;
1026         struct tcp_md5sig_key tw_key;
1027 #endif
1028
1029 #ifdef CONFIG_TCP_MD5SIG
1030         if (!tw && skb->sk) {
1031                 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
1032         } else if (tw && tw->tw_md5_keylen) {
1033                 tw_key.key = tw->tw_md5_key;
1034                 tw_key.keylen = tw->tw_md5_keylen;
1035                 key = &tw_key;
1036         } else {
1037                 key = NULL;
1038         }
1039 #endif
1040
1041         if (ts)
1042                 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1043 #ifdef CONFIG_TCP_MD5SIG
1044         if (key)
1045                 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1046 #endif
1047
1048         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1049                          GFP_ATOMIC);
1050         if (buff == NULL)
1051                 return;
1052
1053         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1054
1055         t1 = (struct tcphdr *) skb_push(buff,tot_len);
1056
1057         /* Swap the send and the receive. */
1058         memset(t1, 0, sizeof(*t1));
1059         t1->dest = th->source;
1060         t1->source = th->dest;
1061         t1->doff = tot_len/4;
1062         t1->seq = htonl(seq);
1063         t1->ack_seq = htonl(ack);
1064         t1->ack = 1;
1065         t1->window = htons(win);
1066
1067         topt = (__be32 *)(t1 + 1);
1068
1069         if (ts) {
1070                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1071                                 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1072                 *topt++ = htonl(tcp_time_stamp);
1073                 *topt = htonl(ts);
1074         }
1075
1076 #ifdef CONFIG_TCP_MD5SIG
1077         if (key) {
1078                 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1079                                 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1080                 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1081                                         &ipv6_hdr(skb)->daddr,
1082                                         &ipv6_hdr(skb)->saddr,
1083                                         t1, tot_len);
1084         }
1085 #endif
1086
1087         buff->csum = csum_partial((char *)t1, tot_len, 0);
1088
1089         memset(&fl, 0, sizeof(fl));
1090         ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1091         ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1092
1093         t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1094                                     tot_len, IPPROTO_TCP,
1095                                     buff->csum);
1096
1097         fl.proto = IPPROTO_TCP;
1098         fl.oif = inet6_iif(skb);
1099         fl.fl_ip_dport = t1->dest;
1100         fl.fl_ip_sport = t1->source;
1101         security_skb_classify_flow(skb, &fl);
1102
1103         if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1104                 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1105                         ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1106                         TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1107                         return;
1108                 }
1109         }
1110
1111         kfree_skb(buff);
1112 }
1113
1114 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1115 {
1116         struct inet_timewait_sock *tw = inet_twsk(sk);
1117         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1118
1119         tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1120                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1121                         tcptw->tw_ts_recent);
1122
1123         inet_twsk_put(tw);
1124 }
1125
1126 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1127 {
1128         tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
1129 }
1130
1131
/*
 * Demultiplex a segment that arrived on a listening socket: look for a
 * matching pending connection request, then for an established socket
 * spawned from this listener.
 *
 * Returns @sk itself (continue listen processing), a different established
 * socket (returned with bh_lock_sock() held), the result of
 * tcp_check_req()/cookie_v6_check(), or NULL when the segment should be
 * discarded.
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
        struct request_sock *req, **prev;
        const struct tcphdr *th = tcp_hdr(skb);
        struct sock *nsk;

        /* Find possible connection requests. */
        req = inet6_csk_search_req(sk, &prev, th->source,
                                   &ipv6_hdr(skb)->saddr,
                                   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
        if (req)
                return tcp_check_req(sk, skb, req, prev);

        nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
                        &ipv6_hdr(skb)->saddr, th->source,
                        &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                /* TIME_WAIT match: drop the lookup reference and discard. */
                inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }

#ifdef CONFIG_SYN_COOKIES
        /* Bare ACK with no match: may be a syncookie completing. */
        if (!th->rst && !th->syn && th->ack)
                sk = cookie_v6_check(sk, skb);
#endif
        return sk;
}
1164
1165 /* FIXME: this is substantially similar to the ipv4 code.
1166  * Can some kind of merge be done? -- erics
1167  */
/*
 * Handle an incoming SYN on a listening socket: allocate a request sock,
 * record the peer's addresses/options, pick the ISN (or a syncookie) and
 * transmit the SYN|ACK.  Always returns 0 so the caller sends no reset.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct inet6_request_sock *treq;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_options_received tmp_opt;
        struct tcp_sock *tp = tcp_sk(sk);
        struct request_sock *req = NULL;
        __u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
        int want_cookie = 0;
#else
#define want_cookie 0
#endif

        /* v4-mapped SYN: hand over to the IPv4 path. */
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        /* SYN queue full: fall back to syncookies when enabled, else drop. */
        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
                if (net_ratelimit())
                        syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
                if (sysctl_tcp_syncookies)
                        want_cookie = 1;
                else
#endif
                goto drop;
        }

        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
        if (req == NULL)
                goto drop;

#ifdef CONFIG_TCP_MD5SIG
        tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
        tmp_opt.user_mss = tp->rx_opt.user_mss;

        tcp_parse_options(skb, &tmp_opt, 0);

        /* Syncookies can only encode options when a timestamp is present. */
        if (want_cookie && !tmp_opt.saw_tstamp)
                tcp_clear_options(&tmp_opt);

        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
        tcp_openreq_init(req, &tmp_opt, skb);

        treq = inet6_rsk(req);
        ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
        ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
        treq->pktopts = NULL;
        if (!want_cookie)
                TCP_ECN_create_request(req, tcp_hdr(skb));

        if (want_cookie) {
                isn = cookie_v6_init_sequence(sk, skb, &req->mss);
                req->cookie_ts = tmp_opt.tstamp_ok;
        } else if (!isn) {
                /* Latch the SYN's IPv6 options if the user asked for them. */
                if (ipv6_opt_accepted(sk, skb) ||
                    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
                    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                        atomic_inc(&skb->users);
                        treq->pktopts = skb;
                }
                treq->iif = sk->sk_bound_dev_if;

                /* So that link locals have meaning */
                if (!sk->sk_bound_dev_if &&
                    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
                        treq->iif = inet6_iif(skb);

                isn = tcp_v6_init_sequence(skb);
        }

        tcp_rsk(req)->snt_isn = isn;

        security_inet_conn_request(sk, skb, req);

        if (tcp_v6_send_synack(sk, req))
                goto drop;

        /* Cookie requests are not kept: the ACK must prove itself later,
         * so fall through to drop and free the request sock.
         */
        if (!want_cookie) {
                inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
                return 0;
        }

drop:
        if (req)
                reqsk_free(req);

        return 0; /* don't send reset */
}
1267
1268 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1269                                           struct request_sock *req,
1270                                           struct dst_entry *dst)
1271 {
1272         struct inet6_request_sock *treq = inet6_rsk(req);
1273         struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1274         struct tcp6_sock *newtcp6sk;
1275         struct inet_sock *newinet;
1276         struct tcp_sock *newtp;
1277         struct sock *newsk;
1278         struct ipv6_txoptions *opt;
1279 #ifdef CONFIG_TCP_MD5SIG
1280         struct tcp_md5sig_key *key;
1281 #endif
1282
1283         if (skb->protocol == htons(ETH_P_IP)) {
1284                 /*
1285                  *      v6 mapped
1286                  */
1287
1288                 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1289
1290                 if (newsk == NULL)
1291                         return NULL;
1292
1293                 newtcp6sk = (struct tcp6_sock *)newsk;
1294                 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1295
1296                 newinet = inet_sk(newsk);
1297                 newnp = inet6_sk(newsk);
1298                 newtp = tcp_sk(newsk);
1299
1300                 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1301
1302                 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1303                               newinet->daddr);
1304
1305                 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1306                               newinet->saddr);
1307
1308                 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1309
1310                 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1311                 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1312 #ifdef CONFIG_TCP_MD5SIG
1313                 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1314 #endif
1315
1316                 newnp->pktoptions  = NULL;
1317                 newnp->opt         = NULL;
1318                 newnp->mcast_oif   = inet6_iif(skb);
1319                 newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1320
1321                 /*
1322                  * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1323                  * here, tcp_create_openreq_child now does this for us, see the comment in
1324                  * that function for the gory details. -acme
1325                  */
1326
1327                 /* It is tricky place. Until this moment IPv4 tcp
1328                    worked with IPv6 icsk.icsk_af_ops.
1329                    Sync it now.
1330                  */
1331                 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1332
1333                 return newsk;
1334         }
1335
1336         opt = np->opt;
1337
1338         if (sk_acceptq_is_full(sk))
1339                 goto out_overflow;
1340
1341         if (dst == NULL) {
1342                 struct in6_addr *final_p = NULL, final;
1343                 struct flowi fl;
1344
1345                 memset(&fl, 0, sizeof(fl));
1346                 fl.proto = IPPROTO_TCP;
1347                 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1348                 if (opt && opt->srcrt) {
1349                         struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1350                         ipv6_addr_copy(&final, &fl.fl6_dst);
1351                         ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1352                         final_p = &final;
1353                 }
1354                 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1355                 fl.oif = sk->sk_bound_dev_if;
1356                 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1357                 fl.fl_ip_sport = inet_sk(sk)->sport;
1358                 security_req_classify_flow(req, &fl);
1359
1360                 if (ip6_dst_lookup(sk, &dst, &fl))
1361                         goto out;
1362
1363                 if (final_p)
1364                         ipv6_addr_copy(&fl.fl6_dst, final_p);
1365
1366                 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1367                         goto out;
1368         }
1369
1370         newsk = tcp_create_openreq_child(sk, req, skb);
1371         if (newsk == NULL)
1372                 goto out;
1373
1374         /*
1375          * No need to charge this sock to the relevant IPv6 refcnt debug socks
1376          * count here, tcp_create_openreq_child now does this for us, see the
1377          * comment in that function for the gory details. -acme
1378          */
1379
1380         newsk->sk_gso_type = SKB_GSO_TCPV6;
1381         __ip6_dst_store(newsk, dst, NULL, NULL);
1382
1383         newtcp6sk = (struct tcp6_sock *)newsk;
1384         inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1385
1386         newtp = tcp_sk(newsk);
1387         newinet = inet_sk(newsk);
1388         newnp = inet6_sk(newsk);
1389
1390         memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1391
1392         ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1393         ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1394         ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1395         newsk->sk_bound_dev_if = treq->iif;
1396
1397         /* Now IPv6 options...
1398
1399            First: no IPv4 options.
1400          */
1401         newinet->opt = NULL;
1402         newnp->ipv6_fl_list = NULL;
1403
1404         /* Clone RX bits */
1405         newnp->rxopt.all = np->rxopt.all;
1406
1407         /* Clone pktoptions received with SYN */
1408         newnp->pktoptions = NULL;
1409         if (treq->pktopts != NULL) {
1410                 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1411                 kfree_skb(treq->pktopts);
1412                 treq->pktopts = NULL;
1413                 if (newnp->pktoptions)
1414                         skb_set_owner_r(newnp->pktoptions, newsk);
1415         }
1416         newnp->opt        = NULL;
1417         newnp->mcast_oif  = inet6_iif(skb);
1418         newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1419
1420         /* Clone native IPv6 options from listening socket (if any)
1421
1422            Yes, keeping reference count would be much more clever,
1423            but we make one more one thing there: reattach optmem
1424            to newsk.
1425          */
1426         if (opt) {
1427                 newnp->opt = ipv6_dup_options(newsk, opt);
1428                 if (opt != np->opt)
1429                         sock_kfree_s(sk, opt, opt->tot_len);
1430         }
1431
1432         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1433         if (newnp->opt)
1434                 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1435                                                      newnp->opt->opt_flen);
1436
1437         tcp_mtup_init(newsk);
1438         tcp_sync_mss(newsk, dst_mtu(dst));
1439         newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1440         tcp_initialize_rcv_mss(newsk);
1441
1442         newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1443
1444 #ifdef CONFIG_TCP_MD5SIG
1445         /* Copy over the MD5 key from the original socket */
1446         if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1447                 /* We're using one, so create a matching key
1448                  * on the newsk structure. If we fail to get
1449                  * memory, then we end up not copying the key
1450                  * across. Shucks.
1451                  */
1452                 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1453                 if (newkey != NULL)
1454                         tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1455                                           newkey, key->keylen);
1456         }
1457 #endif
1458
1459         __inet6_hash(newsk);
1460         __inet_inherit_port(sk, newsk);
1461
1462         return newsk;
1463
1464 out_overflow:
1465         NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1466 out:
1467         NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1468         if (opt && opt != np->opt)
1469                 sock_kfree_s(sk, opt, opt->tot_len);
1470         dst_release(dst);
1471         return NULL;
1472 }
1473
1474 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1475 {
1476         if (skb->ip_summed == CHECKSUM_COMPLETE) {
1477                 if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
1478                                   &ipv6_hdr(skb)->daddr, skb->csum)) {
1479                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1480                         return 0;
1481                 }
1482         }
1483
1484         skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
1485                                               &ipv6_hdr(skb)->saddr,
1486                                               &ipv6_hdr(skb)->daddr, 0));
1487
1488         if (skb->len <= 76) {
1489                 return __skb_checksum_complete(skb);
1490         }
1491         return 0;
1492 }
1493
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Returns 0 when the skb has been consumed (queued or freed).
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp;
        struct sk_buff *opt_skb = NULL;

        /* Imagine: socket is IPv6. IPv4 packet arrives,
           goes to IPv4 receive handler and backlogged.
           From backlog it always goes here. Kerboom...
           Fortunately, tcp_rcv_established and rcv_established
           handle them correctly, but it is not case with
           tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
        /* Drop segments failing (or unexpectedly carrying) MD5 signatures. */
        if (tcp_v6_inbound_md5_hash (sk, skb))
                goto discard;
#endif

        if (sk_filter(sk, skb))
                goto discard;

        /*
         *      socket locking is here for SMP purposes as backlog rcv
         *      is currently called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code, where we
           may make it not affecting IPv4.
           The rest of code is protocol independent,
           and I do not like idea to uglify IPv4.

           Actually, all the idea behind IPV6_PKTOPTIONS
           looks not very well thought. For now we latch
           options, received in the last packet, enqueued
           by tcp. Feel free to propose better solution.
                                               --ANK (980728)
         */
        if (np->rxopt.all)
                opt_skb = skb_clone(skb, GFP_ATOMIC);

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
                if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
                        goto reset;
                TCP_CHECK_TIMER(sk);
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
        }

        if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v6_hnd_req(sk, skb);
                if (!nsk)
                        goto discard;

                /*
                 * Queue it on the new socket if the new socket is active,
                 * otherwise we just shortcircuit this and continue with
                 * the new socket..
                 */
                if(nsk != sk) {
                        if (tcp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        }

        TCP_CHECK_TIMER(sk);
        if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
                goto reset;
        TCP_CHECK_TIMER(sk);
        if (opt_skb)
                goto ipv6_pktoptions;
        return 0;

reset:
        tcp_v6_send_reset(sk, skb);
discard:
        if (opt_skb)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
csum_err:
        TCP_INC_STATS_BH(TCP_MIB_INERRS);
        goto discard;


ipv6_pktoptions:
        /* Do you ask, what is it?

           1. skb was enqueued by tcp.
           2. skb is added to tail of read queue, rather than out of order.
           3. socket is not in passive state.
           4. Finally, it really contains options, which user wants to receive.
         */
        tp = tcp_sk(sk);
        if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
            !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
                if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
                        np->mcast_oif = inet6_iif(opt_skb);
                if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
                        np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
                if (ipv6_opt_accepted(sk, opt_skb)) {
                        skb_set_owner_r(opt_skb, sk);
                        /* Latch the clone; free whatever it replaced. */
                        opt_skb = xchg(&np->pktoptions, opt_skb);
                } else {
                        __kfree_skb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, NULL);
                }
        }

        if (opt_skb)
                kfree_skb(opt_skb);
        return 0;
}
1628
/*
 * Main receive entry point for TCP over IPv6, called from the inet6
 * protocol dispatch (see tcpv6_protocol.handler below).  Validates the
 * TCP header, fills in the skb control block, looks up the owning
 * socket and either processes the segment immediately, prequeues it,
 * or places it on the socket backlog if the socket is owned by a user
 * context.  Returns 0 on consumption, -1 if tcp_v6_do_rcv() reported
 * an error.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
        struct tcphdr *th;
        struct sock *sk;
        int ret;

        /* Only segments unicast to this host are handled. */
        if (skb->pkt_type != PACKET_HOST)
                goto discard_it;

        /*
         *      Count it even if it's bad.
         */
        TCP_INC_STATS_BH(TCP_MIB_INSEGS);

        /* Make sure the fixed-size TCP header is in the linear area. */
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;

        th = tcp_hdr(skb);

        /* th->doff is in 32-bit words; below the minimum means a
         * malformed header. */
        if (th->doff < sizeof(struct tcphdr)/4)
                goto bad_packet;
        /* Pull the full header including TCP options. */
        if (!pskb_may_pull(skb, th->doff*4))
                goto discard_it;

        if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
                goto bad_packet;

        /* pskb_may_pull() may have reallocated the header; reload. */
        th = tcp_hdr(skb);
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        /* end_seq counts SYN and FIN as one sequence unit each. */
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff*4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->when = 0;
        TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
        TCP_SKB_CB(skb)->sacked = 0;

        /* NOTE(review): source port is passed in network byte order,
         * destination in host order — this matches the __inet6_lookup()
         * calling convention (sport is __be16, dport a host u16). */
        sk = __inet6_lookup(dev_net(skb->dev), &tcp_hashinfo,
                        &ipv6_hdr(skb)->saddr, th->source,
                        &ipv6_hdr(skb)->daddr, ntohs(th->dest),
                        inet6_iif(skb));

        if (!sk)
                goto no_tcp_socket;

process:
        /* TIME_WAIT minisockets get special handling below. */
        if (sk->sk_state == TCP_TIME_WAIT)
                goto do_time_wait;

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        if (sk_filter(sk, skb))
                goto discard_and_relse;

        skb->dev = NULL;

        bh_lock_sock_nested(sk);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
                /* With a DMA copy engine available, bypass the prequeue
                 * and process directly so the engine can be used. */
                struct tcp_sock *tp = tcp_sk(sk);
                if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
                        tp->ucopy.dma_chan = get_softnet_dma();
                if (tp->ucopy.dma_chan)
                        ret = tcp_v6_do_rcv(sk, skb);
                else
#endif
                {
                        /* Try to hand the skb to a waiting reader via the
                         * prequeue; otherwise process it here in softirq. */
                        if (!tcp_prequeue(sk, skb))
                                ret = tcp_v6_do_rcv(sk, skb);
                }
        } else
                /* Socket locked by a process context: defer to backlog,
                 * drained when the owner releases the lock. */
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);

        /* Drop the reference taken by __inet6_lookup(). */
        sock_put(sk);
        return ret ? -1 : 0;

no_tcp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;

        if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
        } else {
                /* Valid segment for a nonexistent connection: send RST. */
                tcp_v6_send_reset(NULL, skb);
        }

discard_it:

        /*
         *      Discard frame
         */

        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;

do_time_wait:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }

        if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
                inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }

        switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN:
        {
                /* A new SYN for a TIME_WAIT port: if a listener exists,
                 * retire the timewait socket and restart processing on
                 * the listener. */
                struct sock *sk2;

                sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
                                            &ipv6_hdr(skb)->daddr,
                                            ntohs(th->dest), inet6_iif(skb));
                if (sk2 != NULL) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
                        inet_twsk_deschedule(tw, &tcp_death_row);
                        inet_twsk_put(tw);
                        sk = sk2;
                        goto process;
                }
                /* Fall through to ACK */
        }
        case TCP_TW_ACK:
                tcp_v6_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
                goto no_tcp_socket;
        case TCP_TW_SUCCESS:;
        }
        goto discard_it;
}
1769
/*
 * Peer timestamp recording hook (.remember_stamp).  Timestamp recycling
 * is not implemented for IPv6 peers in this version, so the hook simply
 * reports that nothing was remembered.
 */
static int tcp_v6_remember_stamp(struct sock *sk)
{
        return 0;
}
1775
/* AF-specific connection-socket operations for a native IPv6 TCP socket;
 * installed as icsk->icsk_af_ops by tcp_v6_init_sock(). */
static struct inet_connection_sock_af_ops ipv6_specific = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = tcp_v6_send_check,
        .rebuild_header    = inet6_sk_rebuild_header,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
        .remember_stamp    = tcp_v6_remember_stamp,
        .net_header_len    = sizeof(struct ipv6hdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
        .bind_conflict     = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1794
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 (RFC 2385) operations for native IPv6 sockets; installed as
 * tp->af_specific by tcp_v6_init_sock(). */
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
        .md5_lookup     =       tcp_v6_md5_lookup,
        .calc_md5_hash  =       tcp_v6_calc_md5_hash,
        .md5_add        =       tcp_v6_md5_add_func,
        .md5_parse      =       tcp_v6_parse_md5_keys,
};
#endif
1803
/*
 *      TCP over IPv4 via INET6 API
 */

/* AF-specific operations used when an AF_INET6 socket is actually
 * talking IPv4 (v4-mapped address): transmit/header handling comes from
 * the IPv4 stack, while the sockaddr/sockopt plumbing stays IPv6. */
static struct inet_connection_sock_af_ops ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = tcp_v4_send_check,
        .rebuild_header    = inet_sk_rebuild_header,
        .conn_request      = tcp_v6_conn_request,
        .syn_recv_sock     = tcp_v6_syn_recv_sock,
        .remember_stamp    = tcp_v4_remember_stamp,
        .net_header_len    = sizeof(struct iphdr),
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
        .bind_conflict     = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1826
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 operations for v4-mapped sockets: hashing/lookup use the IPv4
 * routines, while key add/parse still go through the IPv6 entry points. */
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
        .md5_lookup     =       tcp_v4_md5_lookup,
        .calc_md5_hash  =       tcp_v4_calc_md5_hash,
        .md5_add        =       tcp_v6_md5_add_func,
        .md5_parse      =       tcp_v6_parse_md5_keys,
};
#endif
1835
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
/*
 * Per-socket initialisation hook (tcpv6_prot.init) for a freshly
 * allocated TCP/IPv6 socket: timers, congestion-control defaults and
 * the AF-specific operation tables.  Always returns 0.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);

        /* Initial RTO and smoothed mean deviation before any RTT sample. */
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;

        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
         * algorithms that we must have the following bandaid to talk
         * efficiently to them.  -DaveM
         */
        tp->snd_cwnd = 2;

        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
        tp->snd_ssthresh = 0x7fffffff;
        tp->snd_cwnd_clamp = ~0;
        /* Conservative default MSS until the path MSS is learned. */
        tp->mss_cache = 536;

        tp->reordering = sysctl_tcp_reordering;

        sk->sk_state = TCP_CLOSE;

        /* Native IPv6 ops by default; switched to ipv6_mapped if the
         * socket later connects to a v4-mapped address. */
        icsk->icsk_af_ops = &ipv6_specific;
        icsk->icsk_ca_ops = &tcp_init_congestion_ops;
        icsk->icsk_sync_mss = tcp_sync_mss;
        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
        tp->af_specific = &tcp_sock_ipv6_specific;
#endif

        /* Default buffer sizes from the tcp_{w,r}mem sysctls. */
        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];

        atomic_inc(&tcp_sockets_allocated);

        return 0;
}
1886
/*
 * Socket destructor hook (tcpv6_prot.destroy).  Tears down the MD5 key
 * list if present, releases the protocol state shared with IPv4 via
 * tcp_v4_destroy_sock(), then the IPv6-level socket state.
 */
static int tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        /* Clean up the MD5 key list */
        if (tcp_sk(sk)->md5sig_info)
                tcp_v6_clear_md5_list(sk);
#endif
        tcp_v4_destroy_sock(sk);
        return inet6_destroy_sock(sk);
}
1897
1898 #ifdef CONFIG_PROC_FS
1899 /* Proc filesystem TCPv6 sock list dumping. */
/*
 * Emit one /proc/net/tcp6 row for a pending connection request
 * (SYN_RECV) on listener @sk.  @i is the row index, @uid the owner of
 * the listening socket.  Field layout must match get_tcp6_sock().
 */
static void get_openreq6(struct seq_file *seq,
                         struct sock *sk, struct request_sock *req, int i, int uid)
{
        /* Remaining lifetime of the request in jiffies, clamped at 0. */
        int ttd = req->expires - jiffies;
        struct in6_addr *src = &inet6_rsk(req)->loc_addr;
        struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

        if (ttd < 0)
                ttd = 0;

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
                   ntohs(inet_sk(sk)->sport),
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3],
                   ntohs(inet_rsk(req)->rmt_port),
                   TCP_SYN_RECV,
                   0,0, /* could print option size, but that is af dependent. */
                   1,   /* timers active (only the expire timer) */
                   jiffies_to_clock_t(ttd),
                   req->retrans,
                   uid,
                   0,  /* non standard timer */
                   0, /* open_requests have no inode */
                   0, req);
}
1930
/*
 * Emit one /proc/net/tcp6 row for an established or listening socket
 * @sp at row index @i.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
        struct in6_addr *dest, *src;
        __u16 destp, srcp;
        int timer_active;
        unsigned long timer_expires;
        struct inet_sock *inet = inet_sk(sp);
        struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
        struct ipv6_pinfo *np = inet6_sk(sp);

        dest  = &np->daddr;
        src   = &np->rcv_saddr;
        destp = ntohs(inet->dport);
        srcp  = ntohs(inet->sport);

        /* Timer code for the "tr" column: 1 = retransmit timer,
         * 4 = zero-window probe timer, 2 = sk_timer (presumably
         * keepalive — mirrors the IPv4 proc output), 0 = none. */
        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
                timer_expires   = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active    = 2;
                timer_expires   = sp->sk_timer.expires;
        } else {
                timer_active    = 0;
                /* No timer: print a zero remaining time below. */
                timer_expires = jiffies;
        }

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   sp->sk_state,
                   tp->write_seq-tp->snd_una,
                   /* rx_queue: ack backlog for listeners, unread bytes
                    * otherwise. */
                   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
                   timer_active,
                   jiffies_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
                   sock_i_uid(sp),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
                   icsk->icsk_rto,
                   icsk->icsk_ack.ato,
                   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
                   /* ssthresh >= 0xFFFF is shown as -1 ("unset"). */
                   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
                   );
}
1985
/*
 * Emit one /proc/net/tcp6 row for a TIME_WAIT minisocket @tw at row
 * index @i.  Most per-socket columns are printed as 0 since a
 * timewait socket carries no full TCP state.
 */
static void get_timewait6_sock(struct seq_file *seq,
                               struct inet_timewait_sock *tw, int i)
{
        struct in6_addr *dest, *src;
        __u16 destp, srcp;
        struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
        /* Remaining TIME_WAIT lifetime in jiffies, clamped at 0. */
        int ttd = tw->tw_ttd - jiffies;

        if (ttd < 0)
                ttd = 0;

        dest = &tw6->tw_v6_daddr;
        src  = &tw6->tw_v6_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
                   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
                   atomic_read(&tw->tw_refcnt), tw);
}
2014
2015 static int tcp6_seq_show(struct seq_file *seq, void *v)
2016 {
2017         struct tcp_iter_state *st;
2018
2019         if (v == SEQ_START_TOKEN) {
2020                 seq_puts(seq,
2021                          "  sl  "
2022                          "local_address                         "
2023                          "remote_address                        "
2024                          "st tx_queue rx_queue tr tm->when retrnsmt"
2025                          "   uid  timeout inode\n");
2026                 goto out;
2027         }
2028         st = seq->private;
2029
2030         switch (st->state) {
2031         case TCP_SEQ_STATE_LISTENING:
2032         case TCP_SEQ_STATE_ESTABLISHED:
2033                 get_tcp6_sock(seq, v, st->num);
2034                 break;
2035         case TCP_SEQ_STATE_OPENREQ:
2036                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2037                 break;
2038         case TCP_SEQ_STATE_TIME_WAIT:
2039                 get_timewait6_sock(seq, v, st->num);
2040                 break;
2041         }
2042 out:
2043         return 0;
2044 }
2045
/* Descriptor for the /proc/net/tcp6 file.  Only the AF-specific pieces
 * are filled in here; the remaining fops/seq_ops fields are presumably
 * completed by tcp_proc_register() — shared with the IPv4 code. */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
        .name           = "tcp6",
        .family         = AF_INET6,
        .seq_fops       = {
                .owner          = THIS_MODULE,
        },
        .seq_ops        = {
                .show           = tcp6_seq_show,
        },
};
2056
/* Register /proc/net/tcp6 for namespace @net.  Returns 0 or a negative
 * errno from the generic TCP proc code. */
int tcp6_proc_init(struct net *net)
{
        return tcp_proc_register(net, &tcp6_seq_afinfo);
}
2061
/* Remove /proc/net/tcp6 for namespace @net; undoes tcp6_proc_init(). */
void tcp6_proc_exit(struct net *net)
{
        tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
2066 #endif
2067
/* Protocol descriptor for AF_INET6/SOCK_STREAM sockets.  Most callbacks
 * and limits are shared with IPv4 TCP; connect/init/destroy/backlog_rcv/
 * hash are the IPv6-specific entry points defined in this file. */
struct proto tcpv6_prot = {
        .name                   = "TCPv6",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .connect                = tcp_v6_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v6_init_sock,
        .destroy                = tcp_v6_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .recvmsg                = tcp_recvmsg,
        .backlog_rcv            = tcp_v6_do_rcv,
        .hash                   = tcp_v6_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .sockets_allocated      = &tcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
        .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp6_sock),
        .twsk_prot              = &tcp6_timewait_sock_ops,
        .rsk_prot               = &tcp6_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
#ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
#endif
};
2104
/* Handler for IPPROTO_TCP in the IPv6 input path, registered by
 * tcpv6_init().  XFRM policy checks are done per-socket in tcp_v6_rcv()
 * rather than here (INET6_PROTO_NOPOLICY). */
static struct inet6_protocol tcpv6_protocol = {
        .handler        =       tcp_v6_rcv,
        .err_handler    =       tcp_v6_err,
        .gso_send_check =       tcp_v6_gso_send_check,
        .gso_segment    =       tcp_tso_segment,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
2112
/* socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP) entry in the inet6 protocol
 * switch; binds tcpv6_prot to the generic inet6 stream socket ops.
 * .capability = -1 means no extra capability required to create one. */
static struct inet_protosw tcpv6_protosw = {
        .type           =       SOCK_STREAM,
        .protocol       =       IPPROTO_TCP,
        .prot           =       &tcpv6_prot,
        .ops            =       &inet6_stream_ops,
        .capability     =       -1,
        .no_check       =       0,
        .flags          =       INET_PROTOSW_PERMANENT |
                                INET_PROTOSW_ICSK,
};
2123
/* Per-namespace init: create the kernel control socket used to emit
 * TCP/IPv6 control segments (e.g. resets) for @net. */
static int tcpv6_net_init(struct net *net)
{
        return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
                                    SOCK_RAW, IPPROTO_TCP, net);
}
2129
/* Per-namespace teardown: release the control socket created by
 * tcpv6_net_init(). */
static void tcpv6_net_exit(struct net *net)
{
        inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
2134
/* Network-namespace hooks registered via register_pernet_subsys(). */
static struct pernet_operations tcpv6_net_ops = {
        .init = tcpv6_net_init,
        .exit = tcpv6_net_exit,
};
2139
2140 int __init tcpv6_init(void)
2141 {
2142         int ret;
2143
2144         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2145         if (ret)
2146                 goto out;
2147
2148         /* register inet6 protocol */
2149         ret = inet6_register_protosw(&tcpv6_protosw);
2150         if (ret)
2151                 goto out_tcpv6_protocol;
2152
2153         ret = register_pernet_subsys(&tcpv6_net_ops);
2154         if (ret)
2155                 goto out_tcpv6_protosw;
2156 out:
2157         return ret;
2158
2159 out_tcpv6_protocol:
2160         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2161 out_tcpv6_protosw:
2162         inet6_unregister_protosw(&tcpv6_protosw);
2163         goto out;
2164 }
2165
/* Module teardown: undo tcpv6_init() in reverse order of registration. */
void tcpv6_exit(void)
{
        unregister_pernet_subsys(&tcpv6_net_ops);
        inet6_unregister_protosw(&tcpv6_protosw);
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}