diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5f9ad95304cae143eb837b5e7a007aa3f8075460..13dfb391cdf17a376c301c9f56973db93696c1d6 100644
@@ -66,6 +66,7 @@
 #include <net/icmp.h>
 #include <net/inet_hashtables.h>
 #include <net/tcp.h>
+#include <net/transp_v6.h>
 #include <net/ipv6.h>
 #include <net/inet_common.h>
 #include <net/xfrm.h>
@@ -76,7 +77,6 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
-extern int sysctl_ip_dynaddr;
 int sysctl_tcp_tw_reuse;
 int sysctl_tcp_low_latency;
 
@@ -97,309 +97,21 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
        .port_rover     = 1024 - 1,
 };
 
-/*
- * This array holds the first and last local port number.
- * For high-usage systems, use sysctl to change this to
- * 32768-61000
- */
-int sysctl_local_port_range[2] = { 1024, 4999 };
-
-static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
-{
-       const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
-       struct sock *sk2;
-       struct hlist_node *node;
-       int reuse = sk->sk_reuse;
-
-       sk_for_each_bound(sk2, node, &tb->owners) {
-               if (sk != sk2 &&
-                   !tcp_v6_ipv6only(sk2) &&
-                   (!sk->sk_bound_dev_if ||
-                    !sk2->sk_bound_dev_if ||
-                    sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
-                       if (!reuse || !sk2->sk_reuse ||
-                           sk2->sk_state == TCP_LISTEN) {
-                               const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2);
-                               if (!sk2_rcv_saddr || !sk_rcv_saddr ||
-                                   sk2_rcv_saddr == sk_rcv_saddr)
-                                       break;
-                       }
-               }
-       }
-       return node != NULL;
-}
-
-/* Obtain a reference to a local port for the given sock,
- * if snum is zero it means select any available local port.
- */
 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 {
-       struct inet_bind_hashbucket *head;
-       struct hlist_node *node;
-       struct inet_bind_bucket *tb;
-       int ret;
-
-       local_bh_disable();
-       if (!snum) {
-               int low = sysctl_local_port_range[0];
-               int high = sysctl_local_port_range[1];
-               int remaining = (high - low) + 1;
-               int rover;
-
-               spin_lock(&tcp_hashinfo.portalloc_lock);
-               if (tcp_hashinfo.port_rover < low)
-                       rover = low;
-               else
-                       rover = tcp_hashinfo.port_rover;
-               do {
-                       rover++;
-                       if (rover > high)
-                               rover = low;
-                       head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
-                       spin_lock(&head->lock);
-                       inet_bind_bucket_for_each(tb, node, &head->chain)
-                               if (tb->port == rover)
-                                       goto next;
-                       break;
-               next:
-                       spin_unlock(&head->lock);
-               } while (--remaining > 0);
-               tcp_hashinfo.port_rover = rover;
-               spin_unlock(&tcp_hashinfo.portalloc_lock);
-
-               /* Exhausted local port range during search?  It is not
-                * possible for us to be holding one of the bind hash
-                * locks if this test triggers, because if 'remaining'
-                * drops to zero, we broke out of the do/while loop at
-                * the top level, not from the 'break;' statement.
-                */
-               ret = 1;
-               if (unlikely(remaining <= 0))
-                       goto fail;
-
-               /* OK, here is the one we will use.  HEAD is
-                * non-NULL and we hold it's mutex.
-                */
-               snum = rover;
-       } else {
-               head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
-               spin_lock(&head->lock);
-               inet_bind_bucket_for_each(tb, node, &head->chain)
-                       if (tb->port == snum)
-                               goto tb_found;
-       }
-       tb = NULL;
-       goto tb_not_found;
-tb_found:
-       if (!hlist_empty(&tb->owners)) {
-               if (sk->sk_reuse > 1)
-                       goto success;
-               if (tb->fastreuse > 0 &&
-                   sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
-                       goto success;
-               } else {
-                       ret = 1;
-                       if (tcp_bind_conflict(sk, tb))
-                               goto fail_unlock;
-               }
-       }
-tb_not_found:
-       ret = 1;
-       if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL)
-               goto fail_unlock;
-       if (hlist_empty(&tb->owners)) {
-               if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
-                       tb->fastreuse = 1;
-               else
-                       tb->fastreuse = 0;
-       } else if (tb->fastreuse &&
-                  (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
-               tb->fastreuse = 0;
-success:
-       if (!inet_sk(sk)->bind_hash)
-               inet_bind_hash(sk, tb, snum);
-       BUG_TRAP(inet_sk(sk)->bind_hash == tb);
-       ret = 0;
-
-fail_unlock:
-       spin_unlock(&head->lock);
-fail:
-       local_bh_enable();
-       return ret;
+       return inet_csk_get_port(&tcp_hashinfo, sk, snum);
 }
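
The heavy lifting that used to live here, a rover-style scan of the local port range under the bind-hash locks, now sits in the protocol-independent inet_csk_get_port(). A minimal sketch of that scan, with illustrative names (pick_ephemeral_port, port_in_use, LOW, HIGH are not kernel APIs):

	static int pick_ephemeral_port(int (*port_in_use)(int), int *rover)
	{
		enum { LOW = 1024, HIGH = 4999 };	/* sysctl_local_port_range defaults */
		int remaining = HIGH - LOW + 1;
		int port = (*rover < LOW) ? LOW : *rover;

		do {
			if (++port > HIGH)
				port = LOW;		/* wrap within the range */
			if (!port_in_use(port)) {
				*rover = port;		/* resume here on the next call */
				return port;
			}
		} while (--remaining > 0);

		return -1;				/* whole range exhausted */
	}

The rover persisting across calls is why tcp_hashinfo.port_rover above is initialised to 1024 - 1: the pre-increment makes the first allocation land on 1024.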
 
 static void tcp_v4_hash(struct sock *sk)
 {
-       if (sk->sk_state != TCP_CLOSE) {
-               local_bh_disable();
-               __inet_hash(&tcp_hashinfo, sk, 1);
-               local_bh_enable();
-       }
+       inet_hash(&tcp_hashinfo, sk);
 }
 
 void tcp_unhash(struct sock *sk)
 {
-       rwlock_t *lock;
-
-       if (sk_unhashed(sk))
-               goto ende;
-
-       if (sk->sk_state == TCP_LISTEN) {
-               local_bh_disable();
-               inet_listen_wlock(&tcp_hashinfo);
-               lock = &tcp_hashinfo.lhash_lock;
-       } else {
-               struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[sk->sk_hashent];
-               lock = &head->lock;
-               write_lock_bh(&head->lock);
-       }
-
-       if (__sk_del_node_init(sk))
-               sock_prot_dec_use(sk->sk_prot);
-       write_unlock_bh(lock);
-
- ende:
-       if (sk->sk_state == TCP_LISTEN)
-               wake_up(&tcp_hashinfo.lhash_wait);
-}
-
-/* Don't inline this cruft.  Here are some nice properties to
- * exploit here.  The BSD API does not allow a listening TCP
- * to specify the remote port nor the remote address for the
- * connection.  So always assume those are both wildcarded
- * during the search since they can never be otherwise.
- */
-static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head,
-                                            const u32 daddr,
-                                            const unsigned short hnum,
-                                            const int dif)
-{
-       struct sock *result = NULL, *sk;
-       struct hlist_node *node;
-       int score, hiscore;
-
-       hiscore=-1;
-       sk_for_each(sk, node, head) {
-               struct inet_sock *inet = inet_sk(sk);
-
-               if (inet->num == hnum && !ipv6_only_sock(sk)) {
-                       __u32 rcv_saddr = inet->rcv_saddr;
-
-                       score = (sk->sk_family == PF_INET ? 1 : 0);
-                       if (rcv_saddr) {
-                               if (rcv_saddr != daddr)
-                                       continue;
-                               score+=2;
-                       }
-                       if (sk->sk_bound_dev_if) {
-                               if (sk->sk_bound_dev_if != dif)
-                                       continue;
-                               score+=2;
-                       }
-                       if (score == 5)
-                               return sk;
-                       if (score > hiscore) {
-                               hiscore = score;
-                               result = sk;
-                       }
-               }
-       }
-       return result;
-}
-
-/* Optimize the common listener case. */
-static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
-                                                 const unsigned short hnum,
-                                                 const int dif)
-{
-       struct sock *sk = NULL;
-       struct hlist_head *head;
-
-       read_lock(&tcp_hashinfo.lhash_lock);
-       head = &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)];
-       if (!hlist_empty(head)) {
-               struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
-
-               if (inet->num == hnum && !sk->sk_node.next &&
-                   (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
-                   (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
-                   !sk->sk_bound_dev_if)
-                       goto sherry_cache;
-               sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
-       }
-       if (sk) {
-sherry_cache:
-               sock_hold(sk);
-       }
-       read_unlock(&tcp_hashinfo.lhash_lock);
-       return sk;
-}
-
-/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
- * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
- *
- * Local BH must be disabled here.
- */
-
-static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
-                                                      const u16 sport,
-                                                      const u32 daddr,
-                                                      const u16 hnum,
-                                                      const int dif)
-{
-       struct inet_ehash_bucket *head;
-       TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
-       __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
-       struct sock *sk;
-       struct hlist_node *node;
-       /* Optimize here for direct hit, only listening connections can
-        * have wildcards anyways.
-        */
-       const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_hashinfo.ehash_size);
-       head = &tcp_hashinfo.ehash[hash];
-       read_lock(&head->lock);
-       sk_for_each(sk, node, &head->chain) {
-               if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
-                       goto hit; /* You sunk my battleship! */
-       }
-
-       /* Must check for a TIME_WAIT'er before going to listener hash. */
-       sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
-               if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
-                       goto hit;
-       }
-       sk = NULL;
-out:
-       read_unlock(&head->lock);
-       return sk;
-hit:
-       sock_hold(sk);
-       goto out;
+       inet_unhash(&tcp_hashinfo, sk);
 }
 
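The established-table walk removed above depends on a layout the inet_hashtables code keeps: bucket i of ehash chains live connections, while bucket i + ehash_size chains TIME-WAIT sockets for the same hash value, so a single bucket lock covers both. In outline:

	struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
	struct hlist_head *established = &head->chain;                               /* first half  */
	struct hlist_head *time_wait   = &(head + tcp_hashinfo.ehash_size)->chain;   /* second half */
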
-static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
-                                          u32 daddr, u16 hnum, int dif)
-{
-       struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
-                                                     daddr, hnum, dif);
-
-       return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
-}
-
-inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
-                                 u16 dport, int dif)
-{
-       struct sock *sk;
-
-       local_bh_disable();
-       sk = __tcp_v4_lookup(saddr, sport, daddr, ntohs(dport), dif);
-       local_bh_enable();
-
-       return sk;
-}
-
-EXPORT_SYMBOL_GPL(tcp_v4_lookup);
-
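tcp_v4_lookup_listener() and its helper disappear into inet_lookup_listener(), which keeps the same scoring scheme: an exact rcv_saddr match and an exact bound-device match are worth two points each, an AF_INET socket one, and a full score of five stops the scan early. Restated as a stand-alone helper (illustrative only, not a kernel API):

	static int listener_score(int is_af_inet, unsigned int rcv_saddr,
				  unsigned int daddr, int bound_dev_if, int dif)
	{
		int score = is_af_inet ? 1 : 0;

		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;	/* bound to a different local address */
			score += 2;
		}
		if (bound_dev_if) {
			if (bound_dev_if != dif)
				return -1;	/* bound to a different interface */
			score += 2;
		}
		return score;			/* 5 == fully specified match */
	}
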
 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
 {
        return secure_tcp_sequence_number(skb->nh.iph->daddr,
@@ -410,27 +122,28 @@ static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
 
 /* called with local bh disabled */
 static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
-                                     struct tcp_tw_bucket **twp)
+                                     struct inet_timewait_sock **twp)
 {
        struct inet_sock *inet = inet_sk(sk);
        u32 daddr = inet->rcv_saddr;
        u32 saddr = inet->daddr;
        int dif = sk->sk_bound_dev_if;
-       TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
-       __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
+       INET_ADDR_COOKIE(acookie, saddr, daddr)
+       const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
        const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
        struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
        struct sock *sk2;
-       struct hlist_node *node;
-       struct tcp_tw_bucket *tw;
+       const struct hlist_node *node;
+       struct inet_timewait_sock *tw;
 
        write_lock(&head->lock);
 
        /* Check TIME-WAIT sockets first. */
        sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
-               tw = (struct tcp_tw_bucket *)sk2;
+               tw = inet_twsk(sk2);
 
-               if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
+               if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
+                       const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
                        struct tcp_sock *tp = tcp_sk(sk);
 
                        /* With PAWS, it is safe from the viewpoint
@@ -447,15 +160,15 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
                           fall back to VJ's scheme and use initial
                           timestamp retrieved from peer table.
                         */
-                       if (tw->tw_ts_recent_stamp &&
+                       if (tcptw->tw_ts_recent_stamp &&
                            (!twp || (sysctl_tcp_tw_reuse &&
                                      xtime.tv_sec -
-                                     tw->tw_ts_recent_stamp > 1))) {
-                               if ((tp->write_seq =
-                                               tw->tw_snd_nxt + 65535 + 2) == 0)
+                                     tcptw->tw_ts_recent_stamp > 1))) {
+                               tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
+                               if (tp->write_seq == 0)
                                        tp->write_seq = 1;
-                               tp->rx_opt.ts_recent       = tw->tw_ts_recent;
-                               tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
+                               tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
+                               tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                                sock_hold(sk2);
                                goto unique;
                        } else
@@ -466,7 +179,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 
        /* And established part... */
        sk_for_each(sk2, node, &head->chain) {
-               if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
+               if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif))
                        goto not_unique;
        }
 
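INET_MATCH and INET_TW_MATCH (like the TCP_IPV4_* macros they replace) lean on INET_COMBINED_PORTS, which folds both 16-bit port numbers into one 32-bit word so the hot path compares them in a single operation. Conceptually, byte-order details aside:

	static inline u32 combined_ports(u16 sport, u16 dport)
	{
		return ((u32)sport << 16) | dport;	/* one compare checks both ports */
	}
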
@@ -486,10 +199,10 @@ unique:
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
-               tcp_tw_deschedule(tw);
+               inet_twsk_deschedule(tw, &tcp_death_row);
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
 
-               tcp_tw_put(tw);
+               inet_twsk_put(tw);
        }
 
        return 0;
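
When the timestamp test above allows a TIME-WAIT socket to be recycled, the new connection's write_seq is pushed one full maximum window (65535) plus 2 past tw_snd_nxt, so it starts beyond any segment the old incarnation could still have in flight; 0 is skipped because a zero write_seq is treated as unset. With illustrative numbers:

	u32 tw_snd_nxt = 0x00001000;                /* last sequence of the old connection */
	u32 write_seq  = tw_snd_nxt + 65535 + 2;    /* safely past any stray old segment */
	if (write_seq == 0)                         /* wrapped exactly onto the reserved value */
		write_seq = 1;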
@@ -526,7 +239,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
                static u32 hint;
                u32 offset = hint + connect_port_offset(sk);
                struct hlist_node *node;
-               struct tcp_tw_bucket *tw = NULL;
+               struct inet_timewait_sock *tw = NULL;
 
                local_bh_disable();
                for (i = 1; i <= range; i++) {
@@ -578,8 +291,8 @@ ok:
                spin_unlock(&head->lock);
 
                if (tw) {
-                       tcp_tw_deschedule(tw);
-                       tcp_tw_put(tw);
+                       inet_twsk_deschedule(tw, &tcp_death_row);
+                       inet_twsk_put(tw);
                }
 
                ret = 0;
@@ -587,7 +300,7 @@ ok:
        }
 
        head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
-       tb  = inet_sk(sk)->bind_hash;
+       tb  = inet_csk(sk)->icsk_bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                __inet_hash(&tcp_hashinfo, sk, 0);
@@ -653,7 +366,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                tp->write_seq              = 0;
        }
 
-       if (sysctl_tcp_tw_recycle &&
+       if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
                struct inet_peer *peer = rt_get_peer(rt);
 
@@ -718,53 +431,6 @@ failure:
        return err;
 }
 
-static __inline__ int tcp_v4_iif(struct sk_buff *skb)
-{
-       return ((struct rtable *)skb->dst)->rt_iif;
-}
-
-static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
-{
-       return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
-}
-
-static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
-                                             struct request_sock ***prevp,
-                                             __u16 rport,
-                                             __u32 raddr, __u32 laddr)
-{
-       struct listen_sock *lopt = tp->accept_queue.listen_opt;
-       struct request_sock *req, **prev;
-
-       for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
-            (req = *prev) != NULL;
-            prev = &req->dl_next) {
-               const struct inet_request_sock *ireq = inet_rsk(req);
-
-               if (ireq->rmt_port == rport &&
-                   ireq->rmt_addr == raddr &&
-                   ireq->loc_addr == laddr &&
-                   TCP_INET_FAMILY(req->rsk_ops->family)) {
-                       BUG_TRAP(!req->sk);
-                       *prevp = prev;
-                       break;
-               }
-       }
-
-       return req;
-}
-
-static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct listen_sock *lopt = tp->accept_queue.listen_opt;
-       u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
-
-       reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
-       tcp_synq_added(sk);
-}
-
-
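The SYN-queue hashing that leaves this file survives inside the inet_csk helpers in the same essential form: mix the peer's address and port through jhash_2words() and mask down to a bucket, which works because TCP_SYNQ_HSIZE is a power of two. In essence:

	#include <linux/jhash.h>

	static inline u32 synq_bucket(u32 raddr, u16 rport, u32 rnd, u32 hsize)
	{
		/* hsize must be a power of two for the mask to select a bucket */
		return jhash_2words(raddr, (u32)rport, rnd) & (hsize - 1);
	}
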
 /*
  * This routine does path mtu discovery as defined in RFC1191.
  */
@@ -847,14 +513,14 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                return;
        }
 
-       sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
-                          th->source, tcp_v4_iif(skb));
+       sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
+                        th->source, inet_iif(skb));
        if (!sk) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
-               tcp_tw_put((struct tcp_tw_bucket *)sk);
+               inet_twsk_put((struct inet_timewait_sock *)sk);
                return;
        }
 
@@ -908,8 +574,8 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                if (sock_owned_by_user(sk))
                        goto out;
 
-               req = tcp_v4_search_req(tp, &prev, th->dest,
-                                       iph->daddr, iph->saddr);
+               req = inet_csk_search_req(sk, &prev, th->dest,
+                                         iph->daddr, iph->saddr);
                if (!req)
                        goto out;
 
@@ -929,7 +595,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
-               tcp_synq_drop(sk, req, prev);
+               inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;
 
        case TCP_SYN_SENT:
@@ -1099,12 +765,13 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
+       struct inet_timewait_sock *tw = inet_twsk(sk);
+       const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-       tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
-                       tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);
+       tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+                       tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
 
-       tcp_tw_put(tw);
+       inet_twsk_put(tw);
 }
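
This function shows the split the patch makes in TIME-WAIT state: protocol-neutral fields such as tw_rcv_wscale stay in inet_timewait_sock, while TCP's sequence and timestamp fields move into tcp_timewait_sock, reached through tcp_twsk(). A sketch of the layering, field list abridged (the cast is valid because the generic part is the first member):

	struct tcp_timewait_sock {
		struct inet_timewait_sock tw_sk;	/* generic part, must come first */
		u32			  tw_rcv_nxt;
		u32			  tw_snd_nxt;
		u32			  tw_rcv_wnd;
		u32			  tw_ts_recent;
		long			  tw_ts_recent_stamp;
	};

	static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
	{
		return (struct tcp_timewait_sock *)sk;
	}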
 
 static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
@@ -1113,36 +780,6 @@ static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
                        req->ts_recent);
 }
 
-static struct dst_entry* tcp_v4_route_req(struct sock *sk,
-                                         struct request_sock *req)
-{
-       struct rtable *rt;
-       const struct inet_request_sock *ireq = inet_rsk(req);
-       struct ip_options *opt = inet_rsk(req)->opt;
-       struct flowi fl = { .oif = sk->sk_bound_dev_if,
-                           .nl_u = { .ip4_u =
-                                     { .daddr = ((opt && opt->srr) ?
-                                                 opt->faddr :
-                                                 ireq->rmt_addr),
-                                       .saddr = ireq->loc_addr,
-                                       .tos = RT_CONN_FLAGS(sk) } },
-                           .proto = IPPROTO_TCP,
-                           .uli_u = { .ports =
-                                      { .sport = inet_sk(sk)->sport,
-                                        .dport = ireq->rmt_port } } };
-
-       if (ip_route_output_flow(&rt, &fl, sk, 0)) {
-               IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
-               return NULL;
-       }
-       if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
-               ip_rt_put(rt);
-               IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
-               return NULL;
-       }
-       return &rt->u.dst;
-}
-
 /*
  *     Send a SYN-ACK after having received an ACK.
  *     This still operates on a request_sock only, not on a big
@@ -1156,7 +793,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
        struct sk_buff * skb;
 
        /* First, grab a route. */
-       if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
+       if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                goto out;
 
        skb = tcp_make_synack(sk, dst, req);
@@ -1258,7 +895,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * limitations, they conserve resources and peer is
         * evidently real one.
         */
-       if (tcp_synq_is_full(sk) && !isn) {
+       if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
 #ifdef CONFIG_SYN_COOKIES
                if (sysctl_tcp_syncookies) {
                        want_cookie = 1;
@@ -1272,7 +909,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * clogging syn queue with openreqs with exponentially increasing
         * timeout.
         */
-       if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;
 
        req = reqsk_alloc(&tcp_request_sock_ops);
@@ -1328,8 +965,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                 * are made in the function processing timewait state.
                 */
                if (tmp_opt.saw_tstamp &&
-                   sysctl_tcp_tw_recycle &&
-                   (dst = tcp_v4_route_req(sk, req)) != NULL &&
+                   tcp_death_row.sysctl_tw_recycle &&
+                   (dst = inet_csk_route_req(sk, req)) != NULL &&
                    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
                    peer->v4daddr == saddr) {
                        if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
@@ -1342,7 +979,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                }
                /* Kill the following clause, if you dislike this way. */
                else if (!sysctl_tcp_syncookies &&
-                        (sysctl_max_syn_backlog - tcp_synq_len(sk) <
+                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                          (sysctl_max_syn_backlog >> 2)) &&
                         (!peer || !peer->tcp_ts_stamp) &&
                         (!dst || !dst_metric(dst, RTAX_RTT))) {
@@ -1353,11 +990,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                         * to destinations, already remembered
                         * to the moment of synflood.
                         */
-                       LIMIT_NETDEBUG(printk(KERN_DEBUG "TCP: drop open "
-                                             "request from %u.%u."
-                                             "%u.%u/%u\n",
-                                             NIPQUAD(saddr),
-                                             ntohs(skb->h.th->source)));
+                       LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
+                                      "request from %u.%u.%u.%u/%u\n",
+                                      NIPQUAD(saddr),
+                                      ntohs(skb->h.th->source));
                        dst_release(dst);
                        goto drop_and_free;
                }
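
The overload test above reserves the tail of the SYN backlog for peers we know something about: a request from a peer with no cached timestamp and no cached RTT is dropped once fewer than a quarter of the sysctl_max_syn_backlog slots remain free. Spelled out as a hypothetical helper:

	static int synflood_drop(int max_syn_backlog, int queue_len)
	{
		/* e.g. with a backlog of 256, unknown peers are refused
		 * once more than 192 embryonic requests are queued */
		return max_syn_backlog - queue_len < (max_syn_backlog >> 2);
	}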
@@ -1372,7 +1008,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        if (want_cookie) {
                reqsk_free(req);
        } else {
-               tcp_v4_synq_add(sk, req);
+               inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        }
        return 0;
 
@@ -1400,7 +1036,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        if (sk_acceptq_is_full(sk))
                goto exit_overflow;
 
-       if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
+       if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                goto exit;
 
        newsk = tcp_create_openreq_child(sk, req, skb);
@@ -1417,7 +1053,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newinet->saddr        = ireq->loc_addr;
        newinet->opt          = ireq->opt;
        ireq->opt             = NULL;
-       newinet->mc_index     = tcp_v4_iif(skb);
+       newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = skb->nh.iph->ttl;
        newtp->ext_header_len = 0;
        if (newinet->opt)
@@ -1445,27 +1081,24 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
        struct tcphdr *th = skb->h.th;
        struct iphdr *iph = skb->nh.iph;
-       struct tcp_sock *tp = tcp_sk(sk);
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
-       struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source,
-                                                    iph->saddr, iph->daddr);
+       struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
+                                                      iph->saddr, iph->daddr);
        if (req)
                return tcp_check_req(sk, skb, req, prev);
 
-       nsk = __tcp_v4_lookup_established(skb->nh.iph->saddr,
-                                         th->source,
-                                         skb->nh.iph->daddr,
-                                         ntohs(th->dest),
-                                         tcp_v4_iif(skb));
+       nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
+                                       th->source, skb->nh.iph->daddr,
+                                       ntohs(th->dest), inet_iif(skb));
 
        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
-               tcp_tw_put((struct tcp_tw_bucket *)nsk);
+               inet_twsk_put((struct inet_timewait_sock *)nsk);
                return NULL;
        }
 
@@ -1484,7 +1117,7 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
                                  skb->nh.iph->daddr, skb->csum))
                        return 0;
 
-               LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
+               LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v4 csum failed\n");
                skb->ip_summed = CHECKSUM_NONE;
        }
        if (skb->len <= 76) {
@@ -1600,9 +1233,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
        TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
        TCP_SKB_CB(skb)->sacked  = 0;
 
-       sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
-                            skb->nh.iph->daddr, ntohs(th->dest),
-                            tcp_v4_iif(skb));
+       sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
+                          skb->nh.iph->daddr, ntohs(th->dest),
+                          inet_iif(skb));
 
        if (!sk)
                goto no_tcp_socket;
@@ -1654,24 +1287,26 @@ discard_and_relse:
 
 do_time_wait:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-               tcp_tw_put((struct tcp_tw_bucket *) sk);
+               inet_twsk_put((struct inet_timewait_sock *) sk);
                goto discard_it;
        }
 
        if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
-               tcp_tw_put((struct tcp_tw_bucket *) sk);
+               inet_twsk_put((struct inet_timewait_sock *) sk);
                goto discard_it;
        }
-       switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
-                                          skb, th, skb->len)) {
+       switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
+                                          skb, th)) {
        case TCP_TW_SYN: {
-               struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
-                                                         ntohs(th->dest),
-                                                         tcp_v4_iif(skb));
+               struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
+                                                       skb->nh.iph->daddr,
+                                                       ntohs(th->dest),
+                                                       inet_iif(skb));
                if (sk2) {
-                       tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
-                       tcp_tw_put((struct tcp_tw_bucket *)sk);
+                       inet_twsk_deschedule((struct inet_timewait_sock *)sk,
+                                            &tcp_death_row);
+                       inet_twsk_put((struct inet_timewait_sock *)sk);
                        sk = sk2;
                        goto process;
                }
@@ -1735,18 +1370,18 @@ int tcp_v4_remember_stamp(struct sock *sk)
        return 0;
 }
 
-int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
+int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
 {
-       struct inet_peer *peer = NULL;
-
-       peer = inet_getpeer(tw->tw_daddr, 1);
+       struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
 
        if (peer) {
-               if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 ||
+               const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+
+               if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
                    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
-                    peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) {
-                       peer->tcp_ts_stamp = tw->tw_ts_recent_stamp;
-                       peer->tcp_ts = tw->tw_ts_recent;
+                    peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
+                       peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
+                       peer->tcp_ts       = tcptw->tw_ts_recent;
                }
                inet_putpeer(peer);
                return 1;
@@ -1774,13 +1409,14 @@ struct tcp_func ipv4_specific = {
  */
 static int tcp_v4_init_sock(struct sock *sk)
 {
+       struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);
 
-       tp->rto  = TCP_TIMEOUT_INIT;
+       icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;
 
        /* So many TCP implementations out there (incorrectly) count the
@@ -1798,7 +1434,7 @@ static int tcp_v4_init_sock(struct sock *sk)
        tp->mss_cache = 536;
 
        tp->reordering = sysctl_tcp_reordering;
-       tp->ca_ops = &tcp_init_congestion_ops;
+       icsk->icsk_ca_ops = &tcp_init_congestion_ops;
 
        sk->sk_state = TCP_CLOSE;
 
@@ -1821,7 +1457,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
 
        tcp_clear_xmit_timers(sk);
 
-       tcp_cleanup_congestion_control(tp);
+       tcp_cleanup_congestion_control(sk);
 
        /* Cleanup up the write buffer. */
        sk_stream_writequeue_purge(sk);
@@ -1833,7 +1469,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
        __skb_queue_purge(&tp->ucopy.prequeue);
 
        /* Clean up a referenced TCP bind bucket. */
-       if (inet_sk(sk)->bind_hash)
+       if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(&tcp_hashinfo, sk);
 
        /*
@@ -1854,13 +1490,13 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCP sock list dumping. */
 
-static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head)
+static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
 {
        return hlist_empty(head) ? NULL :
-               list_entry(head->first, struct tcp_tw_bucket, tw_node);
+               list_entry(head->first, struct inet_timewait_sock, tw_node);
 }
 
-static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
+static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
 {
        return tw->tw_node.next ?
                hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
@@ -1868,7 +1504,7 @@ static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw)
 
 static void *listening_get_next(struct seq_file *seq, void *cur)
 {
-       struct tcp_sock *tp;
+       struct inet_connection_sock *icsk;
        struct hlist_node *node;
        struct sock *sk = cur;
        struct tcp_iter_state* st = seq->private;
@@ -1884,7 +1520,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
        if (st->state == TCP_SEQ_STATE_OPENREQ) {
                struct request_sock *req = cur;
 
-               tp = tcp_sk(st->syn_wait_sk);
+               icsk = inet_csk(st->syn_wait_sk);
                req = req->dl_next;
                while (1) {
                        while (req) {
@@ -1897,17 +1533,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
                        if (++st->sbucket >= TCP_SYNQ_HSIZE)
                                break;
 get_req:
-                       req = tp->accept_queue.listen_opt->syn_table[st->sbucket];
+                       req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
                }
                sk        = sk_next(st->syn_wait_sk);
                st->state = TCP_SEQ_STATE_LISTENING;
-               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        } else {
-               tp = tcp_sk(sk);
-               read_lock_bh(&tp->accept_queue.syn_wait_lock);
-               if (reqsk_queue_len(&tp->accept_queue))
+               icsk = inet_csk(sk);
+               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               if (reqsk_queue_len(&icsk->icsk_accept_queue))
                        goto start_req;
-               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                sk = sk_next(sk);
        }
 get_sk:
@@ -1916,9 +1552,9 @@ get_sk:
                        cur = sk;
                        goto out;
                }
-               tp = tcp_sk(sk);
-               read_lock_bh(&tp->accept_queue.syn_wait_lock);
-               if (reqsk_queue_len(&tp->accept_queue)) {
+               icsk = inet_csk(sk);
+               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
 start_req:
                        st->uid         = sock_i_uid(sk);
                        st->syn_wait_sk = sk;
@@ -1926,7 +1562,7 @@ start_req:
                        st->sbucket     = 0;
                        goto get_req;
                }
-               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        }
        if (++st->bucket < INET_LHTABLE_SIZE) {
                sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
@@ -1956,7 +1592,7 @@ static void *established_get_first(struct seq_file *seq)
        for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
                struct sock *sk;
                struct hlist_node *node;
-               struct tcp_tw_bucket *tw;
+               struct inet_timewait_sock *tw;
 
                /* We can reschedule _before_ having picked the target: */
                cond_resched_softirq();
@@ -1970,8 +1606,8 @@ static void *established_get_first(struct seq_file *seq)
                        goto out;
                }
                st->state = TCP_SEQ_STATE_TIME_WAIT;
-               tw_for_each(tw, node,
-                           &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
+               inet_twsk_for_each(tw, node,
+                                  &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
                        if (tw->tw_family != st->family) {
                                continue;
                        }
@@ -1988,7 +1624,7 @@ out:
 static void *established_get_next(struct seq_file *seq, void *cur)
 {
        struct sock *sk = cur;
-       struct tcp_tw_bucket *tw;
+       struct inet_timewait_sock *tw;
        struct hlist_node *node;
        struct tcp_iter_state* st = seq->private;
 
@@ -2112,8 +1748,8 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
-                       struct tcp_sock *tp = tcp_sk(st->syn_wait_sk);
-                       read_unlock_bh(&tp->accept_queue.syn_wait_lock);
+                       struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
+                       read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
@@ -2219,18 +1855,19 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
        int timer_active;
        unsigned long timer_expires;
        struct tcp_sock *tp = tcp_sk(sp);
+       const struct inet_connection_sock *icsk = inet_csk(sp);
        struct inet_sock *inet = inet_sk(sp);
        unsigned int dest = inet->daddr;
        unsigned int src = inet->rcv_saddr;
        __u16 destp = ntohs(inet->dport);
        __u16 srcp = ntohs(inet->sport);
 
-       if (tp->pending == TCP_TIME_RETRANS) {
+       if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active    = 1;
-               timer_expires   = tp->timeout;
-       } else if (tp->pending == TCP_TIME_PROBE0) {
+               timer_expires   = icsk->icsk_timeout;
+       } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
-               timer_expires   = tp->timeout;
+               timer_expires   = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active    = 2;
                timer_expires   = sp->sk_timer.expires;
@@ -2245,17 +1882,19 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
                tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
                timer_active,
                jiffies_to_clock_t(timer_expires - jiffies),
-               tp->retransmits,
+               icsk->icsk_retransmits,
                sock_i_uid(sp),
-               tp->probes_out,
+               icsk->icsk_probes_out,
                sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
-               tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
+               icsk->icsk_rto,
+               icsk->icsk_ack.ato,
+               (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                tp->snd_cwnd,
                tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
 }
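
The timer_active codes chosen above are what /proc/net/tcp consumers decode; a hypothetical decoder matching these branches:

	static const char *timer_name(int timer_active)
	{
		switch (timer_active) {
		case 1:  return "retransmit";		/* ICSK_TIME_RETRANS */
		case 2:  return "other (sk_timer)";	/* e.g. keepalive */
		case 4:  return "zero-window probe";	/* ICSK_TIME_PROBE0 */
		default: return "none";
		}
	}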
 
-static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
+static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
 {
        unsigned int dest, src;
        __u16 destp, srcp;
@@ -2335,7 +1974,7 @@ struct proto tcp_prot = {
        .close                  = tcp_close,
        .connect                = tcp_v4_connect,
        .disconnect             = tcp_disconnect,
-       .accept                 = tcp_accept,
+       .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v4_init_sock,
        .destroy                = tcp_v4_destroy_sock,
@@ -2350,6 +1989,7 @@ struct proto tcp_prot = {
        .get_port               = tcp_v4_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .sockets_allocated      = &tcp_sockets_allocated,
+       .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .sysctl_mem             = sysctl_tcp_mem,
@@ -2357,6 +1997,7 @@ struct proto tcp_prot = {
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp_sock),
+       .twsk_obj_size          = sizeof(struct tcp_timewait_sock),
        .rsk_prot               = &tcp_request_sock_ops,
 };
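
The new .twsk_obj_size entry lets protocol-independent code allocate a protocol's full TIME-WAIT object, here sizeof(struct tcp_timewait_sock) rather than a hardcoded TCP type. Conceptually (a sketch, not the exact allocator in this tree):

	static struct inet_timewait_sock *twsk_alloc(const struct sock *sk)
	{
		/* the generic part is the first member, so the cast back is safe */
		return kmalloc(sk->sk_prot->twsk_obj_size, GFP_ATOMIC);
	}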