diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c146a02f84951aadaf88d933577e8169ee0befd9..3f5f7423b95ca818463938dbaad1c8bed8903bb3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -88,7 +88,7 @@ int sysctl_tcp_low_latency __read_mostly;
 #define ICMP_MIN_LENGTH 8
 
 /* Socket used for sending RSTs */
-static struct socket *tcp_socket;
+static struct socket *tcp_socket __read_mostly;
 
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
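The first hunk tags the RST socket pointer `__read_mostly`. On architectures that define it, the annotation only moves the object into a dedicated section, so that globals written once at boot do not share cache lines with write-hot data; a paraphrase of the era's include/linux/cache.h definition:

    #define __read_mostly __attribute__((__section__(".data.read_mostly")))

`tcp_socket` is set up once at init time and afterwards only read whenever a reset is sent, which is exactly the access pattern the section exists for.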
@@ -127,8 +127,8 @@ static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
 {
        return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
                                          ip_hdr(skb)->saddr,
-                                         skb->h.th->dest,
-                                         skb->h.th->source);
+                                         tcp_hdr(skb)->dest,
+                                         tcp_hdr(skb)->source);
 }
 
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
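Most of the remaining hunks are mechanical: every direct access to the old `skb->h.th` union member becomes a call to the `tcp_hdr()` accessor. For reference, the accessor of this era is a thin cast over the transport-header pointer (paraphrased from include/linux/tcp.h):

    static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
    {
            return (struct tcphdr *)skb_transport_header(skb);
    }

Routing all users through the helper is what lets sk_buff drop its per-protocol header union without another tree-wide sweep.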
@@ -192,8 +192,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                               IPPROTO_TCP,
                               inet->sport, usin->sin_port, sk, 1);
-       if (tmp < 0)
+       if (tmp < 0) {
+               if (tmp == -ENETUNREACH)
+                       IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
                return tmp;
+       }
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
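The connect-path hunk adds SNMP accounting: a `-ENETUNREACH` from `ip_route_connect()` now bumps `IPSTATS_MIB_OUTNOROUTES` (reported as `OutNoRoutes` in the `Ip:` line of /proc/net/snmp) before the error is propagated. A hypothetical userspace probe for the new behaviour, assuming a host with no route to the destination:

    #include <arpa/inet.h>
    #include <errno.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            struct sockaddr_in sin = { .sin_family = AF_INET,
                                       .sin_port   = htons(80) };

            inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
            if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0 &&
                errno == ENETUNREACH)
                    puts("no route: OutNoRoutes was incremented");
            close(fd);
            return 0;
    }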
@@ -499,11 +502,12 @@ out:
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
 {
        struct inet_sock *inet = inet_sk(sk);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~tcp_v4_check(len, inet->saddr,
                                          inet->daddr, 0);
+               skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
@@ -522,10 +526,11 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
                return -EINVAL;
 
        iph = ip_hdr(skb);
-       th = skb->h.th;
+       th = tcp_hdr(skb);
 
        th->check = 0;
        th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
+       skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;
        return 0;
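Both checksum hunks pair the existing `csum_offset` with the newly introduced `csum_start`: for a CHECKSUM_PARTIAL skb, `csum_start` records, as an offset from `skb->head`, where checksumming must begin, and `csum_offset` records where, relative to that start, the 16-bit result belongs. If no hardware finishes the job, the stack does it in software; a minimal sketch of that step (hypothetical helper name, paraphrasing what skb_checksum_help() does):

    static void sw_finish_csum(struct sk_buff *skb)
    {
            /* first summed byte, relative to skb->data */
            unsigned int start = skb->csum_start - skb_headroom(skb);
            __wsum csum = skb_checksum(skb, start, skb->len - start, 0);

            /* th->check already holds the inverted pseudo-header sum,
             * so folding the full sum yields the final checksum */
            *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) =
                    csum_fold(csum);
    }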
@@ -546,7 +551,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 
 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
 #ifdef CONFIG_TCP_MD5SIG
@@ -622,7 +627,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
                            struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 ts)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -700,6 +705,8 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
+       if (twsk)
+               arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if;
 
        ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
 
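The `bound_dev_if` hunk makes ACKs sent on behalf of a TIME_WAIT socket honour the device binding of the socket they descend from: the reply arguments inherit `tw_bound_dev_if`, so routing stays on the interface the application selected. The userspace side of that binding, as a hypothetical example:

    #include <net/if.h>
    #include <string.h>
    #include <sys/socket.h>

    static int bind_to_dev(int fd, const char *name)
    {
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
            return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                              &ifr, sizeof(ifr));
    }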
@@ -745,7 +752,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
        skb = tcp_make_synack(sk, dst, req);
 
        if (skb) {
-               struct tcphdr *th = skb->h.th;
+               struct tcphdr *th = tcp_hdr(skb);
 
                th->check = tcp_v4_check(skb->len,
                                         ireq->loc_addr,
@@ -781,7 +788,7 @@ static void syn_flood_warning(struct sk_buff *skb)
                warntime = jiffies;
                printk(KERN_INFO
                       "possible SYN flooding on port %d. Sending cookies.\n",
-                      ntohs(skb->h.th->dest));
+                      ntohs(tcp_hdr(skb)->dest));
        }
 }
 #endif
@@ -871,6 +878,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
                                kfree(newkey);
                                return -ENOMEM;
                        }
+                       sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
                }
                if (tcp_alloc_md5sig_pool() == NULL) {
                        kfree(newkey);
@@ -1000,7 +1008,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
                        return -EINVAL;
 
                tp->md5sig_info = p;
-
+               sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
        }
 
        newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
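Two hunks clear the GSO bits from `sk_route_caps` the moment an MD5 key is attached to a socket. The TCP-MD5 signature (RFC 2385) is computed over each on-the-wire segment, so letting GSO split a super-packet after the hash was filled in would leave every derived segment carrying a signature for data it does not contain. Masking the caps forces software segmentation first; roughly:

    /* sk_route_caps starts out as the route's device feature set;
     * MD5 setup then strips every segmentation-offload flavour
     * (TSO, UFO, ...) covered by NETIF_F_GSO_MASK: */
    sk->sk_route_caps &= ~NETIF_F_GSO_MASK;

With offload gone, tcp_write_xmit() emits at most MSS-sized segments, each of which gets its own correct hash.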
@@ -1134,7 +1142,7 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
        __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        const struct iphdr *iph = ip_hdr(skb);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        int length = (th->doff << 2) - sizeof(struct tcphdr);
        int genhash;
        unsigned char *ptr;
@@ -1327,7 +1335,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        ireq->rmt_addr = saddr;
        ireq->opt = tcp_v4_save_options(sk, skb);
        if (!want_cookie)
-               TCP_ECN_create_request(req, skb->h.th);
+               TCP_ECN_create_request(req, tcp_hdr(skb));
 
        if (want_cookie) {
 #ifdef CONFIG_SYN_COOKIES
@@ -1375,7 +1383,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
                                       "request from %u.%u.%u.%u/%u\n",
                                       NIPQUAD(saddr),
-                                      ntohs(skb->h.th->source));
+                                      ntohs(tcp_hdr(skb)->source));
                        dst_release(dst);
                        goto drop_and_free;
                }
@@ -1481,7 +1489,7 @@ exit:
 
 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        const struct iphdr *iph = ip_hdr(skb);
        struct sock *nsk;
        struct request_sock **prev;
@@ -1556,7 +1564,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
-               if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+               if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
                        rsk = sk;
                        goto reset;
                }
@@ -1582,7 +1590,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
        }
 
        TCP_CHECK_TIMER(sk);
-       if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+       if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
                rsk = sk;
                goto reset;
        }
@@ -1625,7 +1633,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
 
        if (th->doff < sizeof(struct tcphdr) / 4)
                goto bad_packet;
@@ -1636,11 +1644,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
         * Packet length and doff are validated by header prediction,
         * provided case of th->doff==0 is eliminated.
         * So, we defer the checks. */
-       if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
-            tcp_v4_checksum_init(skb)))
+       if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
                goto bad_packet;
 
-       th = skb->h.th;
+       th = tcp_hdr(skb);
        iph = ip_hdr(skb);
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
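The receive path swaps an open-coded `ip_summed` comparison for the new `skb_csum_unnecessary()` helper, which in this era reduces to a flag test (paraphrased from include/linux/skbuff.h):

    static inline int skb_csum_unnecessary(const struct sk_buff *skb)
    {
            return skb->ip_summed & CHECKSUM_UNNECESSARY;
    }

Because the test is a bitwise AND rather than an equality check, it also matches CHECKSUM_PARTIAL, so locally generated packets looped back to this host skip software verification as well.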
@@ -2038,10 +2045,7 @@ static void *established_get_first(struct seq_file *seq)
                struct hlist_node *node;
                struct inet_timewait_sock *tw;
 
-               /* We can reschedule _before_ having picked the target: */
-               cond_resched_softirq();
-
-               read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+               read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
                sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
                        if (sk->sk_family != st->family) {
                                continue;
@@ -2058,7 +2062,7 @@ static void *established_get_first(struct seq_file *seq)
                        rc = tw;
                        goto out;
                }
-               read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
+               read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
        }
 out:
@@ -2085,14 +2089,11 @@ get_tw:
                        cur = tw;
                        goto out;
                }
-               read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
+               read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
 
-               /* We can reschedule between buckets: */
-               cond_resched_softirq();
-
                if (++st->bucket < tcp_hashinfo.ehash_size) {
-                       read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+                       read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
                        sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
                } else {
                        cur = NULL;
@@ -2137,7 +2138,6 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
 
        if (!rc) {
                inet_listen_unlock(&tcp_hashinfo);
-               local_bh_disable();
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                rc        = established_get_idx(seq, pos);
        }
@@ -2170,7 +2170,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                rc = listening_get_next(seq, v);
                if (!rc) {
                        inet_listen_unlock(&tcp_hashinfo);
-                       local_bh_disable();
                        st->state = TCP_SEQ_STATE_ESTABLISHED;
                        rc        = established_get_first(seq);
                }
@@ -2202,8 +2201,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
        case TCP_SEQ_STATE_TIME_WAIT:
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
-                       read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
-               local_bh_enable();
+                       read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
                break;
        }
 }
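The seq_file hunks at the end change the /proc/net/tcp iterator's locking discipline: instead of disabling bottom halves across the whole established-table walk (with `cond_resched_softirq()` breaks so softirqs were not starved), BHs are now disabled only while a bucket's read lock is actually held, via the `_bh` lock variants, and the bare `local_bh_disable()`/`local_bh_enable()` calls go away. The rule the new code follows, as a hedged sketch:

    /* The ehash bucket locks are taken for writing from softirq
     * context; a process-context reader must therefore disable BHs,
     * or a softirq preempting it on the same CPU could spin forever
     * in write_lock(). read_lock_bh() bundles both steps: */
    static void walk_one_bucket(rwlock_t *lock)
    {
            read_lock_bh(lock);     /* local_bh_disable() + read_lock() */
            /* ... traverse this bucket's chain ... */
            read_unlock_bh(lock);   /* read_unlock() + local_bh_enable() */
    }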