diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 617a5e4ca010d1b5f9b883371d9b893e141bec31..3f5f7423b95ca818463938dbaad1c8bed8903bb3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -88,7 +88,7 @@ int sysctl_tcp_low_latency __read_mostly;
 #define ICMP_MIN_LENGTH 8
 
 /* Socket used for sending RSTs */
-static struct socket *tcp_socket;
+static struct socket *tcp_socket __read_mostly;
 
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
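The first hunk tags tcp_socket as __read_mostly: the socket is set up once at init and afterwards only read (whenever a RST has to be sent), so the annotation lets the linker group it with other rarely-written data, away from hot, frequently-dirtied cache lines. A minimal userspace sketch of the same section trick, assuming GCC/Clang; the section name is illustrative and the kernel's macro differs slightly across versions:

    #include <stdio.h>

    /* Illustrative analogue of the kernel's __read_mostly: place the
     * object in a dedicated linker section so read-mostly data can be
     * grouped away from frequently written data. */
    #define __read_mostly __attribute__((__section__(".data.read_mostly")))

    static int rst_socket_ready __read_mostly = 1;

    int main(void)
    {
            printf("ready: %d\n", rst_socket_ready);
            return 0;
    }
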
@@ -192,8 +192,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                               IPPROTO_TCP,
                               inet->sport, usin->sin_port, sk, 1);
-       if (tmp < 0)
+       if (tmp < 0) {
+               if (tmp == -ENETUNREACH)
+                       IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
                return tmp;
+       }
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
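
The connect() hunk feeds failed route lookups into the SNMP statistics: when the route lookup fails with -ENETUNREACH, IPSTATS_MIB_OUTNOROUTES is bumped before the error propagates, so the failure shows up in counters such as netstat -s's "no routes" line. A hedged userspace sketch of the pattern, with illustrative names standing in for the MIB machinery:

    #include <errno.h>
    #include <stdio.h>

    static unsigned long out_no_routes; /* stands in for IPSTATS_MIB_OUTNOROUTES */

    static int route_lookup(int have_route)
    {
            return have_route ? 0 : -ENETUNREACH;   /* toy lookup */
    }

    static int do_connect(int have_route)
    {
            int err = route_lookup(have_route);

            if (err < 0) {
                    if (err == -ENETUNREACH)
                            out_no_routes++;        /* account, then propagate */
                    return err;
            }
            return 0;
    }

    int main(void)
    {
            do_connect(0);
            printf("OutNoRoutes: %lu\n", out_no_routes);
            return 0;
    }
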
@@ -504,6 +507,7 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~tcp_v4_check(len, inet->saddr,
                                          inet->daddr, 0);
+               skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
@@ -526,6 +530,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
 
        th->check = 0;
        th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
+       skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;
        return 0;
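
The two hunks above fix the CHECKSUM_PARTIAL transmit paths, which previously set only csum_offset. With CHECKSUM_PARTIAL the driver or NIC finishes the checksum, so the stack must record both where the to-be-checksummed region starts (csum_start, measured from skb->head) and where the result goes (csum_offset, the offset of the check field inside the TCP header). A minimal userspace sketch of how the two offsets locate the checksum field; the struct layout is simplified and a 20-byte IP header is assumed:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified TCP header; only the field offsets matter here. */
    struct tcphdr_min {
            uint16_t source, dest;
            uint32_t seq, ack_seq;
            uint16_t doff_flags, window;
            uint16_t check;                         /* checksum field */
            uint16_t urg_ptr;
    };

    int main(void)
    {
            uint8_t pkt[64];                        /* stands in for skb->head */
            uint8_t *transport = pkt + 20;          /* past a 20-byte IP header */

            size_t csum_start  = transport - pkt;                    /* 20 */
            size_t csum_offset = offsetof(struct tcphdr_min, check); /* 16 */

            /* The device writes the folded checksum at exactly: */
            uint8_t *dst = pkt + csum_start + csum_offset;

            printf("checksum written at byte %td\n", dst - pkt);
            return 0;
    }
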
@@ -700,6 +705,8 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
+       if (twsk)
+               arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if;
 
        ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
 
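The hunk above makes ACKs sent on behalf of a timewait socket inherit the original socket's bound device, so replies for connections bound with SO_BINDTODEVICE leave through the interface the application chose. For reference, the userspace side that populates sk_bound_dev_if in the first place (device name illustrative):

    #include <string.h>
    #include <sys/socket.h>

    /* Bind all traffic on fd to one interface; this sets the socket's
     * sk_bound_dev_if, which the hunk above copies into the reply. */
    int bind_to_device(int fd, const char *ifname)
    {
            return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                              ifname, strlen(ifname) + 1);
    }
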
@@ -871,6 +878,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
                                kfree(newkey);
                                return -ENOMEM;
                        }
+                       sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
                }
                if (tcp_alloc_md5sig_pool() == NULL) {
                        kfree(newkey);
@@ -1000,7 +1008,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
                        return -EINVAL;
 
                tp->md5sig_info = p;
-
+               sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
        }
 
        newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
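
Both MD5 hunks clear the GSO bits from sk_route_caps as soon as a signature key is installed. A TCP MD5 signature covers each segment individually, so letting the NIC split a signed super-segment would leave every resulting segment with a stale signature; dropping hardware GSO forces software segmentation, where each segment is signed on its own. A sketch of the mask-clearing idiom, with illustrative bit values rather than the kernel's NETIF_F_* constants:

    #include <stdio.h>

    #define CAP_SG        (1u << 0)
    #define CAP_HW_CSUM   (1u << 1)
    #define CAP_GSO_MASK  (0xffu << 2)      /* all GSO-type feature bits */

    int main(void)
    {
            unsigned int route_caps = CAP_SG | CAP_HW_CSUM | CAP_GSO_MASK;

            route_caps &= ~CAP_GSO_MASK;    /* MD5 key present: no HW GSO */
            printf("caps: %#x\n", route_caps);
            return 0;
    }
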
@@ -1636,8 +1644,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
         * Packet length and doff are validated by header prediction,
         * provided case of th->doff==0 is eliminated.
         * So, we defer the checks. */
-       if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
-            tcp_v4_checksum_init(skb)))
+       if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
                goto bad_packet;
 
        th = tcp_hdr(skb);
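
The receive-path hunk replaces the open-coded ip_summed comparison with the skb_csum_unnecessary() helper, keeping the "has a lower layer already verified this checksum?" question in one place. A sketch of the helper's shape in this era, simplified from linux/skbuff.h with a stub standing in for struct sk_buff:

    #define CHECKSUM_NONE        0
    #define CHECKSUM_UNNECESSARY 1
    #define CHECKSUM_COMPLETE    2
    #define CHECKSUM_PARTIAL     3

    struct skb_stub {
            unsigned char ip_summed;
    };

    /* True when the checksum was already verified below us, so the
     * receive path may skip tcp_v4_checksum_init() entirely. */
    static inline int skb_csum_unnecessary(const struct skb_stub *skb)
    {
            return skb->ip_summed & CHECKSUM_UNNECESSARY;
    }
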
@@ -2038,10 +2045,7 @@ static void *established_get_first(struct seq_file *seq)
                struct hlist_node *node;
                struct inet_timewait_sock *tw;
 
-               /* We can reschedule _before_ having picked the target: */
-               cond_resched_softirq();
-
-               read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+               read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
                sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
                        if (sk->sk_family != st->family) {
                                continue;
@@ -2058,7 +2062,7 @@ static void *established_get_first(struct seq_file *seq)
                        rc = tw;
                        goto out;
                }
-               read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
+               read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
        }
 out:
@@ -2085,14 +2089,11 @@ get_tw:
                        cur = tw;
                        goto out;
                }
-               read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
+               read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
 
-               /* We can reschedule between buckets: */
-               cond_resched_softirq();
-
                if (++st->bucket < tcp_hashinfo.ehash_size) {
-                       read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+                       read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
                        sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
                } else {
                        cur = NULL;
@@ -2137,7 +2138,6 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
 
        if (!rc) {
                inet_listen_unlock(&tcp_hashinfo);
-               local_bh_disable();
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                rc        = established_get_idx(seq, pos);
        }
@@ -2170,7 +2170,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                rc = listening_get_next(seq, v);
                if (!rc) {
                        inet_listen_unlock(&tcp_hashinfo);
-                       local_bh_disable();
                        st->state = TCP_SEQ_STATE_ESTABLISHED;
                        rc        = established_get_first(seq);
                }
@@ -2202,8 +2201,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
        case TCP_SEQ_STATE_TIME_WAIT:
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
-                       read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
-               local_bh_enable();
+                       read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
                break;
        }
 }
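
The remaining hunks all serve one change: the /proc/net/tcp established-hash walk used to disable bottom halves across the whole traversal and sprinkle cond_resched_softirq() between buckets to stay responsive. Folding BH protection into each bucket's own lock (read_lock_bh/read_unlock_bh) blocks BHs only while a single chain is being inspected, and the iterator can reschedule between buckets with no extra ceremony. A userspace analogue with per-bucket rwlocks (structure and names illustrative):

    #include <pthread.h>

    #define NBUCKETS 16

    static pthread_rwlock_t bucket_lock[NBUCKETS];
    static int bucket_len[NBUCKETS];

    static void init_buckets(void)
    {
            for (int i = 0; i < NBUCKETS; i++)
                    pthread_rwlock_init(&bucket_lock[i], NULL);
    }

    /* Walk every bucket under its own lock; between buckets nothing is
     * held, so the walker may block there -- the analogue of dropping
     * the global local_bh_disable()/cond_resched_softirq() pairing. */
    static int count_entries(void)
    {
            int total = 0;

            for (int i = 0; i < NBUCKETS; i++) {
                    pthread_rwlock_rdlock(&bucket_lock[i]);
                    total += bucket_len[i];
                    pthread_rwlock_unlock(&bucket_lock[i]);
                    /* safe to sleep/reschedule here: no lock held */
            }
            return total;
    }

    int main(void)
    {
            init_buckets();
            return count_entries() == 0 ? 0 : 1;
    }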