[TCP]: Advance fast path pointer for first block only
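
The in-place sort of the SACK blocks can move the first block of the
option to a different index, so remember where it ends up
(first_sack_index) and advance the fast-path hint
(tp->fastpath_skb_hint / tp->fastpath_cnt_hint) only while walking that
block. The fast path assumes that only the first block advances on the
next ACK; updating the hint from the other blocks would leave it
pointing at the wrong skb. The walk state for the remaining blocks is
carried in the local cached_skb / cached_fack_count instead.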
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9304034c0c471cf376fb13e5fe41754ea707c2be..7670ef968dceda068706c019611aba2f15b24601 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -936,13 +936,16 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
        struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
+       struct sk_buff *cached_skb;
        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
        int reord = tp->packets_out;
        int prior_fackets;
        u32 lost_retrans = 0;
        int flag = 0;
        int dup_sack = 0;
+       int cached_fack_count;
        int i;
+       int first_sack_index;
 
        if (!tp->sacked_out)
                tp->fackets_out = 0;
@@ -1000,6 +1003,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                }
        }
 
+       first_sack_index = 0;
        if (flag)
                num_sacks = 1;
        else {
@@ -1011,10 +1015,15 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                        for (j = 0; j < i; j++){
                                if (after(ntohl(sp[j].start_seq),
                                          ntohl(sp[j+1].start_seq))){
-                                       sp[j].start_seq = htonl(tp->recv_sack_cache[j+1].start_seq);
-                                       sp[j].end_seq = htonl(tp->recv_sack_cache[j+1].end_seq);
-                                       sp[j+1].start_seq = htonl(tp->recv_sack_cache[j].start_seq);
-                                       sp[j+1].end_seq = htonl(tp->recv_sack_cache[j].end_seq);
+                                       struct tcp_sack_block_wire tmp;
+
+                                       tmp = sp[j];
+                                       sp[j] = sp[j+1];
+                                       sp[j+1] = tmp;
+
+                                       /* Track where the first SACK block goes to */
+                                       if (j == first_sack_index)
+                                               first_sack_index = j+1;
                                }
 
                        }
@@ -1024,20 +1033,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        /* clear flag as used for different purpose in following code */
        flag = 0;
 
+       /* Use SACK fastpath hint if valid */
+       cached_skb = tp->fastpath_skb_hint;
+       cached_fack_count = tp->fastpath_cnt_hint;
+       if (!cached_skb) {
+               cached_skb = sk->sk_write_queue.next;
+               cached_fack_count = 0;
+       }
+
        for (i=0; i<num_sacks; i++, sp++) {
                struct sk_buff *skb;
                __u32 start_seq = ntohl(sp->start_seq);
                __u32 end_seq = ntohl(sp->end_seq);
                int fack_count;
 
-               /* Use SACK fastpath hint if valid */
-               if (tp->fastpath_skb_hint) {
-                       skb = tp->fastpath_skb_hint;
-                       fack_count = tp->fastpath_cnt_hint;
-               } else {
-                       skb = sk->sk_write_queue.next;
-                       fack_count = 0;
-               }
+               skb = cached_skb;
+               fack_count = cached_fack_count;
 
                /* Event "B" in the comment above. */
                if (after(end_seq, tp->high_seq))
@@ -1047,8 +1058,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                        int in_sack, pcount;
                        u8 sacked;
 
-                       tp->fastpath_skb_hint = skb;
-                       tp->fastpath_cnt_hint = fack_count;
+                       cached_skb = skb;
+                       cached_fack_count = fack_count;
+                       if (i == first_sack_index) {
+                               tp->fastpath_skb_hint = skb;
+                               tp->fastpath_cnt_hint = fack_count;
+                       }
 
                        /* The retransmission queue is always in order, so
                         * we can short-circuit the walk early.
@@ -4235,7 +4250,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
                tp->copied_seq = tp->rcv_nxt;
-               mb();
+               smp_mb();
                tcp_set_state(sk, TCP_ESTABLISHED);
 
                security_inet_conn_established(sk, skb);
@@ -4420,9 +4435,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                         * But, this leaves one open to an easy denial of
                         * service attack, and SYN cookies can't defend
                         * against this problem. So, we drop the data
-                        * in the interest of security over speed.
+                        * in the interest of security over speed unless
+                        * it's still in use.
                         */
-                       goto discard;
+                       kfree_skb(skb);
+                       return 0;
                }
                goto discard;
 
@@ -4483,7 +4500,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                case TCP_SYN_RECV:
                        if (acceptable) {
                                tp->copied_seq = tp->rcv_nxt;
-                               mb();
+                               smp_mb();
                                tcp_set_state(sk, TCP_ESTABLISHED);
                                sk->sk_state_change(sk);