Merge branch 'for-2.6.26' of master.kernel.org:/pub/scm/linux/kernel/git/jwboyer...

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1bdb1bd22134ae0f20ce09f297791d4468b1da80..eda4f4a233f36e940fe81cf83188dcbc2817e0fa 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1172,8 +1172,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
                           struct tcp_sack_block_wire *sp, int num_sacks,
                           u32 prior_snd_una)
 {
-       u32 start_seq_0 = ntohl(get_unaligned(&sp[0].start_seq));
-       u32 end_seq_0 = ntohl(get_unaligned(&sp[0].end_seq));
+       u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
+       u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
        int dup_sack = 0;
 
        if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
@@ -1181,8 +1181,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
                tcp_dsack_seen(tp);
                NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
        } else if (num_sacks > 1) {
-               u32 end_seq_1 = ntohl(get_unaligned(&sp[1].end_seq));
-               u32 start_seq_1 = ntohl(get_unaligned(&sp[1].start_seq));
+               u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
+               u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
 
                if (!after(end_seq_0, end_seq_1) &&
                    !before(start_seq_0, start_seq_1)) {
@@ -1453,8 +1453,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
        for (i = 0; i < num_sacks; i++) {
                int dup_sack = !i && found_dup_sack;
 
-               sp[used_sacks].start_seq = ntohl(get_unaligned(&sp_wire[i].start_seq));
-               sp[used_sacks].end_seq = ntohl(get_unaligned(&sp_wire[i].end_seq));
+               sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
+               sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
 
                if (!tcp_is_sackblock_valid(tp, dup_sack,
                                            sp[used_sacks].start_seq,
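
The three hunks above all apply the same conversion: the open-coded
ntohl(get_unaligned(...)) pairs are replaced by the sized helper
get_unaligned_be32() from <asm/unaligned.h>, which folds the unaligned
load and the big-endian-to-host conversion into a single call. A minimal
sketch of the equivalence (the two wrapper functions are illustrative,
not kernel code):

	#include <asm/unaligned.h>

	/* Old pattern: unaligned fetch of a __be32, then a byte swap. */
	static u32 seq_old(const __be32 *p)
	{
		return ntohl(get_unaligned(p));
	}

	/* New pattern: one helper; no __be32 cast needed at the call
	 * site because it takes a void pointer.
	 */
	static u32 seq_new(const void *p)
	{
		return get_unaligned_be32(p);
	}
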
@@ -2298,7 +2298,7 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp)
 {
        return !tp->retrans_stamp ||
                (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
-                (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0);
+                before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
 }
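
The tcp_packet_delayed() change is purely cosmetic: the open-coded
signed-difference test is exactly what the wrap-safe sequence helper
expands to. For reference, before() as defined in include/net/tcp.h:

	static inline int before(__u32 seq1, __u32 seq2)
	{
		return (__s32)(seq1-seq2) < 0;
	}

Using the named helper makes it explicit that the timestamp comparison
is intended to be modulo 2^32, like a TCP sequence-number comparison.
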
 
 /* Undo procedures. */
@@ -3340,7 +3340,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                        switch (opcode) {
                        case TCPOPT_MSS:
                                if (opsize == TCPOLEN_MSS && th->syn && !estab) {
-                                       u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
+                                       u16 in_mss = get_unaligned_be16(ptr);
                                        if (in_mss) {
                                                if (opt_rx->user_mss &&
                                                    opt_rx->user_mss < in_mss)
@@ -3369,8 +3369,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                    ((estab && opt_rx->tstamp_ok) ||
                                     (!estab && sysctl_tcp_timestamps))) {
                                        opt_rx->saw_tstamp = 1;
-                                       opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr));
-                                       opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4)));
+                                       opt_rx->rcv_tsval = get_unaligned_be32(ptr);
+                                       opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
                                }
                                break;
                        case TCPOPT_SACK_PERM:
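
The timestamp hunk likewise drops the (__be32 *) casts, since
get_unaligned_be32() takes a void pointer. The offsets follow the
RFC 1323 wire layout; a sketch in comment form (ptr points just past
the kind and length octets):

	/*
	 * TCP timestamp option (kind 8, length 10):
	 *
	 *   +--------+--------+----------------+----------------+
	 *   | Kind=8 | Len=10 |   TSval (4 B)  |   TSecr (4 B)  |
	 *   +--------+--------+----------------+----------------+
	 *                     ^ptr             ^ptr + 4
	 */
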
@@ -3854,8 +3854,28 @@ static void tcp_ofo_queue(struct sock *sk)
        }
 }
 
+static int tcp_prune_ofo_queue(struct sock *sk);
 static int tcp_prune_queue(struct sock *sk);
 
+static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+{
+       if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+           !sk_rmem_schedule(sk, size)) {
+
+               if (tcp_prune_queue(sk) < 0)
+                       return -1;
+
+               if (!sk_rmem_schedule(sk, size)) {
+                       if (!tcp_prune_ofo_queue(sk))
+                               return -1;
+
+                       if (!sk_rmem_schedule(sk, size))
+                               return -1;
+               }
+       }
+       return 0;
+}
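
The new tcp_try_rmem_schedule() helper factors out the admission check
that was duplicated at the two call sites patched below. The escalation
order matters: tcp_prune_queue() is tried first (it collapses queues
before dropping data, and as of this patch also purges the out-of-order
queue as its last resort), and only if memory still cannot be scheduled
is tcp_prune_ofo_queue() invoked directly as a final attempt before the
segment is dropped.
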
+
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
        struct tcphdr *th = tcp_hdr(skb);
@@ -3905,12 +3925,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
                if (eaten <= 0) {
 queue_and_out:
                        if (eaten < 0 &&
-                           (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-                            !sk_rmem_schedule(sk, skb->truesize))) {
-                               if (tcp_prune_queue(sk) < 0 ||
-                                   !sk_rmem_schedule(sk, skb->truesize))
-                                       goto drop;
-                       }
+                           tcp_try_rmem_schedule(sk, skb->truesize))
+                               goto drop;
+
                        skb_set_owner_r(skb, sk);
                        __skb_queue_tail(&sk->sk_receive_queue, skb);
                }
@@ -3979,12 +3996,8 @@ drop:
 
        TCP_ECN_check_ce(tp, skb);
 
-       if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-           !sk_rmem_schedule(sk, skb->truesize)) {
-               if (tcp_prune_queue(sk) < 0 ||
-                   !sk_rmem_schedule(sk, skb->truesize))
-                       goto drop;
-       }
+       if (tcp_try_rmem_schedule(sk, skb->truesize))
+               goto drop;
 
        /* Disable header prediction. */
        tp->pred_flags = 0;
@@ -4211,6 +4224,32 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
        }
 }
 
+/*
+ * Purge the out-of-order queue.
+ * Return true if queue was pruned.
+ */
+static int tcp_prune_ofo_queue(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       int res = 0;
+
+       if (!skb_queue_empty(&tp->out_of_order_queue)) {
+               NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+               __skb_queue_purge(&tp->out_of_order_queue);
+
+               /* Reset SACK state.  A conforming SACK implementation will
+                * do the same at a timeout based retransmit.  When a connection
+                * is in a sad state like this, we care only about integrity
+                * of the connection not performance.
+                */
+               if (tp->rx_opt.sack_ok)
+                       tcp_sack_reset(&tp->rx_opt);
+               sk_mem_reclaim(sk);
+               res = 1;
+       }
+       return res;
+}
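
tcp_prune_ofo_queue() returns nonzero only when it actually freed
something, which is what lets tcp_try_rmem_schedule() above give up
early if the out-of-order queue was already empty. The SACK reset is
needed because the advertised SACK blocks described the segments just
purged; discarding SACKed data (reneging) is permitted, but the stale
blocks must not be advertised afterwards.
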
+
 /* Reduce allocated memory if we can, trying to get
  * the socket within its memory limits again.
  *
@@ -4244,20 +4283,7 @@ static int tcp_prune_queue(struct sock *sk)
        /* Collapsing did not help, destructive actions follow.
         * This must not ever occur. */
 
-       /* First, purge the out_of_order queue. */
-       if (!skb_queue_empty(&tp->out_of_order_queue)) {
-               NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
-               __skb_queue_purge(&tp->out_of_order_queue);
-
-               /* Reset SACK state.  A conforming SACK implementation will
-                * do the same at a timeout based retransmit.  When a connection
-                * is in a sad state like this, we care only about integrity
-                * of the connection not performance.
-                */
-               if (tcp_is_sack(tp))
-                       tcp_sack_reset(&tp->rx_opt);
-               sk_mem_reclaim(sk);
-       }
+       tcp_prune_ofo_queue(sk);
 
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
                return 0;
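
With the purge logic moved into its own function, tcp_prune_queue()
simply calls tcp_prune_ofo_queue() at its destructive stage and ignores
the return value; the subsequent sk_rmem_alloc check decides whether
the prune was sufficient, exactly as before.
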
@@ -4899,8 +4925,7 @@ step5:
        tcp_data_snd_check(sk);
        tcp_ack_snd_check(sk);
 
-       if (tcp_defer_accept_check(sk))
-               return -1;
+       tcp_defer_accept_check(sk);
        return 0;
 
 csum_error: