diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6f0a7e30ceac45bc66ee479bf5ea7bcc80527ae9..75b68116682ae2912ca8e0dd4faf84eb4993e6bd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -112,9 +112,9 @@ static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
        u32 restart_cwnd = tcp_init_cwnd(tp, dst);
        u32 cwnd = tp->snd_cwnd;
 
-       tcp_ca_event(tp, CA_EVENT_CWND_RESTART);
+       tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
 
-       tp->snd_ssthresh = tcp_current_ssthresh(tp);
+       tp->snd_ssthresh = tcp_current_ssthresh(sk);
        restart_cwnd = min(restart_cwnd, cwnd);
 
        while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
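
These hunks are part of the conversion that moved congestion-control state out of struct tcp_sock and into the protocol-neutral struct inet_connection_sock, so the helpers now take the struct sock itself. A minimal sketch of how the sk-based tcp_ca_event() dispatches through the new layout (an illustration of the 2.6.14-era helper, not authoritative):

	static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
	{
		const struct inet_connection_sock *icsk = inet_csk(sk);

		/* Notify the congestion-control module, if it cares. */
		if (icsk->icsk_ca_ops->cwnd_event)
			icsk->icsk_ca_ops->cwnd_event(sk, event);
	}
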
@@ -265,6 +265,7 @@ static __inline__ u16 tcp_select_window(struct sock *sk)
 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 {
        if (skb != NULL) {
+               const struct inet_connection_sock *icsk = inet_csk(sk);
                struct inet_sock *inet = inet_sk(sk);
                struct tcp_sock *tp = tcp_sk(sk);
                struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
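
The new icsk local merely caches inet_csk(sk), which is a pure cast: struct inet_connection_sock embeds struct inet_sock (which in turn embeds struct sock) as its first member, so all three views alias the same object. A sketch of the accessor:

	static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
	{
		/* Valid because inet_sock/sock sit at offset 0. */
		return (struct inet_connection_sock *)sk;
	}
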
@@ -280,8 +281,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 #define SYSCTL_FLAG_SACK       0x4
 
                /* If congestion control is doing timestamping */
-               if (tp->ca_ops->rtt_sample)
-                       do_gettimeofday(&skb->stamp);
+               if (icsk->icsk_ca_ops->rtt_sample)
+                       __net_timestamp(skb);
 
                sysctl_flags = 0;
                if (tcb->flags & TCPCB_FLAG_SYN) {
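
Besides switching from tp->ca_ops to icsk->icsk_ca_ops, this hunk replaces the open-coded do_gettimeofday() on the old skb->stamp with the __net_timestamp() helper, which fills the renamed tstamp field. A rough sketch of what such a helper does, assuming the timeval-based tstamp of this era (the exact body may differ):

	static inline void __net_timestamp(struct sk_buff *skb)
	{
		struct timeval tv;

		/* Record the send time for the congestion module's
		 * rtt_sample() hook to consume when the ACK arrives. */
		do_gettimeofday(&tv);
		skb_set_timestamp(skb, &tv);
	}
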
@@ -308,7 +309,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
                }
                
                if (tcp_packets_in_flight(tp) == 0)
-                       tcp_ca_event(tp, CA_EVENT_TX_START);
+                       tcp_ca_event(sk, CA_EVENT_TX_START);
 
                th = (struct tcphdr *) skb_push(skb, tcp_header_size);
                skb->h.th = th;
@@ -366,7 +367,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
                if (err <= 0)
                        return err;
 
-               tcp_enter_cwr(tp);
+               tcp_enter_cwr(sk);
 
                /* NET_XMIT_CN is special. It does not guarantee,
                 * that this packet is lost. It tells that device
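
tcp_enter_cwr() likewise takes the socket now that the congestion state lives in icsk. For context, a simplified illustration of what entering CWR involves; the real function also reduces ssthresh, so treat this as a sketch rather than the kernel's code:

	static inline void tcp_enter_cwr(struct sock *sk)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		tp->prior_ssthresh = 0;
		if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
			/* ssthresh is lowered here in the real code. */
			tcp_set_ca_state(sk, TCP_CA_CWR);
		}
	}
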
@@ -482,7 +483,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
         * skbs, which it never sent before. --ANK
         */
        TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
-       buff->stamp = skb->stamp;
+       buff->tstamp = skb->tstamp;
 
        if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
                tp->lost_out -= tcp_skb_pcount(skb);
@@ -905,12 +906,13 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
  */
 static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
 {
+       const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 send_win, cong_win, limit, in_flight;
 
        if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
                return 0;
 
-       if (tp->ca_state != TCP_CA_Open)
+       if (icsk->icsk_ca_state != TCP_CA_Open)
                return 0;
 
        in_flight = tcp_packets_in_flight(tp);
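
Deferral is only attempted in the TCP_CA_Open state, now read through icsk. For orientation, an illustration of the window arithmetic this function performs just below the hunk (tso_defer_limit is a made-up helper name; the real code computes these values inline):

	static u32 tso_defer_limit(const struct tcp_sock *tp, const struct sk_buff *skb)
	{
		u32 in_flight = tcp_packets_in_flight(tp);
		u32 send_win  = tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq;
		u32 cong_win  = (tp->snd_cwnd - in_flight) * tp->mss_cache;

		/* Defer only while both windows leave room to grow a burst. */
		return min(send_win, cong_win);
	}
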
@@ -1287,6 +1289,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
  */ 
 void tcp_simple_retransmit(struct sock *sk)
 {
+       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        unsigned int mss = tcp_current_mss(sk, 0);
@@ -1317,12 +1320,12 @@ void tcp_simple_retransmit(struct sock *sk)
         * in network, but units changed and effective
         * cwnd/ssthresh really reduced now.
         */
-       if (tp->ca_state != TCP_CA_Loss) {
+       if (icsk->icsk_ca_state != TCP_CA_Loss) {
                tp->high_seq = tp->snd_nxt;
-               tp->snd_ssthresh = tcp_current_ssthresh(tp);
+               tp->snd_ssthresh = tcp_current_ssthresh(sk);
                tp->prior_ssthresh = 0;
                tp->undo_marker = 0;
-               tcp_set_ca_state(tp, TCP_CA_Loss);
+               tcp_set_ca_state(sk, TCP_CA_Loss);
        }
        tcp_xmit_retransmit_queue(sk);
 }
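
The Loss-state transition goes through tcp_set_ca_state(), which also moved to the sk-based signature. A sketch close to the 2.6.14-era helper:

	static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
	{
		struct inet_connection_sock *icsk = inet_csk(sk);

		/* Let the congestion module see the transition first. */
		if (icsk->icsk_ca_ops->set_state)
			icsk->icsk_ca_ops->set_state(sk, ca_state);
		icsk->icsk_ca_state = ca_state;
	}
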
@@ -1462,6 +1465,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
  */
 void tcp_xmit_retransmit_queue(struct sock *sk)
 {
+       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        int packet_cnt = tp->lost_out;
@@ -1485,7 +1489,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                                if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
                                        if (tcp_retransmit_skb(sk, skb))
                                                return;
-                                       if (tp->ca_state != TCP_CA_Loss)
+                                       if (icsk->icsk_ca_state != TCP_CA_Loss)
                                                NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
                                        else
                                                NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
@@ -1493,7 +1497,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                                        if (skb ==
                                            skb_peek(&sk->sk_write_queue))
                                                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-                                                                         inet_csk(sk)->icsk_rto);
+                                                                         inet_csk(sk)->icsk_rto,
+                                                                         TCP_RTO_MAX);
                                }
 
                                packet_cnt -= tcp_skb_pcount(skb);
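
inet_csk_reset_xmit_timer() grew a fourth argument, an upper bound on the timeout, and every TCP call site in this file now passes TCP_RTO_MAX explicitly. A reduced sketch of the clamping behaviour (delayed-ACK handling and debug checks elided):

	static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
						     unsigned long when,
						     const unsigned long max_when)
	{
		struct inet_connection_sock *icsk = inet_csk(sk);

		if (when > max_when)		/* the new clamp */
			when = max_when;

		if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
			icsk->icsk_pending = what;
			icsk->icsk_timeout = jiffies + when;
			sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
				       icsk->icsk_timeout);
		}
		/* ICSK_TIME_DACK arms icsk_delack_timer instead (elided). */
	}
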
@@ -1506,7 +1511,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
        /* OK, demanded retransmission is finished. */
 
        /* Forward retransmissions are possible only during Recovery. */
-       if (tp->ca_state != TCP_CA_Recovery)
+       if (icsk->icsk_ca_state != TCP_CA_Recovery)
                return;
 
        /* No forward retransmissions in Reno are possible. */
@@ -1546,7 +1551,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                        break;
 
                if (skb == skb_peek(&sk->sk_write_queue))
-                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
+                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                                 inet_csk(sk)->icsk_rto,
+                                                 TCP_RTO_MAX);
 
                NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
        }
@@ -1575,7 +1582,7 @@ void tcp_send_fin(struct sock *sk)
        } else {
                /* Socket is locked, keep trying until memory is available. */
                for (;;) {
-                       skb = alloc_skb(MAX_TCP_HEADER, GFP_KERNEL);
+                       skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
                        if (skb)
                                break;
                        yield();
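
alloc_skb_fclone() allocates the skb together with a companion "fast clone" from a dedicated cache. That pays off for TCP because tcp_transmit_skb() clones every queued packet before handing it to the IP layer. A usage sketch:

	/* The later skb_clone() can hand back the pre-allocated
	 * companion instead of hitting the slab allocator again. */
	struct sk_buff *skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
	if (skb) {
		struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
		/* ... queue skb; clone goes down the stack ... */
	}
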
@@ -1797,7 +1804,7 @@ int tcp_connect(struct sock *sk)
 
        tcp_connect_init(sk);
 
-       buff = alloc_skb(MAX_TCP_HEADER + 15, sk->sk_allocation);
+       buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
        if (unlikely(buff == NULL))
                return -ENOBUFS;
 
@@ -1826,7 +1833,8 @@ int tcp_connect(struct sock *sk)
        TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
 
        /* Timer for repeating the SYN until an answer. */
-       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
+       inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+                                 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
        return 0;
 }
 
@@ -1901,7 +1909,8 @@ void tcp_send_ack(struct sock *sk)
                if (buff == NULL) {
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
-                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX);
+                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+                                                 TCP_DELACK_MAX, TCP_RTO_MAX);
                        return;
                }
 
@@ -2023,7 +2032,7 @@ void tcp_send_probe0(struct sock *sk)
 
        if (tp->packets_out || !sk->sk_send_head) {
                /* Cancel probe timer, if it is not required. */
-               tp->probes_out = 0;
+               icsk->icsk_probes_out = 0;
                icsk->icsk_backoff = 0;
                return;
        }
@@ -2031,21 +2040,23 @@ void tcp_send_probe0(struct sock *sk)
        if (err <= 0) {
                if (icsk->icsk_backoff < sysctl_tcp_retries2)
                        icsk->icsk_backoff++;
-               tp->probes_out++;
+               icsk->icsk_probes_out++;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 
-                                         min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX));
+                                         min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
+                                         TCP_RTO_MAX);
        } else {
                /* If packet was not sent due to local congestion,
-                * do not backoff and do not remember probes_out.
+                * do not backoff and do not remember icsk_probes_out.
                 * Let local senders to fight for local resources.
                 *
                 * Use accumulated backoff yet.
                 */
-               if (!tp->probes_out)
-                       tp->probes_out=1;
+               if (!icsk->icsk_probes_out)
+                       icsk->icsk_probes_out = 1;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 
                                          min(icsk->icsk_rto << icsk->icsk_backoff,
-                                             TCP_RESOURCE_PROBE_INTERVAL));
+                                             TCP_RESOURCE_PROBE_INTERVAL),
+                                         TCP_RTO_MAX);
        }
 }
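
Finally, the zero-window probe counter moves from tp->probes_out into inet_connection_sock, where protocol-neutral code (and DCCP, the motivation for the icsk work) can share it. Where the field now lives, abridged and assuming the 2.6.14-era layout:

	struct inet_connection_sock {
		/* inet_sock has to be the first member */
		struct inet_sock	icsk_inet;
		/* ... other members elided ... */
		__u8			icsk_probes_out; /* unanswered 0-window probes */
		/* ... */
	};
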