#include <linux/compiler.h>
#include <linux/module.h>
-#include <linux/smp_lock.h>
/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;
/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
-static void update_send_head(struct sock *sk, struct tcp_sock *tp,
- struct sk_buff *skb)
+static inline void tcp_packets_out_inc(struct sock *sk,
+ const struct sk_buff *skb)
{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int orig = tp->packets_out;
+
+ tp->packets_out += tcp_skb_pcount(skb);
+ if (!orig)
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+}
+
+static void update_send_head(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
tcp_advance_send_head(sk, skb);
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
- tcp_packets_out_inc(sk, tp, skb);
+ tcp_packets_out_inc(sk, skb);
}
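The refactor above follows a pattern repeated throughout this patch: helpers now take only the struct sock and derive the tcp_sock themselves instead of having callers pass both. For reference, tcp_sk() is just a type-safe cast, since struct tcp_sock embeds struct sock at offset zero (as in include/linux/tcp.h of this era):

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	return (struct tcp_sock *)sk;
}

Note also that tcp_packets_out_inc arms the retransmit timer only on the transition of packets_out from zero, i.e. when the first unacknowledged packet enters flight.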
/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
-static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
+static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
+ struct tcp_sock *tp = tcp_sk(sk);
+
if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
return tp->snd_nxt;
else
return tp->snd_una+tp->snd_wnd;
}
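Both branches rely on the wrap-safe sequence-space comparisons from include/net/tcp.h, which treat the 32-bit sequence space as a signed circle:

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;
}

#define after(seq2, seq1)	before(seq1, seq2)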
+static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
+ struct sk_buff *skb)
+{
+ TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
+ if (!(tp->ecn_flags&TCP_ECN_OK))
+ TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
+}
+
+static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tp->ecn_flags = 0;
+ if (sysctl_tcp_ecn) {
+ TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
+ tp->ecn_flags = TCP_ECN_OK;
+ }
+}
+
+static __inline__ void
+TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
+{
+ if (inet_rsk(req)->ecn_ok)
+ th->ece = 1;
+}
+
+static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
+ int tcp_header_len)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->ecn_flags & TCP_ECN_OK) {
+ /* Not-retransmitted data segment: set ECT and inject CWR. */
+ if (skb->len != tcp_header_len &&
+ !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
+ INET_ECN_xmit(sk);
+ if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
+ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
+ tcp_hdr(skb)->cwr = 1;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+ }
+ } else {
+ /* ACK or retransmitted segment: clear ECT|CE */
+ INET_ECN_dontxmit(sk);
+ }
+ if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
+ tcp_hdr(skb)->ece = 1;
+ }
+}
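The ecn_flags bits tested above form a small state machine: TCP_ECN_OK records that ECN was negotiated, TCP_ECN_QUEUE_CWR that we owe the peer a CWR on the next fresh data segment, and TCP_ECN_DEMAND_CWR that we must keep echoing ECE until a CWR arrives. Their definitions, as in include/net/tcp.h of kernels of this vintage:

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4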
+
static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
__u32 tstamp, __u8 **md5_hash)
{
/* If congestion control is doing timestamping, we must
* take such a timestamp before we potentially clone/copy.
*/
- if (icsk->icsk_ca_ops->rtt_sample)
+ if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
__net_timestamp(skb);
if (likely(clone_it)) {
tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
#endif
- th = (struct tcphdr *) skb_push(skb, tcp_header_size);
- skb->h.th = th;
+ skb_push(skb, tcp_header_size);
+ skb_reset_transport_header(skb);
skb_set_owner_w(skb, sk);
/* Build TCP header and checksum it. */
+ th = tcp_hdr(skb);
th->source = inet->sport;
th->dest = inet->dport;
th->seq = htonl(tcb->seq);
md5 ? &md5_hash_location :
#endif
NULL);
- TCP_ECN_send(sk, tp, skb, tcp_header_size);
+ TCP_ECN_send(sk, skb, tcp_header_size);
}
#ifdef CONFIG_TCP_MD5SIG
tp->af_specific->calc_md5_hash(md5_hash_location,
md5,
sk, NULL, NULL,
- skb->h.th,
+ tcp_hdr(skb),
sk->sk_protocol,
skb->len);
}
skb_shinfo(skb)->gso_size = 0;
skb_shinfo(skb)->gso_type = 0;
} else {
- unsigned int factor;
-
- factor = skb->len + (mss_now - 1);
- factor /= mss_now;
- skb_shinfo(skb)->gso_segs = factor;
+ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
skb_shinfo(skb)->gso_size = mss_now;
skb_shinfo(skb)->gso_type = sk->sk_gso_type;
}
}
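DIV_ROUND_UP is the stock helper from linux/kernel.h and computes exactly what the removed open-coded arithmetic did:

#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))

For example, a 4000-byte skb with mss_now = 1460 yields (4000 + 1459) / 1460 = 3 GSO segments.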
+/* When a modification to fackets_out becomes necessary, we need to check
+ * whether skb is counted in fackets_out or not. Another important thing is
+ * to tweak the SACK fastpath hint too, since it would otherwise overwrite
+ * all changes unless the hint is also changed.
+ */
+static void tcp_adjust_fackets_out(struct tcp_sock *tp, struct sk_buff *skb,
+ int decr)
+{
+ if (!tp->sacked_out || tcp_is_reno(tp))
+ return;
+
+ if (!before(tp->highest_sack, TCP_SKB_CB(skb)->seq))
+ tp->fackets_out -= decr;
+
+ /* cnt_hint is "off-by-one" compared with fackets_out (see sacktag) */
+ if (tp->fastpath_skb_hint != NULL &&
+ after(TCP_SKB_CB(tp->fastpath_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
+ tp->fastpath_cnt_hint -= decr;
+}
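tcp_is_reno() and tcp_is_sack() replace open-coded tests of rx_opt.sack_ok throughout this patch. A minimal sketch of the predicates, assuming the include/net/tcp.h definitions where bit 0 of rx_opt.sack_ok records whether SACK was negotiated:

static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & 1;
}

static inline int tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}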
+
/* Function to create two new TCP segments. Shrinks the given segment
* to the specified size and appends a new segment with the rest of the
* packet to the list. This won't be called frequently, I hope.
BUG_ON(len > skb->len);
- clear_all_retrans_hints(tp);
+ tcp_clear_retrans_hints_partial(tp);
nsize = skb_headlen(skb) - len;
if (nsize < 0)
nsize = 0;
TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
+ if (tcp_is_sack(tp) && tp->sacked_out &&
+ (TCP_SKB_CB(skb)->seq == tp->highest_sack))
+ tp->highest_sack = TCP_SKB_CB(buff)->seq;
+
/* PSH and FIN should only be set in the second packet. */
flags = TCP_SKB_CB(skb)->flags;
TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out -= diff;
- if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
tp->lost_out -= diff;
- tp->left_out -= diff;
- }
-
- if (diff > 0) {
- /* Adjust Reno SACK estimate. */
- if (!tp->rx_opt.sack_ok) {
- tp->sacked_out -= diff;
- if ((int)tp->sacked_out < 0)
- tp->sacked_out = 0;
- tcp_sync_left_out(tp);
- }
- tp->fackets_out -= diff;
- if ((int)tp->fackets_out < 0)
- tp->fackets_out = 0;
+ /* Adjust Reno SACK estimate. */
+ if (tcp_is_reno(tp) && diff > 0) {
+ tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
+ tcp_verify_left_out(tp);
}
+ tcp_adjust_fackets_out(tp, skb, diff);
}
/* Link BUFF into the send queue. */
}
skb_shinfo(skb)->nr_frags = k;
- skb->tail = skb->data;
+ skb_reset_tail_pointer(skb);
skb->data_len -= len;
skb->len = skb->data_len;
}
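skb_reset_tail_pointer() abstracts away whether skb->tail is a pointer or an offset (CONFIG_NET_SKBUFF_DATA_USES_OFFSET). In the pointer build it reduces to exactly the removed assignment:

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}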
/* Congestion window validation. (RFC2861) */
-static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
+static void tcp_cwnd_validate(struct sock *sk)
{
+ struct tcp_sock *tp = tcp_sk(sk);
__u32 packets_out = tp->packets_out;
if (packets_out >= tp->snd_cwnd) {
if (nonagle & TCP_NAGLE_PUSH)
return 1;
- /* Don't use the nagle rule for urgent data (or for the final FIN). */
- if (tp->urg_mode ||
+ /* Don't use the nagle rule for urgent data (or for the final FIN).
+ * Nagle can be ignored during F-RTO too (see RFC4138).
+ */
+ if (tp->urg_mode || (tp->frto_counter == 2) ||
(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
return 1;
return cwnd_quota;
}
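For context, the companion tcp_nagle_check() that decides whether a sub-MSS segment must be held back reads roughly as follows in this kernel (a sketch; tcp_minshall_check() implements the Minshall variant elsewhere in the file):

static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle & TCP_NAGLE_CORK) ||
		 (!nonagle && tp->packets_out && tcp_minshall_check(tp))));
}

The new frto_counter == 2 escape matters because during the second step of F-RTO the sender must transmit new data to probe the network; delaying it under Nagle would defeat the spurious-RTO detection.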
-int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
+int tcp_may_send_now(struct sock *sk)
{
+ struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
return (skb &&
/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
-static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
+ struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 send_win, cong_win, limit, in_flight;
skb = tcp_send_head(sk);
tcp_insert_write_queue_before(nskb, skb, sk);
- tcp_advance_send_head(sk, skb);
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
/* Decrement cwnd here because we are sending
* effectively two packets. */
tp->snd_cwnd--;
- update_send_head(sk, tp, nskb);
+ update_send_head(sk, nskb);
icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
nonagle : TCP_NAGLE_PUSH))))
break;
} else {
- if (tcp_tso_should_defer(sk, tp, skb))
+ if (tcp_tso_should_defer(sk, skb))
break;
}
/* Advance the send_head. This one is sent out.
* This call will increment packets_out.
*/
- update_send_head(sk, tp, skb);
+ update_send_head(sk, skb);
tcp_minshall_update(tp, mss_now, skb);
sent_pkts++;
}
if (likely(sent_pkts)) {
- tcp_cwnd_validate(sk, tp);
+ tcp_cwnd_validate(sk);
return 0;
}
return !tp->packets_out && tcp_send_head(sk);
/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
* The socket must be locked by the caller.
*/
-void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
- unsigned int cur_mss, int nonagle)
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+ int nonagle)
{
struct sk_buff *skb = tcp_send_head(sk);
if (skb) {
if (tcp_write_xmit(sk, cur_mss, nonagle))
- tcp_check_probe_timer(sk, tp);
+ tcp_check_probe_timer(sk);
}
}
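Most call sites that don't already have cur_mss handy go through the tcp_push_pending_frames() wrapper in include/net/tcp.h, which after this patch likewise loses its tcp_sock argument; a sketch of the resulting form:

static inline void tcp_push_pending_frames(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	__tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle);
}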
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
- update_send_head(sk, tp, skb);
- tcp_cwnd_validate(sk, tp);
+ update_send_head(sk, skb);
+ tcp_cwnd_validate(sk);
return;
}
}
if (window <= free_space - mss || window > free_space)
window = (free_space/mss)*mss;
else if (mss == full_space &&
- free_space > window + full_space/2)
+ free_space > window + full_space/2)
window = free_space;
}
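As a worked example of the rounding branch: with free_space = 10000 bytes and mss = 1460, the advertised window becomes (10000 / 1460) * 1460 = 6 * 1460 = 8760, keeping the window an integral number of full-sized segments.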
BUG_ON(tcp_skb_pcount(skb) != 1 ||
tcp_skb_pcount(next_skb) != 1);
- /* changing transmit queue under us so clear hints */
- clear_all_retrans_hints(tp);
+ if (WARN_ON(tcp_is_sack(tp) && tp->sacked_out &&
+ (TCP_SKB_CB(next_skb)->seq == tp->highest_sack)))
+ return;
/* Ok. We will be able to collapse the packet. */
tcp_unlink_write_queue(next_skb, sk);
- memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
+ skb_copy_from_linear_data(next_skb,
+ skb_put(skb, next_skb_size),
+ next_skb_size);
if (next_skb->ip_summed == CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_PARTIAL;
TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
tp->retrans_out -= tcp_skb_pcount(next_skb);
- if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
+ if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST)
tp->lost_out -= tcp_skb_pcount(next_skb);
- tp->left_out -= tcp_skb_pcount(next_skb);
- }
/* Reno case is special. Sigh... */
- if (!tp->rx_opt.sack_ok && tp->sacked_out) {
+ if (tcp_is_reno(tp) && tp->sacked_out)
tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
- tp->left_out -= tcp_skb_pcount(next_skb);
+
+ tcp_adjust_fackets_out(tp, next_skb, tcp_skb_pcount(next_skb));
+ tp->packets_out -= tcp_skb_pcount(next_skb);
+
+ /* changed transmit queue under us so clear hints */
+ tcp_clear_retrans_hints_partial(tp);
+ /* manually tune sacktag skb hint */
+ if (tp->fastpath_skb_hint == next_skb) {
+ tp->fastpath_skb_hint = skb;
+ tp->fastpath_cnt_hint -= tcp_skb_pcount(skb);
}
- /* Not quite right: it can be > snd.fack, but
- * it is better to underestimate fackets.
- */
- tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
- tcp_packets_out_dec(tp, next_skb);
sk_stream_free_skb(sk, next_skb);
}
}
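tcp_dec_pcount_approx() (and its _int variant used in tcp_fragment() above) clamps a packet counter at zero rather than letting it underflow; a sketch matching the include/net/tcp.h helpers:

static inline void tcp_dec_pcount_approx_int(__u32 *count, const int decr)
{
	if (*count) {
		*count -= decr;
		if ((int)*count < 0)
			*count = 0;
	}
}

static inline void tcp_dec_pcount_approx(__u32 *count,
					 const struct sk_buff *skb)
{
	tcp_dec_pcount_approx_int(count, tcp_skb_pcount(skb));
}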
}
}
- clear_all_retrans_hints(tp);
+ tcp_clear_all_retrans_hints(tp);
if (!lost)
return;
- tcp_sync_left_out(tp);
+ tcp_verify_left_out(tp);
/* Don't muck with the congestion window here.
 * Reason is that we do not increase amount of _data_
 * in network, but units changed and effective
 * cwnd/ssthresh really reduced now.
 */
printk(KERN_DEBUG "retrans_out leaked.\n");
}
#endif
+ if (!tp->retrans_out)
+ tp->lost_retrans_low = tp->snd_nxt;
TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
tp->retrans_out += tcp_skb_pcount(skb);
return;
/* No forward retransmissions in Reno are possible. */
- if (!tp->rx_opt.sack_ok)
+ if (tcp_is_reno(tp))
return;
/* Yeah, we have to make difficult choice between forward transmission
* and retransmission... Both ways have their merits...
*
* For now we do not retransmit anything, while we have some new
- * segments to send.
+ * segments to send. In the other cases, follow rule 3 for
+ * NextSeg() specified in RFC3517.
*/
- if (tcp_may_send_now(sk, tp))
+ if (tcp_may_send_now(sk))
return;
- if (tp->forward_skb_hint) {
+ /* If nothing is SACKed, highest_sack in the loop won't be valid */
+ if (!tp->sacked_out)
+ return;
+
+ if (tp->forward_skb_hint)
skb = tp->forward_skb_hint;
- packet_cnt = tp->forward_cnt_hint;
- } else{
+ else
skb = tcp_write_queue_head(sk);
- packet_cnt = 0;
- }
tcp_for_write_queue_from(skb, sk) {
if (skb == tcp_send_head(sk))
break;
- tp->forward_cnt_hint = packet_cnt;
tp->forward_skb_hint = skb;
- /* Similar to the retransmit loop above we
- * can pretend that the retransmitted SKB
- * we send out here will be composed of one
- * real MSS sized packet because tcp_retransmit_skb()
- * will fragment it if necessary.
- */
- if (++packet_cnt > tp->fackets_out)
+ if (after(TCP_SKB_CB(skb)->seq, tp->highest_sack))
break;
if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
tcp_queue_skb(sk, skb);
}
- __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
+ __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
}
/* We get here when a process closes a file descriptor (either due to
* an explicit close() or as a byproduct of exit()'ing) and there
* was unread data in the receive queue. This behavior is recommended
- * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM
+ * by RFC 2525, section 2.17. -DaveM
*/
void tcp_send_active_reset(struct sock *sk, gfp_t priority)
{
- struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
/* NOTE: No TCP options attached and we never retransmit this. */
skb_shinfo(skb)->gso_type = 0;
/* Send it off. */
- TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
+ TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (tcp_transmit_skb(sk, skb, 0, priority))
if (md5)
tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
#endif
- skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
+ skb_push(skb, tcp_header_size);
+ skb_reset_transport_header(skb);
+ th = tcp_hdr(skb);
memset(th, 0, sizeof(struct tcphdr));
th->syn = 1;
th->ack = 1;
tp->af_specific->calc_md5_hash(md5_hash_location,
md5,
NULL, dst, req,
- skb->h.th, sk->sk_protocol,
+ tcp_hdr(skb), sk->sk_protocol,
skb->len);
}
#endif
skb_reserve(buff, MAX_TCP_HEADER);
TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
- TCP_ECN_send_syn(sk, tp, buff);
+ TCP_ECN_send_syn(sk, buff);
TCP_SKB_CB(buff)->sacked = 0;
skb_shinfo(buff)->gso_segs = 1;
skb_shinfo(buff)->gso_size = 0;
{
/* If we have been reset, we may not send again. */
if (sk->sk_state != TCP_CLOSE) {
- struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff;
/* We are not putting this on the write queue, so
skb_shinfo(buff)->gso_type = 0;
/* Send it off, this clears delayed acks for us. */
- TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
+ TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
TCP_SKB_CB(buff)->when = tcp_time_stamp;
tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
}
TCP_SKB_CB(skb)->when = tcp_time_stamp;
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
if (!err) {
- update_send_head(sk, tp, skb);
+ update_send_head(sk, skb);
}
return err;
} else {