diff --git a/include/net/tcp.h b/include/net/tcp.h
index 723b36851d..633147cb6b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -442,6 +442,9 @@ extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
 				     __u16 *mss);
 
+extern __u32 cookie_init_timestamp(struct request_sock *req);
+extern void cookie_check_timestamp(struct tcp_options_received *tcp_opt);
+
 /* From net/ipv6/syncookies.c */
 extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
@@ -760,6 +763,8 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 }
 
+extern int tcp_limit_reno_sacked(struct tcp_sock *tp);
+
 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
  * The exception is rate halving phase, when cwnd is decreasing towards
  * ssthresh.
@@ -782,11 +787,14 @@ extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
 
 /* Slow start with delack produces 3 packets of burst, so that
- * it is safe "de facto".
+ * it is safe "de facto". This will be the default - same as
+ * the default reordering threshold - but if reordering increases,
+ * we must be able to allow cwnd to burst at least this much in order
+ * to not pull it back when holes are filled.
  */
 static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
 {
-	return 3;
+	return tp->reordering;
 }
 
 /* Returns end sequence number of the receiver's advertised window */
@@ -956,6 +964,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
 	struct inet_request_sock *ireq = inet_rsk(req);
 
 	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
+	req->cookie_ts = 0;
 	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
 	req->mss = rx_opt->mss_clamp;
 	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
@@ -1243,7 +1252,7 @@ static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
 						struct sk_buff *buff,
 						struct sock *sk)
 {
-	__skb_append(skb, buff, &sk->sk_write_queue);
+	__skb_queue_after(&sk->sk_write_queue, skb, buff);
 }
 
 /* Insert skb between prev and next on the write queue of sk. */
@@ -1321,20 +1330,18 @@ enum tcp_seq_states {
 };
 
 struct tcp_seq_afinfo {
-	struct module		*owner;
 	char			*name;
 	sa_family_t		family;
-	int			(*seq_show) (struct seq_file *m, void *v);
-	struct file_operations	*seq_fops;
+	struct file_operations	seq_fops;
+	struct seq_operations	seq_ops;
 };
 
 struct tcp_iter_state {
-	struct net		*net;
+	struct seq_net_private	p;
 	sa_family_t		family;
 	enum tcp_seq_states	state;
 	struct sock		*syn_wait_sk;
 	int			bucket, sbucket, num, uid;
-	struct seq_operations	seq_ops;
 };
 
 extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
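
Note on the syncookie hunks: the new cookie_init_timestamp()/cookie_check_timestamp()
declarations, together with the req->cookie_ts initialization in tcp_openreq_init(),
support carrying negotiated TCP options inside the timestamp when SYN cookies are in
use (no request state is stored, so options would otherwise be lost). The standalone
C sketch below illustrates only the general pack/unpack idea; the bit widths, layout,
and helper names are invented for this example and are not the kernel's.

/* Illustrative sketch, not the kernel implementation: steal the low
 * bits of the outgoing TSval to carry options, and recover them from
 * the echoed TSecr on the final ACK of the handshake.
 */
#include <stdint.h>
#include <stdio.h>

#define TS_OPT_BITS	6			/* low bits we steal */
#define TS_OPT_MASK	((1u << TS_OPT_BITS) - 1)

/* Pack: window scale (4 bits), SACK permitted (1 bit), ECN ok (1 bit). */
static uint32_t ts_encode(uint32_t now, unsigned wscale, int sack, int ecn)
{
	uint32_t opts = (wscale & 0xf) | (!!sack << 4) | (!!ecn << 5);

	return (now & ~TS_OPT_MASK) | opts;
}

static void ts_decode(uint32_t tsecr, unsigned *wscale, int *sack, int *ecn)
{
	*wscale = tsecr & 0xf;
	*sack   = (tsecr >> 4) & 1;
	*ecn    = (tsecr >> 5) & 1;
}

int main(void)
{
	uint32_t tsval = ts_encode(123456789, 7, 1, 0);
	unsigned wscale;
	int sack, ecn;

	/* The peer echoes tsval back in TSecr; we recover the options. */
	ts_decode(tsval, &wscale, &sack, &ecn);
	printf("wscale=%u sack=%d ecn=%d\n", wscale, sack, ecn);
	return 0;
}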
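
Note on the tcp_max_burst() hunk: the burst allowance changes from a hardcoded 3 to
tp->reordering, so cwnd is not pulled back below the legitimate burst size when holes
are filled after reordering. A minimal userspace sketch of the behaviour change (the
struct below is a stand-in, not the real tcp_sock; 3 mirrors the default reordering
threshold, so the default burst is unchanged):

#include <stdio.h>

struct tcp_sock_model {
	unsigned int reordering;	/* stand-in for tp->reordering */
};

/* Old behaviour: a fixed 3-segment burst (safe with delayed ACKs). */
static unsigned int tcp_max_burst_old(const struct tcp_sock_model *tp)
{
	(void)tp;
	return 3;
}

/* New behaviour: the burst allowance follows the reordering threshold. */
static unsigned int tcp_max_burst_new(const struct tcp_sock_model *tp)
{
	return tp->reordering;
}

int main(void)
{
	struct tcp_sock_model tp = { .reordering = 3 };

	printf("default:    old=%u new=%u\n",
	       tcp_max_burst_old(&tp), tcp_max_burst_new(&tp));

	tp.reordering = 10;	/* e.g. after reordering was detected */
	printf("reordering: old=%u new=%u\n",
	       tcp_max_burst_old(&tp), tcp_max_burst_new(&tp));
	return 0;
}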