2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed were wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
57 * Alan Cox : Tidied tcp_data to avoid a potential
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFCs. For other useful protocol
138 * references see Comer and KA9Q NOS, and
139 * for a reference on the difference
140 * between the specifications and how BSD
141 * works, see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or (at your option) any later version.
215 * Description of States:
217 * TCP_SYN_SENT sent a connection request, waiting for ack
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
222 * TCP_ESTABLISHED connection established
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
243 * TCP_LAST_ACK our side has shutdown after the remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
247 * TCP_CLOSE socket is finished
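 *
 *	Editorial summary (added, per RFC 793, not in the original comment):
 *	a typical active close walks ESTABLISHED -> FIN_WAIT1 (our FIN sent)
 *	-> FIN_WAIT2 (our FIN acked) -> TIME_WAIT (peer's FIN received and
 *	acked) -> CLOSE, while the passive side walks ESTABLISHED ->
 *	CLOSE_WAIT (peer's FIN received) -> LAST_ACK (our FIN sent after
 *	close()) -> CLOSE. A simultaneous close passes through CLOSING
 *	instead of FIN_WAIT2 before entering TIME_WAIT.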
250 #include <linux/config.h>
251 #include <linux/module.h>
252 #include <linux/types.h>
253 #include <linux/fcntl.h>
254 #include <linux/poll.h>
255 #include <linux/init.h>
256 #include <linux/smp_lock.h>
257 #include <linux/fs.h>
258 #include <linux/random.h>
259 #include <linux/bootmem.h>
260 #include <linux/cache.h>
262 #include <net/icmp.h>
264 #include <net/xfrm.h>
268 #include <asm/uaccess.h>
269 #include <asm/ioctls.h>
271 int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
273 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
275 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
277 EXPORT_SYMBOL_GPL(tcp_orphan_count);
279 int sysctl_tcp_mem[3] __read_mostly;
280 int sysctl_tcp_wmem[3] __read_mostly;
281 int sysctl_tcp_rmem[3] __read_mostly;
283 EXPORT_SYMBOL(sysctl_tcp_mem);
284 EXPORT_SYMBOL(sysctl_tcp_rmem);
285 EXPORT_SYMBOL(sysctl_tcp_wmem);
287 atomic_t tcp_memory_allocated; /* Current allocated memory. */
288 atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
290 EXPORT_SYMBOL(tcp_memory_allocated);
291 EXPORT_SYMBOL(tcp_sockets_allocated);
294 * Pressure flag: try to collapse.
295 * Technical note: it is used by multiple contexts non-atomically.
296 * All the sk_stream_mem_schedule() accounting is of this nature:
297 * the accounting is strict, but the actions taken are advisory and have some latency.
299 int tcp_memory_pressure;
301 EXPORT_SYMBOL(tcp_memory_pressure);
303 void tcp_enter_memory_pressure(void)
305 if (!tcp_memory_pressure) {
306 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
307 tcp_memory_pressure = 1;
311 EXPORT_SYMBOL(tcp_enter_memory_pressure);
314 * Wait for a TCP event.
316 * Note that we don't need to lock the socket, as the upper poll layers
317 * take care of normal races (between the test and the event) and we don't
318 * go look at any of the socket buffers directly.
320 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
323 struct sock *sk = sock->sk;
324 struct tcp_sock *tp = tcp_sk(sk);
326 poll_wait(file, sk->sk_sleep, wait);
327 if (sk->sk_state == TCP_LISTEN)
328 return inet_csk_listen_poll(sk);
330 /* Socket is not locked. We are protected from async events
331 by the poll logic, and correct handling of state changes
332 made by other threads is impossible in any case.
340 * POLLHUP is certainly not done right. But poll() doesn't
341 * have a notion of HUP in just one direction, and for a
342 * socket the read side is more interesting.
344 * Some poll() documentation says that POLLHUP is incompatible
345 * with the POLLOUT/POLLWR flags, so somebody should check this
346 * all. But careful, it tends to be safer to return too many
347 * bits than too few, and you can easily break real applications
348 * if you don't tell them that something has hung up!
352 * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
353 * our fs/select.c). It means that after we received EOF,
354 * poll always returns immediately, making it impossible to poll() for
355 * write() in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
356 * if and only if shutdown has been made in both directions.
357 * Actually, it is interesting to look at how Solaris and DUX
358 * solve this dilemma. I would prefer that POLLHUP were maskable;
359 * then we could set it on SND_SHUTDOWN. BTW the examples given
360 * in Stevens' books assume exactly this behaviour; it explains
361 * why POLLHUP is incompatible with POLLOUT. --ANK
363 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
364 * blocking on fresh not-connected or disconnected socket. --ANK
366 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
368 if (sk->sk_shutdown & RCV_SHUTDOWN)
369 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
372 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
373 /* Potential race condition. If the read of tp below is
374 * reordered above the read of sk->sk_state, we can be woken up
375 * spuriously in SYN_* states. */
376 if ((tp->rcv_nxt != tp->copied_seq) &&
377 (tp->urg_seq != tp->copied_seq ||
378 tp->rcv_nxt != tp->copied_seq + 1 ||
379 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
380 mask |= POLLIN | POLLRDNORM;
382 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
383 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
384 mask |= POLLOUT | POLLWRNORM;
385 } else { /* send SIGIO later */
386 set_bit(SOCK_ASYNC_NOSPACE,
387 &sk->sk_socket->flags);
388 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
390 /* Race breaker. If space is freed after
391 * wspace test but before the flags are set,
392 * IO signal will be lost.
394 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
395 mask |= POLLOUT | POLLWRNORM;
399 if (tp->urg_data & TCP_URG_VALID)
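/*
 * Editorial sketch (not part of the original file): how a user-space caller
 * typically consumes the mask computed above. Assumes "fd" is a connected
 * TCP socket; the helper name is illustrative only.
 *
 *	#include <poll.h>
 *
 *	static int wait_for_socket(int fd, int timeout_ms)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *		int n = poll(&pfd, 1, timeout_ms);
 *
 *		if (n <= 0)
 *			return -1;
 *		if (pfd.revents & (POLLERR | POLLHUP))
 *			return -1;
 *		return pfd.revents & (POLLIN | POLLOUT);
 *	}
 */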
405 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
407 struct tcp_sock *tp = tcp_sk(sk);
412 if (sk->sk_state == TCP_LISTEN)
416 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
418 else if (sock_flag(sk, SOCK_URGINLINE) ||
420 before(tp->urg_seq, tp->copied_seq) ||
421 !before(tp->urg_seq, tp->rcv_nxt)) {
422 answ = tp->rcv_nxt - tp->copied_seq;
424 /* Subtract 1, if FIN is in queue. */
425 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
427 answ -= ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
429 answ = tp->urg_seq - tp->copied_seq;
433 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
436 if (sk->sk_state == TCP_LISTEN)
439 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
442 answ = tp->write_seq - tp->snd_una;
448 return put_user(answ, (int __user *)arg);
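/*
 * Editorial sketch (not part of the original file): the user-space view of
 * the ioctls handled above. SIOCINQ (a.k.a. FIONREAD) reports unread bytes
 * in the receive queue, SIOCATMARK whether the read pointer is at the urgent
 * mark, and SIOCOUTQ the bytes in the send queue not yet acknowledged.
 * "fd" is assumed to be a connected TCP socket.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int unread, at_mark, unsent;
 *
 *	ioctl(fd, SIOCINQ, &unread);
 *	ioctl(fd, SIOCATMARK, &at_mark);
 *	ioctl(fd, SIOCOUTQ, &unsent);
 */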
451 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
453 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
454 tp->pushed_seq = tp->write_seq;
457 static inline int forced_push(struct tcp_sock *tp)
459 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
462 static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
466 TCP_SKB_CB(skb)->seq = tp->write_seq;
467 TCP_SKB_CB(skb)->end_seq = tp->write_seq;
468 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
469 TCP_SKB_CB(skb)->sacked = 0;
470 skb_header_release(skb);
471 __skb_queue_tail(&sk->sk_write_queue, skb);
472 sk_charge_skb(sk, skb);
473 if (!sk->sk_send_head)
474 sk->sk_send_head = skb;
475 if (tp->nonagle & TCP_NAGLE_PUSH)
476 tp->nonagle &= ~TCP_NAGLE_PUSH;
479 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
482 if (flags & MSG_OOB) {
484 tp->snd_up = tp->write_seq;
485 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
489 static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
490 int mss_now, int nonagle)
492 if (sk->sk_send_head) {
493 struct sk_buff *skb = sk->sk_write_queue.prev;
494 if (!(flags & MSG_MORE) || forced_push(tp))
495 tcp_mark_push(tp, skb);
496 tcp_mark_urg(tp, flags, skb);
497 __tcp_push_pending_frames(sk, tp, mss_now,
498 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
502 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
503 size_t psize, int flags)
505 struct tcp_sock *tp = tcp_sk(sk);
506 int mss_now, size_goal;
509 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
511 /* Wait for a connection to finish. */
512 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
513 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
516 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
518 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
519 size_goal = tp->xmit_size_goal;
523 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
527 struct sk_buff *skb = sk->sk_write_queue.prev;
528 struct page *page = pages[poffset / PAGE_SIZE];
529 int copy, i, can_coalesce;
530 int offset = poffset % PAGE_SIZE;
531 int size = min_t(size_t, psize, PAGE_SIZE - offset);
533 if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
535 if (!sk_stream_memory_free(sk))
536 goto wait_for_sndbuf;
538 skb = sk_stream_alloc_pskb(sk, 0, 0,
541 goto wait_for_memory;
543 skb_entail(sk, tp, skb);
550 i = skb_shinfo(skb)->nr_frags;
551 can_coalesce = skb_can_coalesce(skb, i, page, offset);
552 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
553 tcp_mark_push(tp, skb);
556 if (!sk_stream_wmem_schedule(sk, copy))
557 goto wait_for_memory;
560 skb_shinfo(skb)->frags[i - 1].size += copy;
563 skb_fill_page_desc(skb, i, page, offset, copy);
567 skb->data_len += copy;
568 skb->truesize += copy;
569 sk->sk_wmem_queued += copy;
570 sk->sk_forward_alloc -= copy;
571 skb->ip_summed = CHECKSUM_HW;
572 tp->write_seq += copy;
573 TCP_SKB_CB(skb)->end_seq += copy;
574 skb_shinfo(skb)->tso_segs = 0;
577 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
581 if (!(psize -= copy))
584 if (skb->len < mss_now || (flags & MSG_OOB))
587 if (forced_push(tp)) {
588 tcp_mark_push(tp, skb);
589 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
590 } else if (skb == sk->sk_send_head)
591 tcp_push_one(sk, mss_now);
595 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
598 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
600 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
603 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
604 size_goal = tp->xmit_size_goal;
609 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
616 return sk_stream_error(sk, flags, err);
619 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
620 size_t size, int flags)
623 struct sock *sk = sock->sk;
625 #define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
627 if (!(sk->sk_route_caps & NETIF_F_SG) ||
628 !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
629 return sock_no_sendpage(sock, page, offset, size, flags);
631 #undef TCP_ZC_CSUM_FLAGS
635 res = do_tcp_sendpages(sk, &page, offset, size, flags);
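/*
 * Editorial sketch (not part of the original file): tcp_sendpage() is what
 * ends up running when an application calls sendfile(2) on a TCP socket and
 * the route supports scatter-gather plus checksum offload; otherwise the
 * sock_no_sendpage() fallback above copies the data instead. A minimal
 * user-space use, with illustrative descriptor and length names:
 *
 *	#include <sys/sendfile.h>
 *
 *	off_t off = 0;
 *	ssize_t sent = sendfile(sock_fd, file_fd, &off, file_len);
 */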
641 #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
642 #define TCP_OFF(sk) (sk->sk_sndmsg_off)
644 static inline int select_size(struct sock *sk, struct tcp_sock *tp)
646 int tmp = tp->mss_cache;
648 if (sk->sk_route_caps & NETIF_F_SG) {
649 if (sk->sk_route_caps & NETIF_F_TSO)
652 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
654 if (tmp >= pgbreak &&
655 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
663 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
667 struct tcp_sock *tp = tcp_sk(sk);
670 int mss_now, size_goal;
677 flags = msg->msg_flags;
678 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
680 /* Wait for a connection to finish. */
681 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
682 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
685 /* This should be in poll */
686 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
688 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
689 size_goal = tp->xmit_size_goal;
691 /* Ok commence sending. */
692 iovlen = msg->msg_iovlen;
697 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
700 while (--iovlen >= 0) {
701 int seglen = iov->iov_len;
702 unsigned char __user *from = iov->iov_base;
709 skb = sk->sk_write_queue.prev;
711 if (!sk->sk_send_head ||
712 (copy = size_goal - skb->len) <= 0) {
715 /* Allocate new segment. If the interface is SG,
716 * allocate skb fitting to single page.
718 if (!sk_stream_memory_free(sk))
719 goto wait_for_sndbuf;
721 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
722 0, sk->sk_allocation);
724 goto wait_for_memory;
727 * Check whether we can use HW checksum.
729 if (sk->sk_route_caps &
730 (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
732 skb->ip_summed = CHECKSUM_HW;
734 skb_entail(sk, tp, skb);
738 /* Try to append data to the end of skb. */
742 /* Where to copy to? */
743 if (skb_tailroom(skb) > 0) {
744 /* We have some space in skb head. Superb! */
745 if (copy > skb_tailroom(skb))
746 copy = skb_tailroom(skb);
747 if ((err = skb_add_data(skb, from, copy)) != 0)
751 int i = skb_shinfo(skb)->nr_frags;
752 struct page *page = TCP_PAGE(sk);
753 int off = TCP_OFF(sk);
755 if (skb_can_coalesce(skb, i, page, off) &&
757 /* We can extend the last page
760 } else if (i == MAX_SKB_FRAGS ||
762 !(sk->sk_route_caps & NETIF_F_SG))) {
763 /* Need to add new fragment and cannot
764 * do this because interface is non-SG,
765 * or because all the page slots are
767 tcp_mark_push(tp, skb);
770 if (off == PAGE_SIZE) {
772 TCP_PAGE(sk) = page = NULL;
778 if (copy > PAGE_SIZE - off)
779 copy = PAGE_SIZE - off;
781 if (!sk_stream_wmem_schedule(sk, copy))
782 goto wait_for_memory;
785 /* Allocate new cache page. */
786 if (!(page = sk_stream_alloc_page(sk)))
787 goto wait_for_memory;
790 /* Time to copy data. We are close to
792 err = skb_copy_to_page(sk, from, skb, page,
795 /* If this page was new, give it to the
796 * socket so it does not get leaked.
805 /* Update the skb. */
807 skb_shinfo(skb)->frags[i - 1].size +=
810 skb_fill_page_desc(skb, i, page, off, copy);
813 } else if (off + copy < PAGE_SIZE) {
819 TCP_OFF(sk) = off + copy;
823 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
825 tp->write_seq += copy;
826 TCP_SKB_CB(skb)->end_seq += copy;
827 skb_shinfo(skb)->tso_segs = 0;
831 if ((seglen -= copy) == 0 && iovlen == 0)
834 if (skb->len < mss_now || (flags & MSG_OOB))
837 if (forced_push(tp)) {
838 tcp_mark_push(tp, skb);
839 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
840 } else if (skb == sk->sk_send_head)
841 tcp_push_one(sk, mss_now);
845 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
848 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
850 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
853 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
854 size_goal = tp->xmit_size_goal;
860 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
867 if (sk->sk_send_head == skb)
868 sk->sk_send_head = NULL;
869 __skb_unlink(skb, &sk->sk_write_queue);
870 sk_stream_free_skb(sk, skb);
877 err = sk_stream_error(sk, flags, err);
884 * Handle reading urgent data. BSD has very simple semantics for
885 * this, no blocking and very strange errors 8)
888 static int tcp_recv_urg(struct sock *sk, long timeo,
889 struct msghdr *msg, int len, int flags,
892 struct tcp_sock *tp = tcp_sk(sk);
894 /* No URG data to read. */
895 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
896 tp->urg_data == TCP_URG_READ)
897 return -EINVAL; /* Yes this is right ! */
899 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
902 if (tp->urg_data & TCP_URG_VALID) {
904 char c = tp->urg_data;
906 if (!(flags & MSG_PEEK))
907 tp->urg_data = TCP_URG_READ;
909 /* Read urgent data. */
910 msg->msg_flags |= MSG_OOB;
913 if (!(flags & MSG_TRUNC))
914 err = memcpy_toiovec(msg->msg_iov, &c, 1);
917 msg->msg_flags |= MSG_TRUNC;
919 return err ? -EFAULT : len;
922 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
925 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
926 * the available implementations agree in this case:
927 * this call should never block, independent of the
928 * blocking state of the socket.
929 * Mike <pall@rz.uni-karlsruhe.de>
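 *
 *	Editorial sketch (not part of the original file): the pair of user-space
 *	calls this path serves. One end marks a byte urgent, the other fetches
 *	it out of band; recv() returns 1 on success and fails with EINVAL when
 *	SO_OOBINLINE is set or no urgent byte is pending, and it never blocks.
 *	"peer_fd" and "fd" are illustrative names.
 *
 *		char c = '!';
 *		send(peer_fd, &c, 1, MSG_OOB);
 *
 *		char urg;
 *		ssize_t n = recv(fd, &urg, 1, MSG_OOB);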
934 /* Clean up the receive buffer for full frames taken by the user,
935 * then send an ACK if necessary. COPIED is the number of bytes
936 * tcp_recvmsg has given to the user so far; it speeds up the
937 * calculation of whether or not we must ACK for the sake of
940 static void cleanup_rbuf(struct sock *sk, int copied)
942 struct tcp_sock *tp = tcp_sk(sk);
946 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
948 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
951 if (inet_csk_ack_scheduled(sk)) {
952 const struct inet_connection_sock *icsk = inet_csk(sk);
953 /* Delayed ACKs frequently hit locked sockets during bulk
955 if (icsk->icsk_ack.blocked ||
956 /* Once-per-two-segments ACK was not sent by tcp_input.c */
957 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
959 * If this read emptied read buffer, we send ACK, if
960 * connection is not bidirectional, user drained
961 * receive buffer and there was a small segment
964 (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
965 !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
969 /* We send an ACK if we can now advertise a non-zero window
970 * which has been raised "significantly".
972 * Even if window raised up to infinity, do not send window open ACK
973 * in states, where we will not receive more. It is useless.
975 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
976 __u32 rcv_window_now = tcp_receive_window(tp);
978 /* Optimize, __tcp_select_window() is not cheap. */
979 if (2*rcv_window_now <= tp->window_clamp) {
980 __u32 new_window = __tcp_select_window(sk);
982 /* Send ACK now, if this read freed lots of space
983 * in our buffer. Certainly, new_window is the new window.
984 * We can advertise it now if it is not less than the current one.
985 * "Lots" means "at least twice" here.
987 if (new_window && new_window >= 2 * rcv_window_now)
995 static void tcp_prequeue_process(struct sock *sk)
998 struct tcp_sock *tp = tcp_sk(sk);
1000 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
1002 /* RX process wants to run with disabled BHs, though it is not
1005 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1006 sk->sk_backlog_rcv(sk, skb);
1009 /* Clear memory counter. */
1010 tp->ucopy.memory = 0;
1013 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1015 struct sk_buff *skb;
1018 skb_queue_walk(&sk->sk_receive_queue, skb) {
1019 offset = seq - TCP_SKB_CB(skb)->seq;
1022 if (offset < skb->len || skb->h.th->fin) {
1031 * This routine provides an alternative to tcp_recvmsg() for routines
1032 * that would like to handle copying from skbuffs directly in 'sendfile'
1035 * - It is assumed that the socket was locked by the caller.
1036 * - The routine does not block.
1037 * - At present, there is no support for reading OOB data
1038 * or for 'peeking' the socket using this routine
1039 * (although both would be easy to implement).
1041 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1042 sk_read_actor_t recv_actor)
1044 struct sk_buff *skb;
1045 struct tcp_sock *tp = tcp_sk(sk);
1046 u32 seq = tp->copied_seq;
1050 if (sk->sk_state == TCP_LISTEN)
1052 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1053 if (offset < skb->len) {
1056 len = skb->len - offset;
1057 /* Stop reading if we hit a patch of urgent data */
1059 u32 urg_offset = tp->urg_seq - seq;
1060 if (urg_offset < len)
1065 used = recv_actor(desc, skb, offset, len);
1071 if (offset != skb->len)
1074 if (skb->h.th->fin) {
1075 sk_eat_skb(sk, skb);
1079 sk_eat_skb(sk, skb);
1083 tp->copied_seq = seq;
1085 tcp_rcv_space_adjust(sk);
1087 /* Clean up data we have read: This will do ACK frames. */
1089 cleanup_rbuf(sk, copied);
1094 * This routine copies from a sock struct into the user buffer.
1096 * Technical note: in 2.3 we work on _locked_ socket, so that
1097 * tricks with *seq access order and skb->users are not required.
1098 * Probably, code can be easily improved even more.
1101 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1102 size_t len, int nonblock, int flags, int *addr_len)
1104 struct tcp_sock *tp = tcp_sk(sk);
1110 int target; /* Read at least this many bytes */
1112 struct task_struct *user_recv = NULL;
1116 TCP_CHECK_TIMER(sk);
1119 if (sk->sk_state == TCP_LISTEN)
1122 timeo = sock_rcvtimeo(sk, nonblock);
1124 /* Urgent data needs to be handled specially. */
1125 if (flags & MSG_OOB)
1128 seq = &tp->copied_seq;
1129 if (flags & MSG_PEEK) {
1130 peek_seq = tp->copied_seq;
1134 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1137 struct sk_buff *skb;
1140 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1141 if (tp->urg_data && tp->urg_seq == *seq) {
1144 if (signal_pending(current)) {
1145 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1150 /* Next get a buffer. */
1152 skb = skb_peek(&sk->sk_receive_queue);
1157 /* Now that we have two receive queues this
1160 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1161 printk(KERN_INFO "recvmsg bug: copied %X "
1162 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1165 offset = *seq - TCP_SKB_CB(skb)->seq;
1168 if (offset < skb->len)
1172 BUG_TRAP(flags & MSG_PEEK);
1174 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1176 /* Well, if we have backlog, try to process it now. */
1178 if (copied >= target && !sk->sk_backlog.tail)
1183 sk->sk_state == TCP_CLOSE ||
1184 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1186 signal_pending(current) ||
1190 if (sock_flag(sk, SOCK_DONE))
1194 copied = sock_error(sk);
1198 if (sk->sk_shutdown & RCV_SHUTDOWN)
1201 if (sk->sk_state == TCP_CLOSE) {
1202 if (!sock_flag(sk, SOCK_DONE)) {
1203 /* This occurs when the user tries to read
1204 * from a never-connected socket.
1217 if (signal_pending(current)) {
1218 copied = sock_intr_errno(timeo);
1223 cleanup_rbuf(sk, copied);
1225 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1226 /* Install new reader */
1227 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1228 user_recv = current;
1229 tp->ucopy.task = user_recv;
1230 tp->ucopy.iov = msg->msg_iov;
1233 tp->ucopy.len = len;
1235 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1236 (flags & (MSG_PEEK | MSG_TRUNC)));
1238 /* Ugly... If prequeue is not empty, we have to
1239 * process it before releasing socket, otherwise
1240 * order will be broken at second iteration.
1241 * More elegant solution is required!!!
1243 * Look: we have the following (pseudo)queues:
1245 * 1. packets in flight
1250 * Each queue can be processed only if the next ones
1251 * are empty. At this point we have empty receive_queue.
1252 * But prequeue _can_ be not empty after 2nd iteration,
1253 * when we jumped to start of loop because backlog
1254 * processing added something to receive_queue.
1255 * We cannot release_sock(), because backlog contains
1256 * packets arrived _after_ prequeued ones.
1258 * Shortly, algorithm is clear --- to process all
1259 * the queues in order. We could make it more directly,
1260 * requeueing packets from backlog to prequeue, if it
1261 * is not empty. It is more elegant, but eats cycles,
1264 if (!skb_queue_empty(&tp->ucopy.prequeue))
1267 /* __ Set realtime policy in scheduler __ */
1270 if (copied >= target) {
1271 /* Do not sleep, just process backlog. */
1275 sk_wait_data(sk, &timeo);
1280 /* __ Restore normal policy in scheduler __ */
1282 if ((chunk = len - tp->ucopy.len) != 0) {
1283 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1288 if (tp->rcv_nxt == tp->copied_seq &&
1289 !skb_queue_empty(&tp->ucopy.prequeue)) {
1291 tcp_prequeue_process(sk);
1293 if ((chunk = len - tp->ucopy.len) != 0) {
1294 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1300 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1301 if (net_ratelimit())
1302 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1303 current->comm, current->pid);
1304 peek_seq = tp->copied_seq;
1309 /* Ok so how much can we use? */
1310 used = skb->len - offset;
1314 /* Do we have urgent data here? */
1316 u32 urg_offset = tp->urg_seq - *seq;
1317 if (urg_offset < used) {
1319 if (!sock_flag(sk, SOCK_URGINLINE)) {
1331 if (!(flags & MSG_TRUNC)) {
1332 err = skb_copy_datagram_iovec(skb, offset,
1333 msg->msg_iov, used);
1335 /* Exception. Bailout! */
1346 tcp_rcv_space_adjust(sk);
1349 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1351 tcp_fast_path_check(sk, tp);
1353 if (used + offset < skb->len)
1358 if (!(flags & MSG_PEEK))
1359 sk_eat_skb(sk, skb);
1363 /* Process the FIN. */
1365 if (!(flags & MSG_PEEK))
1366 sk_eat_skb(sk, skb);
1371 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1374 tp->ucopy.len = copied > 0 ? len : 0;
1376 tcp_prequeue_process(sk);
1378 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1379 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1385 tp->ucopy.task = NULL;
1389 /* According to UNIX98, msg_name/msg_namelen are ignored
1390 * on a connected socket. I was just happy when I found this 8) --ANK
1393 /* Clean up data we have read: This will do ACK frames. */
1394 cleanup_rbuf(sk, copied);
1396 TCP_CHECK_TIMER(sk);
1401 TCP_CHECK_TIMER(sk);
1406 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1411 * State processing on a close. This implements the state shift for
1412 * sending our FIN frame. Note that we only send a FIN for some
1413 * states. A shutdown() may have already sent the FIN, or we may be
1417 static const unsigned char new_state[16] = {
1418 /* current state: new state: action: */
1419 /* (Invalid) */ TCP_CLOSE,
1420 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1421 /* TCP_SYN_SENT */ TCP_CLOSE,
1422 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1423 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1424 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1425 /* TCP_TIME_WAIT */ TCP_CLOSE,
1426 /* TCP_CLOSE */ TCP_CLOSE,
1427 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1428 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1429 /* TCP_LISTEN */ TCP_CLOSE,
1430 /* TCP_CLOSING */ TCP_CLOSING,
1433 static int tcp_close_state(struct sock *sk)
1435 int next = (int)new_state[sk->sk_state];
1436 int ns = next & TCP_STATE_MASK;
1438 tcp_set_state(sk, ns);
1440 return next & TCP_ACTION_FIN;
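/*
 * Editorial sketch (not part of the original file): the state table above is
 * what makes a user-space half-close behave as expected. Shutting down only
 * the send direction emits our FIN while leaving the receive side open, so
 * whatever the peer still sends can be drained before the final close()
 * (the helper name is illustrative):
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static void half_close_and_drain(int fd)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *
 *		shutdown(fd, SHUT_WR);
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			;
 *		close(fd);
 *	}
 */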
1444 * Shutdown the sending side of a connection. Much like close except
1445 * that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
1448 void tcp_shutdown(struct sock *sk, int how)
1450 /* We need to grab some memory, and put together a FIN,
1451 * and then put it into the queue to be sent.
1452 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1454 if (!(how & SEND_SHUTDOWN))
1457 /* If we've already sent a FIN, or it's a closed state, skip this. */
1458 if ((1 << sk->sk_state) &
1459 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1460 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1461 /* Clear out any half completed packets. FIN if needed. */
1462 if (tcp_close_state(sk))
1467 void tcp_close(struct sock *sk, long timeout)
1469 struct sk_buff *skb;
1470 int data_was_unread = 0;
1473 sk->sk_shutdown = SHUTDOWN_MASK;
1475 if (sk->sk_state == TCP_LISTEN) {
1476 tcp_set_state(sk, TCP_CLOSE);
1479 inet_csk_listen_stop(sk);
1481 goto adjudge_to_death;
1484 /* We need to flush the recv. buffs. We do this only on the
1485 * descriptor close, not protocol-sourced closes, because the
1486 * reader process may not have drained the data yet!
1488 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1489 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1491 data_was_unread += len;
1495 sk_stream_mem_reclaim(sk);
1497 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1498 * 3.10, we send a RST here because data was lost. To
1499 * witness the awful effects of the old behavior of always
1500 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1501 * a bulk GET in an FTP client, suspend the process, wait
1502 * for the client to advertise a zero window, then kill -9
1503 * the FTP client, wheee... Note: timeout is always zero
1506 if (data_was_unread) {
1507 /* Unread data was tossed, zap the connection. */
1508 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1509 tcp_set_state(sk, TCP_CLOSE);
1510 tcp_send_active_reset(sk, GFP_KERNEL);
1511 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1512 /* Check zero linger _after_ checking for unread data. */
1513 sk->sk_prot->disconnect(sk, 0);
1514 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1515 } else if (tcp_close_state(sk)) {
1516 /* We FIN if the application ate all the data before
1517 * zapping the connection.
1520 /* RED-PEN. Formally speaking, we have broken TCP state
1521 * machine. State transitions:
1523 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1524 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1525 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1527 * are legal only when FIN has been sent (i.e. in window),
1528 * rather than queued out of window. Purists blame.
1530 * F.e. "RFC state" is ESTABLISHED,
1531 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1533 * The visible declinations are that sometimes
1534 * we enter time-wait state, when it is not required really
1535 * (harmless), do not send active resets, when they are
1536 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1537 * they look as CLOSING or LAST_ACK for Linux)
1538 * Probably, I missed some more holelets.
1544 sk_stream_wait_close(sk, timeout);
1547 /* It is the last release_sock in its life. It will remove backlog. */
1551 /* Now socket is owned by kernel and we acquire BH lock
1552 to finish close. No need to check for user refs.
1556 BUG_TRAP(!sock_owned_by_user(sk));
1561 /* This is a (useful) BSD violation of the RFC. There is a
1562 * problem with TCP as specified in that the other end could
1563 * keep a socket open forever with no application left this end.
1564 * We use a 3 minute timeout (about the same as BSD) then kill
1565 * our end. If they send after that then tough - BUT: long enough
1566 * that we won't make the old 4*rto = almost no time - whoops
1569 * Nope, it was not mistake. It is really desired behaviour
1570 * f.e. on http servers, when such sockets are useless, but
1571 * consume significant resources. Let's do it with special
1572 * linger2 option. --ANK
1575 if (sk->sk_state == TCP_FIN_WAIT2) {
1576 struct tcp_sock *tp = tcp_sk(sk);
1577 if (tp->linger2 < 0) {
1578 tcp_set_state(sk, TCP_CLOSE);
1579 tcp_send_active_reset(sk, GFP_ATOMIC);
1580 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1582 const int tmo = tcp_fin_time(sk);
1584 if (tmo > TCP_TIMEWAIT_LEN) {
1585 inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
1587 atomic_inc(sk->sk_prot->orphan_count);
1588 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1593 if (sk->sk_state != TCP_CLOSE) {
1594 sk_stream_mem_reclaim(sk);
1595 if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
1596 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1597 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1598 if (net_ratelimit())
1599 printk(KERN_INFO "TCP: too many of orphaned "
1601 tcp_set_state(sk, TCP_CLOSE);
1602 tcp_send_active_reset(sk, GFP_ATOMIC);
1603 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1606 atomic_inc(sk->sk_prot->orphan_count);
1608 if (sk->sk_state == TCP_CLOSE)
1609 inet_csk_destroy_sock(sk);
1610 /* Otherwise, socket is reprieved until protocol close. */
1618 /* These states need RST on ABORT according to RFC793 */
1620 static inline int tcp_need_reset(int state)
1622 return (1 << state) &
1623 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1624 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1627 int tcp_disconnect(struct sock *sk, int flags)
1629 struct inet_sock *inet = inet_sk(sk);
1630 struct inet_connection_sock *icsk = inet_csk(sk);
1631 struct tcp_sock *tp = tcp_sk(sk);
1633 int old_state = sk->sk_state;
1635 if (old_state != TCP_CLOSE)
1636 tcp_set_state(sk, TCP_CLOSE);
1638 /* ABORT function of RFC793 */
1639 if (old_state == TCP_LISTEN) {
1640 inet_csk_listen_stop(sk);
1641 } else if (tcp_need_reset(old_state) ||
1642 (tp->snd_nxt != tp->write_seq &&
1643 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1644 /* The last check adjusts for discrepancy of Linux wrt. RFC
1647 tcp_send_active_reset(sk, gfp_any());
1648 sk->sk_err = ECONNRESET;
1649 } else if (old_state == TCP_SYN_SENT)
1650 sk->sk_err = ECONNRESET;
1652 tcp_clear_xmit_timers(sk);
1653 __skb_queue_purge(&sk->sk_receive_queue);
1654 sk_stream_writequeue_purge(sk);
1655 __skb_queue_purge(&tp->out_of_order_queue);
1659 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1660 inet_reset_saddr(sk);
1662 sk->sk_shutdown = 0;
1663 sock_reset_flag(sk, SOCK_DONE);
1665 if ((tp->write_seq += tp->max_window + 2) == 0)
1667 icsk->icsk_backoff = 0;
1669 icsk->icsk_probes_out = 0;
1670 tp->packets_out = 0;
1671 tp->snd_ssthresh = 0x7fffffff;
1672 tp->snd_cwnd_cnt = 0;
1673 tp->bytes_acked = 0;
1674 tcp_set_ca_state(sk, TCP_CA_Open);
1675 tcp_clear_retrans(tp);
1676 inet_csk_delack_init(sk);
1677 sk->sk_send_head = NULL;
1678 tp->rx_opt.saw_tstamp = 0;
1679 tcp_sack_reset(&tp->rx_opt);
1682 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1684 sk->sk_error_report(sk);
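/*
 * Editorial note (not part of the original file): from user space,
 * tcp_disconnect() is reached by calling connect() with the address family
 * set to AF_UNSPEC, which per connect(2) dissolves the association so the
 * socket can be reused ("fd" is an illustrative TCP socket):
 *
 *	#include <sys/socket.h>
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));
 */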
1689 * Socket option code for TCP.
1691 static int do_tcp_setsockopt(struct sock *sk, int level,
1692 int optname, char __user *optval, int optlen)
1694 struct tcp_sock *tp = tcp_sk(sk);
1695 struct inet_connection_sock *icsk = inet_csk(sk);
1699 /* This is a string value; all the others are ints */
1700 if (optname == TCP_CONGESTION) {
1701 char name[TCP_CA_NAME_MAX];
1706 val = strncpy_from_user(name, optval,
1707 min(TCP_CA_NAME_MAX-1, optlen));
1713 err = tcp_set_congestion_control(sk, name);
1718 if (optlen < sizeof(int))
1721 if (get_user(val, (int __user *)optval))
1728 /* Values greater than interface MTU won't take effect. However
1729 * at the point when this call is done we typically don't yet
1730 * know which interface is going to be used */
1731 if (val < 8 || val > MAX_TCP_WINDOW) {
1735 tp->rx_opt.user_mss = val;
1740 /* TCP_NODELAY is weaker than TCP_CORK, so that
1741 * this option on corked socket is remembered, but
1742 * it is not activated until cork is cleared.
1744 * However, when TCP_NODELAY is set we make
1745 * an explicit push, which overrides even TCP_CORK
1746 * for currently queued segments.
1748 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1749 tcp_push_pending_frames(sk, tp);
1751 tp->nonagle &= ~TCP_NAGLE_OFF;
1756 /* When set indicates to always queue non-full frames.
1757 * Later the user clears this option and we transmit
1758 * any pending partial frames in the queue. This is
1759 * meant to be used alongside sendfile() to get properly
1760 * filled frames when the user (for example) must write
1761 * out headers with a write() call first and then use
1762 * sendfile to send out the data parts.
1764 * TCP_CORK can be set together with TCP_NODELAY and it is
1765 * stronger than TCP_NODELAY.
1768 tp->nonagle |= TCP_NAGLE_CORK;
1770 tp->nonagle &= ~TCP_NAGLE_CORK;
1771 if (tp->nonagle&TCP_NAGLE_OFF)
1772 tp->nonagle |= TCP_NAGLE_PUSH;
1773 tcp_push_pending_frames(sk, tp);
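/*
 * Editorial sketch (not part of the original file): the header-plus-payload
 * pattern described in the comment above, as seen from user space.
 * Descriptor, buffer and length names are illustrative.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/sendfile.h>
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, header, header_len);
 *	sendfile(fd, file_fd, NULL, body_len);
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 */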
1778 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1781 tp->keepalive_time = val * HZ;
1782 if (sock_flag(sk, SOCK_KEEPOPEN) &&
1783 !((1 << sk->sk_state) &
1784 (TCPF_CLOSE | TCPF_LISTEN))) {
1785 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
1786 if (tp->keepalive_time > elapsed)
1787 elapsed = tp->keepalive_time - elapsed;
1790 inet_csk_reset_keepalive_timer(sk, elapsed);
1795 if (val < 1 || val > MAX_TCP_KEEPINTVL)
1798 tp->keepalive_intvl = val * HZ;
1801 if (val < 1 || val > MAX_TCP_KEEPCNT)
1804 tp->keepalive_probes = val;
1807 if (val < 1 || val > MAX_TCP_SYNCNT)
1810 icsk->icsk_syn_retries = val;
1816 else if (val > sysctl_tcp_fin_timeout / HZ)
1819 tp->linger2 = val * HZ;
1822 case TCP_DEFER_ACCEPT:
1823 icsk->icsk_accept_queue.rskq_defer_accept = 0;
1825 /* Translate value in seconds to number of
1827 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
1828 val > ((TCP_TIMEOUT_INIT / HZ) <<
1829 icsk->icsk_accept_queue.rskq_defer_accept))
1830 icsk->icsk_accept_queue.rskq_defer_accept++;
1831 icsk->icsk_accept_queue.rskq_defer_accept++;
1835 case TCP_WINDOW_CLAMP:
1837 if (sk->sk_state != TCP_CLOSE) {
1841 tp->window_clamp = 0;
1843 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
1844 SOCK_MIN_RCVBUF / 2 : val;
1849 icsk->icsk_ack.pingpong = 1;
1851 icsk->icsk_ack.pingpong = 0;
1852 if ((1 << sk->sk_state) &
1853 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
1854 inet_csk_ack_scheduled(sk)) {
1855 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
1856 cleanup_rbuf(sk, 1);
1858 icsk->icsk_ack.pingpong = 1;
1871 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1874 struct inet_connection_sock *icsk = inet_csk(sk);
1876 if (level != SOL_TCP)
1877 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
1879 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1882 #ifdef CONFIG_COMPAT
1883 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
1884 char __user *optval, int optlen)
1886 if (level != SOL_TCP)
1887 return inet_csk_compat_setsockopt(sk, level, optname,
1889 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1892 EXPORT_SYMBOL(compat_tcp_setsockopt);
1895 /* Return information about state of tcp endpoint in API format. */
1896 void tcp_get_info(struct sock *sk, struct tcp_info *info)
1898 struct tcp_sock *tp = tcp_sk(sk);
1899 const struct inet_connection_sock *icsk = inet_csk(sk);
1900 u32 now = tcp_time_stamp;
1902 memset(info, 0, sizeof(*info));
1904 info->tcpi_state = sk->sk_state;
1905 info->tcpi_ca_state = icsk->icsk_ca_state;
1906 info->tcpi_retransmits = icsk->icsk_retransmits;
1907 info->tcpi_probes = icsk->icsk_probes_out;
1908 info->tcpi_backoff = icsk->icsk_backoff;
1910 if (tp->rx_opt.tstamp_ok)
1911 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
1912 if (tp->rx_opt.sack_ok)
1913 info->tcpi_options |= TCPI_OPT_SACK;
1914 if (tp->rx_opt.wscale_ok) {
1915 info->tcpi_options |= TCPI_OPT_WSCALE;
1916 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
1917 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
1920 if (tp->ecn_flags&TCP_ECN_OK)
1921 info->tcpi_options |= TCPI_OPT_ECN;
1923 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
1924 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
1925 info->tcpi_snd_mss = tp->mss_cache;
1926 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
1928 info->tcpi_unacked = tp->packets_out;
1929 info->tcpi_sacked = tp->sacked_out;
1930 info->tcpi_lost = tp->lost_out;
1931 info->tcpi_retrans = tp->retrans_out;
1932 info->tcpi_fackets = tp->fackets_out;
1934 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
1935 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
1936 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
1938 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
1939 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
1940 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
1941 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
1942 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
1943 info->tcpi_snd_cwnd = tp->snd_cwnd;
1944 info->tcpi_advmss = tp->advmss;
1945 info->tcpi_reordering = tp->reordering;
1947 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
1948 info->tcpi_rcv_space = tp->rcvq_space.space;
1950 info->tcpi_total_retrans = tp->total_retrans;
1953 EXPORT_SYMBOL_GPL(tcp_get_info);
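/*
 * Editorial sketch (not part of the original file): tcp_get_info() backs the
 * TCP_INFO getsockopt, so the structure filled in above can be inspected
 * from user space ("fd" is an illustrative connected TCP socket):
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/tcp.h>
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
 *		printf("rtt %u us, cwnd %u\n", info.tcpi_rtt, info.tcpi_snd_cwnd);
 */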
1955 static int do_tcp_getsockopt(struct sock *sk, int level,
1956 int optname, char __user *optval, int __user *optlen)
1958 struct inet_connection_sock *icsk = inet_csk(sk);
1959 struct tcp_sock *tp = tcp_sk(sk);
1962 if (get_user(len, optlen))
1965 len = min_t(unsigned int, len, sizeof(int));
1972 val = tp->mss_cache;
1973 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
1974 val = tp->rx_opt.user_mss;
1977 val = !!(tp->nonagle&TCP_NAGLE_OFF);
1980 val = !!(tp->nonagle&TCP_NAGLE_CORK);
1983 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
1986 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
1989 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
1992 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
1997 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
1999 case TCP_DEFER_ACCEPT:
2000 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2001 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
2003 case TCP_WINDOW_CLAMP:
2004 val = tp->window_clamp;
2007 struct tcp_info info;
2009 if (get_user(len, optlen))
2012 tcp_get_info(sk, &info);
2014 len = min_t(unsigned int, len, sizeof(info));
2015 if (put_user(len, optlen))
2017 if (copy_to_user(optval, &info, len))
2022 val = !icsk->icsk_ack.pingpong;
2025 case TCP_CONGESTION:
2026 if (get_user(len, optlen))
2028 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2029 if (put_user(len, optlen))
2031 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2035 return -ENOPROTOOPT;
2038 if (put_user(len, optlen))
2040 if (copy_to_user(optval, &val, len))
2045 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2048 struct inet_connection_sock *icsk = inet_csk(sk);
2050 if (level != SOL_TCP)
2051 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2053 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2056 #ifdef CONFIG_COMPAT
2057 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2058 char __user *optval, int __user *optlen)
2060 if (level != SOL_TCP)
2061 return inet_csk_compat_getsockopt(sk, level, optname,
2063 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2066 EXPORT_SYMBOL(compat_tcp_getsockopt);
2069 extern void __skb_cb_too_small_for_tcp(int, int);
2070 extern struct tcp_congestion_ops tcp_reno;
2072 static __initdata unsigned long thash_entries;
2073 static int __init set_thash_entries(char *str)
2077 thash_entries = simple_strtoul(str, &str, 0);
2080 __setup("thash_entries=", set_thash_entries);
2082 void __init tcp_init(void)
2084 struct sk_buff *skb = NULL;
2085 unsigned long limit;
2086 int order, i, max_share;
2088 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2089 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2092 tcp_hashinfo.bind_bucket_cachep =
2093 kmem_cache_create("tcp_bind_bucket",
2094 sizeof(struct inet_bind_bucket), 0,
2095 SLAB_HWCACHE_ALIGN, NULL, NULL);
2096 if (!tcp_hashinfo.bind_bucket_cachep)
2097 panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
2099 /* Size and allocate the main established and bind bucket
2102 * The methodology is similar to that of the buffer cache.
2104 tcp_hashinfo.ehash =
2105 alloc_large_system_hash("TCP established",
2106 sizeof(struct inet_ehash_bucket),
2108 (num_physpages >= 128 * 1024) ?
2111 &tcp_hashinfo.ehash_size,
2114 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
2115 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
2116 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2117 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2120 tcp_hashinfo.bhash =
2121 alloc_large_system_hash("TCP bind",
2122 sizeof(struct inet_bind_hashbucket),
2123 tcp_hashinfo.ehash_size,
2124 (num_physpages >= 128 * 1024) ?
2127 &tcp_hashinfo.bhash_size,
2130 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2131 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2132 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2133 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
2136 /* Try to be a bit smarter and adjust defaults depending
2137 * on available memory.
2139 for (order = 0; ((1 << order) << PAGE_SHIFT) <
2140 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
2144 sysctl_local_port_range[0] = 32768;
2145 sysctl_local_port_range[1] = 61000;
2146 tcp_death_row.sysctl_max_tw_buckets = 180000;
2147 sysctl_tcp_max_orphans = 4096 << (order - 4);
2148 sysctl_max_syn_backlog = 1024;
2149 } else if (order < 3) {
2150 sysctl_local_port_range[0] = 1024 * (3 - order);
2151 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
2152 sysctl_tcp_max_orphans >>= (3 - order);
2153 sysctl_max_syn_backlog = 128;
2156 sysctl_tcp_mem[0] = 768 << order;
2157 sysctl_tcp_mem[1] = 1024 << order;
2158 sysctl_tcp_mem[2] = 1536 << order;
2160 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2161 max_share = min(4UL*1024*1024, limit);
2163 sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
2164 sysctl_tcp_wmem[1] = 16*1024;
2165 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2167 sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
2168 sysctl_tcp_rmem[1] = 87380;
2169 sysctl_tcp_rmem[2] = max(87380, max_share);
2171 printk(KERN_INFO "TCP: Hash tables configured "
2172 "(established %d bind %d)\n",
2173 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
2175 tcp_register_congestion_control(&tcp_reno);
2178 EXPORT_SYMBOL(tcp_close);
2179 EXPORT_SYMBOL(tcp_disconnect);
2180 EXPORT_SYMBOL(tcp_getsockopt);
2181 EXPORT_SYMBOL(tcp_ioctl);
2182 EXPORT_SYMBOL(tcp_poll);
2183 EXPORT_SYMBOL(tcp_read_sock);
2184 EXPORT_SYMBOL(tcp_recvmsg);
2185 EXPORT_SYMBOL(tcp_sendmsg);
2186 EXPORT_SYMBOL(tcp_sendpage);
2187 EXPORT_SYMBOL(tcp_setsockopt);
2188 EXPORT_SYMBOL(tcp_shutdown);
2189 EXPORT_SYMBOL(tcp_statistics);