2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
11 * Corey Minyard <wf-rch!minyard@relay.EU.net>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
14 * Linus Torvalds, <torvalds@cs.helsinki.fi>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Matthew Dillon, <dillon@apollo.west.oic.com>
17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18 * Jorge Cwik, <jorge@laser.satlink.net>
21 * Alan Cox : Numerous verify_area() calls
22 * Alan Cox : Set the ACK bit on a reset
23 * Alan Cox : Stopped it crashing if it closed while
24 * sk->inuse=1 and was trying to connect
26 * Alan Cox : All icmp error handling was broken
27 * pointers passed where wrong and the
28 * socket was looked up backwards. Nobody
29 * tested any icmp error code obviously.
30 * Alan Cox : tcp_err() now handled properly. It
31 * wakes people on errors. poll
32 * behaves and the icmp error race
33 * has gone by moving it into sock.c
34 * Alan Cox : tcp_send_reset() fixed to work for
35 * everything not just packets for
37 * Alan Cox : tcp option processing.
38 * Alan Cox : Reset tweaked (still not 100%) [Had
40 * Herp Rosmanith : More reset fixes
41 * Alan Cox : No longer acks invalid rst frames.
42 * Acking any kind of RST is right out.
43 * Alan Cox : Sets an ignore me flag on an rst
44 * receive otherwise odd bits of prattle
46 * Alan Cox : Fixed another acking RST frame bug.
47 * Should stop LAN workplace lockups.
48 * Alan Cox : Some tidyups using the new skb list
50 * Alan Cox : sk->keepopen now seems to work
51 * Alan Cox : Pulls options out correctly on accepts
52 * Alan Cox : Fixed assorted sk->rqueue->next errors
53 * Alan Cox : PSH doesn't end a TCP read. Switched a
55 * Alan Cox : Tidied tcp_data to avoid a potential
57 * Alan Cox : Added some better commenting, as the
58 * tcp is hard to follow
59 * Alan Cox : Removed incorrect check for 20 * psh
60 * Michael O'Reilly : ack < copied bug fix.
61 * Johannes Stille : Misc tcp fixes (not all in yet).
62 * Alan Cox : FIN with no memory -> CRASH
63 * Alan Cox : Added socket option proto entries.
64 * Also added awareness of them to accept.
65 * Alan Cox : Added TCP options (SOL_TCP)
66 * Alan Cox : Switched wakeup calls to callbacks,
67 * so the kernel can layer network
69 * Alan Cox : Use ip_tos/ip_ttl settings.
70 * Alan Cox : Handle FIN (more) properly (we hope).
71 * Alan Cox : RST frames sent on unsynchronised
73 * Alan Cox : Put in missing check for SYN bit.
74 * Alan Cox : Added tcp_select_window() aka NET2E
75 * window non shrink trick.
76 * Alan Cox : Added a couple of small NET2E timer
78 * Charles Hedrick : TCP fixes
79 * Toomas Tamm : TCP window fixes
80 * Alan Cox : Small URG fix to rlogin ^C ack fight
81 * Charles Hedrick : Rewrote most of it to actually work
82 * Linus : Rewrote tcp_read() and URG handling
84 * Gerhard Koerting: Fixed some missing timer handling
85 * Matthew Dillon : Reworked TCP machine states as per RFC
86 * Gerhard Koerting: PC/TCP workarounds
87 * Adam Caldwell : Assorted timer/timing errors
88 * Matthew Dillon : Fixed another RST bug
89 * Alan Cox : Move to kernel side addressing changes.
90 * Alan Cox : Beginning work on TCP fastpathing
92 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
93 * Alan Cox : TCP fast path debugging
94 * Alan Cox : Window clamping
95 * Michael Riepe : Bug in tcp_check()
96 * Matt Dillon : More TCP improvements and RST bug fixes
97 * Matt Dillon : Yet more small nasties removed from the
98 * TCP code (Be very nice to this man if
99 * tcp finally works 100%) 8)
100 * Alan Cox : BSD accept semantics.
101 * Alan Cox : Reset on closedown bug.
102 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
103 * Michael Pall : Handle poll() after URG properly in
105 * Michael Pall : Undo the last fix in tcp_read_urg()
106 * (multi URG PUSH broke rlogin).
107 * Michael Pall : Fix the multi URG PUSH problem in
108 * tcp_readable(), poll() after URG
110 * Michael Pall : recv(...,MSG_OOB) never blocks in the
112 * Alan Cox : Changed the semantics of sk->socket to
113 * fix a race and a signal problem with
114 * accept() and async I/O.
115 * Alan Cox : Relaxed the rules on tcp_sendto().
116 * Yury Shevchuk : Really fixed accept() blocking problem.
117 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
118 * clients/servers which listen in on
120 * Alan Cox : Cleaned the above up and shrank it to
121 * a sensible code size.
122 * Alan Cox : Self connect lockup fix.
123 * Alan Cox : No connect to multicast.
124 * Ross Biro : Close unaccepted children on master
126 * Alan Cox : Reset tracing code.
127 * Alan Cox : Spurious resets on shutdown.
128 * Alan Cox : Giant 15 minute/60 second timer error
129 * Alan Cox : Small whoops in polling before an
131 * Alan Cox : Kept the state trace facility since
132 * it's handy for debugging.
133 * Alan Cox : More reset handler fixes.
134 * Alan Cox : Started rewriting the code based on
135 * the RFC's for other useful protocol
136 * references see: Comer, KA9Q NOS, and
137 * for a reference on the difference
138 * between specifications and how BSD
139 * works see the 4.4lite source.
140 * A.N.Kuznetsov : Don't time wait on completion of tidy
142 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
143 * Linus Torvalds : Fixed BSD port reuse to work first syn
144 * Alan Cox : Reimplemented timers as per the RFC
145 * and using multiple timers for sanity.
146 * Alan Cox : Small bug fixes, and a lot of new
148 * Alan Cox : Fixed dual reader crash by locking
149 * the buffers (much like datagram.c)
150 * Alan Cox : Fixed stuck sockets in probe. A probe
151 * now gets fed up of retrying without
152 * (even a no space) answer.
153 * Alan Cox : Extracted closing code better
154 * Alan Cox : Fixed the closing state machine to
156 * Alan Cox : More 'per spec' fixes.
157 * Jorge Cwik : Even faster checksumming.
158 * Alan Cox : tcp_data() doesn't ack illegal PSH
159 * only frames. At least one pc tcp stack
161 * Alan Cox : Cache last socket.
162 * Alan Cox : Per route irtt.
163 * Matt Day : poll()->select() match BSD precisely on error
164 * Alan Cox : New buffers
165 * Marc Tamsky : Various sk->prot->retransmits and
166 * sk->retransmits misupdating fixed.
167 * Fixed tcp_write_timeout: stuck close,
168 * and TCP syn retries gets used now.
169 * Mark Yarvis : In tcp_read_wakeup(), don't send an
170 * ack if state is TCP_CLOSED.
171 * Alan Cox : Look up device on a retransmit - routes may
172 * change. Doesn't yet cope with MSS shrink right
174 * Marc Tamsky : Closing in closing fixes.
175 * Mike Shaver : RFC1122 verifications.
176 * Alan Cox : rcv_saddr errors.
177 * Alan Cox : Block double connect().
178 * Alan Cox : Small hooks for enSKIP.
179 * Alexey Kuznetsov: Path MTU discovery.
180 * Alan Cox : Support soft errors.
181 * Alan Cox : Fix MTU discovery pathological case
182 * when the remote claims no mtu!
183 * Marc Tamsky : TCP_CLOSE fix.
184 * Colin (G3TNE) : Send a reset on syn ack replies in
185 * window but wrong (fixes NT lpd problems)
186 * Pedro Roque : Better TCP window handling, delayed ack.
187 * Joerg Reuter : No modification of locked buffers in
188 * tcp_do_retransmit()
189 * Eric Schenk : Changed receiver side silly window
190 * avoidance algorithm to BSD style
191 * algorithm. This doubles throughput
192 * against machines running Solaris,
193 * and seems to result in general
195 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
196 * Willy Konynenberg : Transparent proxying support.
197 * Mike McLagan : Routing by source
198 * Keith Owens : Do proper merging with partial SKB's in
199 * tcp_do_sendmsg to avoid burstiness.
200 * Eric Schenk : Fix fast close down bug with
201 * shutdown() followed by close().
202 * Andi Kleen : Make poll agree with SIGIO
203 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
204 * lingertime == 0 (RFC 793 ABORT Call)
205 * Hirokazu Takahashi : Use copy_from_user() instead of
206 * csum_and_copy_from_user() if possible.
208 * This program is free software; you can redistribute it and/or
209 * modify it under the terms of the GNU General Public License
210 * as published by the Free Software Foundation; either version
211 * 2 of the License, or(at your option) any later version.
213 * Description of States:
215 * TCP_SYN_SENT sent a connection request, waiting for ack
217 * TCP_SYN_RECV received a connection request, sent ack,
218 * waiting for final ack in three-way handshake.
220 * TCP_ESTABLISHED connection established
222 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
223 * transmission of remaining buffered data
225 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
228 * TCP_CLOSING both sides have shutdown but we still have
229 * data we have to finish sending
231 * TCP_TIME_WAIT timeout to catch resent junk before entering
232 * closed, can only be entered from FIN_WAIT2
233 * or CLOSING. Required because the other end
234 * may not have gotten our last ACK causing it
235 * to retransmit the data packet (which we ignore)
237 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
238 * us to finish writing our data and to shutdown
239 * (we have to close() to move on to LAST_ACK)
241 * TCP_LAST_ACK our side has shutdown after remote has
242 * shutdown. There may still be data in our
243 * buffer that we have to finish sending
245 * TCP_CLOSE socket is finished
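 *
 *	A worked example of the transitions above (per RFC 793):
 *
 *	  active close:   ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2
 *	                  -> TIME_WAIT -> CLOSE
 *	  passive close:  ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE
 *	  simultaneous:   ESTABLISHED -> FIN_WAIT1 -> CLOSING
 *	                  -> TIME_WAIT -> CLOSE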
248 #include <linux/kernel.h>
249 #include <linux/module.h>
250 #include <linux/types.h>
251 #include <linux/fcntl.h>
252 #include <linux/poll.h>
253 #include <linux/init.h>
254 #include <linux/fs.h>
255 #include <linux/skbuff.h>
256 #include <linux/splice.h>
257 #include <linux/net.h>
258 #include <linux/socket.h>
259 #include <linux/random.h>
260 #include <linux/bootmem.h>
261 #include <linux/cache.h>
262 #include <linux/err.h>
263 #include <linux/crypto.h>
265 #include <net/icmp.h>
267 #include <net/xfrm.h>
269 #include <net/netdma.h>
270 #include <net/sock.h>
272 #include <asm/uaccess.h>
273 #include <asm/ioctls.h>
275 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
277 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
279 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
281 EXPORT_SYMBOL_GPL(tcp_orphan_count);
283 int sysctl_tcp_mem[3] __read_mostly;
284 int sysctl_tcp_wmem[3] __read_mostly;
285 int sysctl_tcp_rmem[3] __read_mostly;
287 EXPORT_SYMBOL(sysctl_tcp_mem);
288 EXPORT_SYMBOL(sysctl_tcp_rmem);
289 EXPORT_SYMBOL(sysctl_tcp_wmem);
291 atomic_t tcp_memory_allocated; /* Current allocated memory. */
292 atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
294 EXPORT_SYMBOL(tcp_memory_allocated);
295 EXPORT_SYMBOL(tcp_sockets_allocated);
300 struct tcp_splice_state {
301 struct pipe_inode_info *pipe;
307 * Pressure flag: try to collapse.
308 * Technical note: it is used by multiple contexts non atomically.
309 * All the __sk_mem_schedule() is of this nature: accounting
310 * is strict, actions are advisory and have some latency.
312 int tcp_memory_pressure __read_mostly;
314 EXPORT_SYMBOL(tcp_memory_pressure);
316 void tcp_enter_memory_pressure(void)
318 if (!tcp_memory_pressure) {
319 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
320 tcp_memory_pressure = 1;
324 EXPORT_SYMBOL(tcp_enter_memory_pressure);
327 * Wait for a TCP event.
329 * Note that we don't need to lock the socket, as the upper poll layers
330 * take care of normal races (between the test and the event) and we don't
331 * go look at any of the socket buffers directly.
333 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
336 struct sock *sk = sock->sk;
337 struct tcp_sock *tp = tcp_sk(sk);
339 poll_wait(file, sk->sk_sleep, wait);
340 if (sk->sk_state == TCP_LISTEN)
341 return inet_csk_listen_poll(sk);
343 /* Socket is not locked. We are protected from async events
344 by poll logic and correct handling of state changes
345 * made by other threads is impossible in any case.
353 * POLLHUP is certainly not done right. But poll() doesn't
354 * have a notion of HUP in just one direction, and for a
355 * socket the read side is more interesting.
357 * Some poll() documentation says that POLLHUP is incompatible
358 * with the POLLOUT/POLLWR flags, so somebody should check this
359 * all. But careful, it tends to be safer to return too many
360 * bits than too few, and you can easily break real applications
361 * if you don't tell them that something has hung up!
365 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
366 * our fs/select.c). It means that after we received EOF,
367 * poll always returns immediately, making poll() on write() impossible
368 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
369 * if and only if shutdown has been made in both directions.
370 * Actually, it is interesting to look how Solaris and DUX
371 * solve this dilemma. I would prefer, if POLLHUP were maskable,
372 * then we could set it on SND_SHUTDOWN. BTW examples given
373 * in Stevens' books assume exactly this behaviour, it explains
374 * why POLLHUP is incompatible with POLLOUT. --ANK
376 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
377 * blocking on fresh not-connected or disconnected socket. --ANK
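 *
 * An illustrative user-space sketch (simplified; handle_close/handle_read/
 * handle_write are hypothetical helpers) of how the mask computed below is
 * typically consumed:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = sock_fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & (POLLERR | POLLHUP))
 *			handle_close(sock_fd);	// peer gone or error
 *		if (pfd.revents & POLLIN)
 *			handle_read(sock_fd);	// data (or FIN) to read
 *		if (pfd.revents & POLLOUT)
 *			handle_write(sock_fd);	// send buffer has space
 *	}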
379 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
381 if (sk->sk_shutdown & RCV_SHUTDOWN)
382 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
385 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
386 /* Potential race condition. If the read of tp below is
387 * reordered before the read of sk->sk_state above, we can be
388 * illegally awakened in SYN_* states. */
389 if ((tp->rcv_nxt != tp->copied_seq) &&
390 (tp->urg_seq != tp->copied_seq ||
391 tp->rcv_nxt != tp->copied_seq + 1 ||
392 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
393 mask |= POLLIN | POLLRDNORM;
395 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
396 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
397 mask |= POLLOUT | POLLWRNORM;
398 } else { /* send SIGIO later */
399 set_bit(SOCK_ASYNC_NOSPACE,
400 &sk->sk_socket->flags);
401 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
403 /* Race breaker. If space is freed after
404 * wspace test but before the flags are set,
405 * IO signal will be lost.
407 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
408 mask |= POLLOUT | POLLWRNORM;
412 if (tp->urg_data & TCP_URG_VALID)
418 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
420 struct tcp_sock *tp = tcp_sk(sk);
425 if (sk->sk_state == TCP_LISTEN)
429 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
431 else if (sock_flag(sk, SOCK_URGINLINE) ||
433 before(tp->urg_seq, tp->copied_seq) ||
434 !before(tp->urg_seq, tp->rcv_nxt)) {
435 answ = tp->rcv_nxt - tp->copied_seq;
437 /* Subtract 1, if FIN is in queue. */
438 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
440 tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
442 answ = tp->urg_seq - tp->copied_seq;
446 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
449 if (sk->sk_state == TCP_LISTEN)
452 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
455 answ = tp->write_seq - tp->snd_una;
461 return put_user(answ, (int __user *)arg);
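/*
 * An illustrative user-space sketch (not part of this file; simplified) of
 * the ioctls answered above.  FIONREAD and SIOCATMARK come from
 * <sys/ioctl.h>, SIOCOUTQ from <linux/sockios.h>.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int inq, atmark, outq;
 *
 *	ioctl(sock_fd, FIONREAD, &inq);      // bytes available to read
 *	ioctl(sock_fd, SIOCATMARK, &atmark); // non-zero at the urgent mark
 *	ioctl(sock_fd, SIOCOUTQ, &outq);     // bytes written but not yet acked
 */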
464 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
466 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
467 tp->pushed_seq = tp->write_seq;
470 static inline int forced_push(struct tcp_sock *tp)
472 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
475 static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
477 struct tcp_sock *tp = tcp_sk(sk);
478 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
481 tcb->seq = tcb->end_seq = tp->write_seq;
482 tcb->flags = TCPCB_FLAG_ACK;
484 skb_header_release(skb);
485 tcp_add_write_queue_tail(sk, skb);
486 sk->sk_wmem_queued += skb->truesize;
487 sk_mem_charge(sk, skb->truesize);
488 if (tp->nonagle & TCP_NAGLE_PUSH)
489 tp->nonagle &= ~TCP_NAGLE_PUSH;
492 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
495 if (flags & MSG_OOB) {
497 tp->snd_up = tp->write_seq;
501 static inline void tcp_push(struct sock *sk, int flags, int mss_now,
504 struct tcp_sock *tp = tcp_sk(sk);
506 if (tcp_send_head(sk)) {
507 struct sk_buff *skb = tcp_write_queue_tail(sk);
508 if (!(flags & MSG_MORE) || forced_push(tp))
509 tcp_mark_push(tp, skb);
510 tcp_mark_urg(tp, flags, skb);
511 __tcp_push_pending_frames(sk, mss_now,
512 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
516 static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
517 unsigned int offset, size_t len)
519 struct tcp_splice_state *tss = rd_desc->arg.data;
521 return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
524 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
526 /* Store TCP splice context information in read_descriptor_t. */
527 read_descriptor_t rd_desc = {
531 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
535 * tcp_splice_read - splice data from TCP socket to a pipe
536 * @sock: socket to splice from
537 * @ppos: position (not valid)
538 * @pipe: pipe to splice to
539 * @len: number of bytes to splice
540 * @flags: splice modifier flags
543 * Will read pages from given socket and fill them into a pipe.
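 *
 * An illustrative user-space sketch (simplified; error handling omitted)
 * of the splice(2) path that ends up here, moving data from a TCP socket
 * into a pipe and on to another descriptor:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(sock_fd, NULL, pfd[1], NULL, 65536,
 *			   SPLICE_F_MOVE | SPLICE_F_MORE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);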
546 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
547 struct pipe_inode_info *pipe, size_t len,
550 struct sock *sk = sock->sk;
551 struct tcp_splice_state tss = {
561 * We can't seek on a socket input
570 timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
572 ret = __tcp_splice_read(sk, &tss);
578 if (flags & SPLICE_F_NONBLOCK) {
582 if (sock_flag(sk, SOCK_DONE))
585 ret = sock_error(sk);
588 if (sk->sk_shutdown & RCV_SHUTDOWN)
590 if (sk->sk_state == TCP_CLOSE) {
592 * This occurs when user tries to read
593 * from never connected socket.
595 if (!sock_flag(sk, SOCK_DONE))
603 sk_wait_data(sk, &timeo);
604 if (signal_pending(current)) {
605 ret = sock_intr_errno(timeo);
616 if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
617 (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
618 signal_pending(current))
630 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
634 /* The TCP header must be at least 32-bit aligned. */
635 size = ALIGN(size, 4);
637 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
639 if (sk_wmem_schedule(sk, skb->truesize)) {
641 * Make sure that we have exactly size bytes
642 * available to the caller, no more, no less.
644 skb_reserve(skb, skb_tailroom(skb) - size);
649 sk->sk_prot->enter_memory_pressure();
650 sk_stream_moderate_sndbuf(sk);
655 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
656 size_t psize, int flags)
658 struct tcp_sock *tp = tcp_sk(sk);
659 int mss_now, size_goal;
662 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
664 /* Wait for a connection to finish. */
665 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
666 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
669 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
671 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
672 size_goal = tp->xmit_size_goal;
676 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
680 struct sk_buff *skb = tcp_write_queue_tail(sk);
681 struct page *page = pages[poffset / PAGE_SIZE];
682 int copy, i, can_coalesce;
683 int offset = poffset % PAGE_SIZE;
684 int size = min_t(size_t, psize, PAGE_SIZE - offset);
686 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
688 if (!sk_stream_memory_free(sk))
689 goto wait_for_sndbuf;
691 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
693 goto wait_for_memory;
702 i = skb_shinfo(skb)->nr_frags;
703 can_coalesce = skb_can_coalesce(skb, i, page, offset);
704 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
705 tcp_mark_push(tp, skb);
708 if (!sk_wmem_schedule(sk, copy))
709 goto wait_for_memory;
712 skb_shinfo(skb)->frags[i - 1].size += copy;
715 skb_fill_page_desc(skb, i, page, offset, copy);
719 skb->data_len += copy;
720 skb->truesize += copy;
721 sk->sk_wmem_queued += copy;
722 sk_mem_charge(sk, copy);
723 skb->ip_summed = CHECKSUM_PARTIAL;
724 tp->write_seq += copy;
725 TCP_SKB_CB(skb)->end_seq += copy;
726 skb_shinfo(skb)->gso_segs = 0;
729 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
733 if (!(psize -= copy))
736 if (skb->len < size_goal || (flags & MSG_OOB))
739 if (forced_push(tp)) {
740 tcp_mark_push(tp, skb);
741 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
742 } else if (skb == tcp_send_head(sk))
743 tcp_push_one(sk, mss_now);
747 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
750 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
752 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
755 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
756 size_goal = tp->xmit_size_goal;
761 tcp_push(sk, flags, mss_now, tp->nonagle);
768 return sk_stream_error(sk, flags, err);
771 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
772 size_t size, int flags)
775 struct sock *sk = sock->sk;
777 if (!(sk->sk_route_caps & NETIF_F_SG) ||
778 !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
779 return sock_no_sendpage(sock, page, offset, size, flags);
783 res = do_tcp_sendpages(sk, &page, offset, size, flags);
789 #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
790 #define TCP_OFF(sk) (sk->sk_sndmsg_off)
792 static inline int select_size(struct sock *sk)
794 struct tcp_sock *tp = tcp_sk(sk);
795 int tmp = tp->mss_cache;
797 if (sk->sk_route_caps & NETIF_F_SG) {
801 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
803 if (tmp >= pgbreak &&
804 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
812 int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
815 struct sock *sk = sock->sk;
817 struct tcp_sock *tp = tcp_sk(sk);
820 int mss_now, size_goal;
827 flags = msg->msg_flags;
828 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
830 /* Wait for a connection to finish. */
831 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
832 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
835 /* This should be in poll */
836 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
838 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
839 size_goal = tp->xmit_size_goal;
841 /* Ok commence sending. */
842 iovlen = msg->msg_iovlen;
847 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
850 while (--iovlen >= 0) {
851 int seglen = iov->iov_len;
852 unsigned char __user *from = iov->iov_base;
859 skb = tcp_write_queue_tail(sk);
861 if (!tcp_send_head(sk) ||
862 (copy = size_goal - skb->len) <= 0) {
865 /* Allocate new segment. If the interface is SG,
866 * allocate an skb that fits in a single page.
868 if (!sk_stream_memory_free(sk))
869 goto wait_for_sndbuf;
871 skb = sk_stream_alloc_skb(sk, select_size(sk),
874 goto wait_for_memory;
877 * Check whether we can use HW checksum.
879 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
880 skb->ip_summed = CHECKSUM_PARTIAL;
886 /* Try to append data to the end of skb. */
890 /* Where to copy to? */
891 if (skb_tailroom(skb) > 0) {
892 /* We have some space in skb head. Superb! */
893 if (copy > skb_tailroom(skb))
894 copy = skb_tailroom(skb);
895 if ((err = skb_add_data(skb, from, copy)) != 0)
899 int i = skb_shinfo(skb)->nr_frags;
900 struct page *page = TCP_PAGE(sk);
901 int off = TCP_OFF(sk);
903 if (skb_can_coalesce(skb, i, page, off) &&
905 /* We can extend the last page
908 } else if (i == MAX_SKB_FRAGS ||
910 !(sk->sk_route_caps & NETIF_F_SG))) {
911 /* Need to add new fragment and cannot
912 * do this because interface is non-SG,
913 * or because all the page slots are
915 tcp_mark_push(tp, skb);
918 if (off == PAGE_SIZE) {
920 TCP_PAGE(sk) = page = NULL;
926 if (copy > PAGE_SIZE - off)
927 copy = PAGE_SIZE - off;
929 if (!sk_wmem_schedule(sk, copy))
930 goto wait_for_memory;
933 /* Allocate new cache page. */
934 if (!(page = sk_stream_alloc_page(sk)))
935 goto wait_for_memory;
938 /* Time to copy data. We are close to
940 err = skb_copy_to_page(sk, from, skb, page,
943 /* If this page was new, give it to the
944 * socket so it does not get leaked.
953 /* Update the skb. */
955 skb_shinfo(skb)->frags[i - 1].size +=
958 skb_fill_page_desc(skb, i, page, off, copy);
961 } else if (off + copy < PAGE_SIZE) {
967 TCP_OFF(sk) = off + copy;
971 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
973 tp->write_seq += copy;
974 TCP_SKB_CB(skb)->end_seq += copy;
975 skb_shinfo(skb)->gso_segs = 0;
979 if ((seglen -= copy) == 0 && iovlen == 0)
982 if (skb->len < size_goal || (flags & MSG_OOB))
985 if (forced_push(tp)) {
986 tcp_mark_push(tp, skb);
987 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
988 } else if (skb == tcp_send_head(sk))
989 tcp_push_one(sk, mss_now);
993 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
996 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
998 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
1001 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
1002 size_goal = tp->xmit_size_goal;
1008 tcp_push(sk, flags, mss_now, tp->nonagle);
1009 TCP_CHECK_TIMER(sk);
1015 tcp_unlink_write_queue(skb, sk);
1016 /* It is the one place in all of TCP, except connection
1017 * reset, where we can be unlinking the send_head.
1019 tcp_check_send_head(sk, skb);
1020 sk_wmem_free_skb(sk, skb);
1027 err = sk_stream_error(sk, flags, err);
1028 TCP_CHECK_TIMER(sk);
1034 * Handle reading urgent data. BSD has very simple semantics for
1035 * this, no blocking and very strange errors 8)
1038 static int tcp_recv_urg(struct sock *sk, long timeo,
1039 struct msghdr *msg, int len, int flags,
1042 struct tcp_sock *tp = tcp_sk(sk);
1044 /* No URG data to read. */
1045 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1046 tp->urg_data == TCP_URG_READ)
1047 return -EINVAL; /* Yes this is right ! */
1049 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1052 if (tp->urg_data & TCP_URG_VALID) {
1054 char c = tp->urg_data;
1056 if (!(flags & MSG_PEEK))
1057 tp->urg_data = TCP_URG_READ;
1059 /* Read urgent data. */
1060 msg->msg_flags |= MSG_OOB;
1063 if (!(flags & MSG_TRUNC))
1064 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1067 msg->msg_flags |= MSG_TRUNC;
1069 return err ? -EFAULT : len;
1072 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1075 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1076 * the available implementations agree in this case:
1077 * this call should never block, independent of the
1078 * blocking state of the socket.
1079 * Mike <pall@rz.uni-karlsruhe.de>
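 *
 * An illustrative user-space sketch (simplified; handle_urgent is a
 * hypothetical helper) of the non-blocking behaviour described above:
 *
 *	#include <sys/socket.h>
 *	#include <errno.h>
 *
 *	char oob;
 *	ssize_t n = recv(sock_fd, &oob, 1, MSG_OOB);
 *	if (n == 1)
 *		handle_urgent(oob);	// got the urgent byte
 *	else if (n < 0 && errno == EINVAL)
 *		;			// no urgent data pending (see above)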
1084 /* Clean up the receive buffer for full frames taken by the user,
1085 * then send an ACK if necessary. COPIED is the number of bytes
1086 * tcp_recvmsg has given to the user so far, it speeds up the
1087 * calculation of whether or not we must ACK for the sake of
1090 void tcp_cleanup_rbuf(struct sock *sk, int copied)
1092 struct tcp_sock *tp = tcp_sk(sk);
1093 int time_to_ack = 0;
1096 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1098 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
1101 if (inet_csk_ack_scheduled(sk)) {
1102 const struct inet_connection_sock *icsk = inet_csk(sk);
1103 /* Delayed ACKs frequently hit locked sockets during bulk
1105 if (icsk->icsk_ack.blocked ||
1106 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1107 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1109 * If this read emptied read buffer, we send ACK, if
1110 * connection is not bidirectional, user drained
1111 * receive buffer and there was a small segment
1115 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1116 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1117 !icsk->icsk_ack.pingpong)) &&
1118 !atomic_read(&sk->sk_rmem_alloc)))
1122 /* We send an ACK if we can now advertise a non-zero window
1123 * which has been raised "significantly".
1125 * Even if the window is raised up to infinity, do not send a window open ACK
1126 * in states where we will not receive any more data. It is useless.
1128 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1129 __u32 rcv_window_now = tcp_receive_window(tp);
1131 /* Optimize, __tcp_select_window() is not cheap. */
1132 if (2*rcv_window_now <= tp->window_clamp) {
1133 __u32 new_window = __tcp_select_window(sk);
1135 /* Send an ACK now if this read freed lots of space
1136 * in our buffer. We can advertise the new window now
1137 * if it is not less than the current one.
1138 * "Lots" means "at least twice" here.
1140 if (new_window && new_window >= 2 * rcv_window_now)
1148 static void tcp_prequeue_process(struct sock *sk)
1150 struct sk_buff *skb;
1151 struct tcp_sock *tp = tcp_sk(sk);
1153 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
1155 /* RX process wants to run with disabled BHs, though it is not
1158 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1159 sk->sk_backlog_rcv(sk, skb);
1162 /* Clear memory counter. */
1163 tp->ucopy.memory = 0;
1166 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1168 struct sk_buff *skb;
1171 skb_queue_walk(&sk->sk_receive_queue, skb) {
1172 offset = seq - TCP_SKB_CB(skb)->seq;
1173 if (tcp_hdr(skb)->syn)
1175 if (offset < skb->len || tcp_hdr(skb)->fin) {
1184 * This routine provides an alternative to tcp_recvmsg() for routines
1185 * that would like to handle copying from skbuffs directly in 'sendfile'
1188 * - It is assumed that the socket was locked by the caller.
1189 * - The routine does not block.
1190 * - At present, there is no support for reading OOB data
1191 * or for 'peeking' the socket using this routine
1192 * (although both would be easy to implement).
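 *
 * A minimal sketch of a caller, modelled on __tcp_splice_read() above;
 * my_ctx and consume() are hypothetical.  The actor returns how many
 * bytes it used, or a negative error, exactly as tcp_splice_data_recv()
 * does:
 *
 *	static int my_actor(read_descriptor_t *rd_desc, struct sk_buff *skb,
 *			    unsigned int offset, size_t len)
 *	{
 *		struct my_ctx *ctx = rd_desc->arg.data;
 *		return consume(ctx, skb, offset, len);
 *	}
 *
 *	read_descriptor_t rd_desc = { .arg.data = ctx, .count = to_read };
 *	int copied = tcp_read_sock(sk, &rd_desc, my_actor);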
1194 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1195 sk_read_actor_t recv_actor)
1197 struct sk_buff *skb;
1198 struct tcp_sock *tp = tcp_sk(sk);
1199 u32 seq = tp->copied_seq;
1203 if (sk->sk_state == TCP_LISTEN)
1205 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1206 if (offset < skb->len) {
1209 len = skb->len - offset;
1210 /* Stop reading if we hit a patch of urgent data */
1212 u32 urg_offset = tp->urg_seq - seq;
1213 if (urg_offset < len)
1218 used = recv_actor(desc, skb, offset, len);
1223 } else if (used <= len) {
1229 * If recv_actor drops the lock (e.g. TCP splice
1230 * receive) the skb pointer might be invalid when
1231 * getting here: tcp_collapse might have deleted it
1232 * while aggregating skbs from the socket queue.
1234 skb = tcp_recv_skb(sk, seq-1, &offset);
1235 if (!skb || (offset+1 != skb->len))
1238 if (tcp_hdr(skb)->fin) {
1239 sk_eat_skb(sk, skb, 0);
1243 sk_eat_skb(sk, skb, 0);
1247 tp->copied_seq = seq;
1249 tcp_rcv_space_adjust(sk);
1251 /* Clean up data we have read: This will do ACK frames. */
1253 tcp_cleanup_rbuf(sk, copied);
1258 * This routine copies from a sock struct into the user buffer.
1260 * Technical note: in 2.3 we work on _locked_ socket, so that
1261 * tricks with *seq access order and skb->users are not required.
1262 * Probably, the code can be improved even further.
1265 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1266 size_t len, int nonblock, int flags, int *addr_len)
1268 struct tcp_sock *tp = tcp_sk(sk);
1274 int target; /* Read at least this many bytes */
1276 struct task_struct *user_recv = NULL;
1277 int copied_early = 0;
1278 struct sk_buff *skb;
1282 TCP_CHECK_TIMER(sk);
1285 if (sk->sk_state == TCP_LISTEN)
1288 timeo = sock_rcvtimeo(sk, nonblock);
1290 /* Urgent data needs to be handled specially. */
1291 if (flags & MSG_OOB)
1294 seq = &tp->copied_seq;
1295 if (flags & MSG_PEEK) {
1296 peek_seq = tp->copied_seq;
1300 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1302 #ifdef CONFIG_NET_DMA
1303 tp->ucopy.dma_chan = NULL;
1305 skb = skb_peek_tail(&sk->sk_receive_queue);
1310 available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1311 if ((available < target) &&
1312 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1313 !sysctl_tcp_low_latency &&
1314 __get_cpu_var(softnet_data).net_dma) {
1315 preempt_enable_no_resched();
1316 tp->ucopy.pinned_list =
1317 dma_pin_iovec_pages(msg->msg_iov, len);
1319 preempt_enable_no_resched();
1327 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1328 if (tp->urg_data && tp->urg_seq == *seq) {
1331 if (signal_pending(current)) {
1332 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1337 /* Next get a buffer. */
1339 skb = skb_peek(&sk->sk_receive_queue);
1344 /* Now that we have two receive queues this
1347 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1348 printk(KERN_INFO "recvmsg bug: copied %X "
1349 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1352 offset = *seq - TCP_SKB_CB(skb)->seq;
1353 if (tcp_hdr(skb)->syn)
1355 if (offset < skb->len)
1357 if (tcp_hdr(skb)->fin)
1359 BUG_TRAP(flags & MSG_PEEK);
1361 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1363 /* Well, if we have backlog, try to process it now. */
1365 if (copied >= target && !sk->sk_backlog.tail)
1370 sk->sk_state == TCP_CLOSE ||
1371 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1373 signal_pending(current) ||
1377 if (sock_flag(sk, SOCK_DONE))
1381 copied = sock_error(sk);
1385 if (sk->sk_shutdown & RCV_SHUTDOWN)
1388 if (sk->sk_state == TCP_CLOSE) {
1389 if (!sock_flag(sk, SOCK_DONE)) {
1390 /* This occurs when user tries to read
1391 * from never connected socket.
1404 if (signal_pending(current)) {
1405 copied = sock_intr_errno(timeo);
1410 tcp_cleanup_rbuf(sk, copied);
1412 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1413 /* Install new reader */
1414 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1415 user_recv = current;
1416 tp->ucopy.task = user_recv;
1417 tp->ucopy.iov = msg->msg_iov;
1420 tp->ucopy.len = len;
1422 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1423 (flags & (MSG_PEEK | MSG_TRUNC)));
1425 /* Ugly... If prequeue is not empty, we have to
1426 * process it before releasing socket, otherwise
1427 * order will be broken at second iteration.
1428 * More elegant solution is required!!!
1430 * Look: we have the following (pseudo)queues:
1432 * 1. packets in flight
1437 * Each queue can be processed only if the next ones
1438 * are empty. At this point we have empty receive_queue.
1439 * But prequeue _can_ be not empty after 2nd iteration,
1440 * when we jumped to start of loop because backlog
1441 * processing added something to receive_queue.
1442 * We cannot release_sock(), because backlog contains
1443 * packets arrived _after_ prequeued ones.
1445 * In short, the algorithm is clear --- process all
1446 * the queues in order. We could do it more directly,
1447 * requeueing packets from backlog to prequeue if it
1448 * is not empty. That is more elegant, but eats cycles,
1451 if (!skb_queue_empty(&tp->ucopy.prequeue))
1454 /* __ Set realtime policy in scheduler __ */
1457 if (copied >= target) {
1458 /* Do not sleep, just process backlog. */
1462 sk_wait_data(sk, &timeo);
1464 #ifdef CONFIG_NET_DMA
1465 tp->ucopy.wakeup = 0;
1471 /* __ Restore normal policy in scheduler __ */
1473 if ((chunk = len - tp->ucopy.len) != 0) {
1474 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1479 if (tp->rcv_nxt == tp->copied_seq &&
1480 !skb_queue_empty(&tp->ucopy.prequeue)) {
1482 tcp_prequeue_process(sk);
1484 if ((chunk = len - tp->ucopy.len) != 0) {
1485 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1491 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1492 if (net_ratelimit())
1493 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1494 current->comm, task_pid_nr(current));
1495 peek_seq = tp->copied_seq;
1500 /* Ok so how much can we use? */
1501 used = skb->len - offset;
1505 /* Do we have urgent data here? */
1507 u32 urg_offset = tp->urg_seq - *seq;
1508 if (urg_offset < used) {
1510 if (!sock_flag(sk, SOCK_URGINLINE)) {
1522 if (!(flags & MSG_TRUNC)) {
1523 #ifdef CONFIG_NET_DMA
1524 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1525 tp->ucopy.dma_chan = get_softnet_dma();
1527 if (tp->ucopy.dma_chan) {
1528 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1529 tp->ucopy.dma_chan, skb, offset,
1531 tp->ucopy.pinned_list);
1533 if (tp->ucopy.dma_cookie < 0) {
1535 printk(KERN_ALERT "dma_cookie < 0\n");
1537 /* Exception. Bailout! */
1542 if ((offset + used) == skb->len)
1548 err = skb_copy_datagram_iovec(skb, offset,
1549 msg->msg_iov, used);
1551 /* Exception. Bailout! */
1563 tcp_rcv_space_adjust(sk);
1566 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1568 tcp_fast_path_check(sk);
1570 if (used + offset < skb->len)
1573 if (tcp_hdr(skb)->fin)
1575 if (!(flags & MSG_PEEK)) {
1576 sk_eat_skb(sk, skb, copied_early);
1582 /* Process the FIN. */
1584 if (!(flags & MSG_PEEK)) {
1585 sk_eat_skb(sk, skb, copied_early);
1592 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1595 tp->ucopy.len = copied > 0 ? len : 0;
1597 tcp_prequeue_process(sk);
1599 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1600 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1606 tp->ucopy.task = NULL;
1610 #ifdef CONFIG_NET_DMA
1611 if (tp->ucopy.dma_chan) {
1612 dma_cookie_t done, used;
1614 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1616 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1617 tp->ucopy.dma_cookie, &done,
1618 &used) == DMA_IN_PROGRESS) {
1619 /* do partial cleanup of sk_async_wait_queue */
1620 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1621 (dma_async_is_complete(skb->dma_cookie, done,
1622 used) == DMA_SUCCESS)) {
1623 __skb_dequeue(&sk->sk_async_wait_queue);
1628 /* Safe to free early-copied skbs now */
1629 __skb_queue_purge(&sk->sk_async_wait_queue);
1630 dma_chan_put(tp->ucopy.dma_chan);
1631 tp->ucopy.dma_chan = NULL;
1633 if (tp->ucopy.pinned_list) {
1634 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1635 tp->ucopy.pinned_list = NULL;
1639 /* According to UNIX98, msg_name/msg_namelen are ignored
1640 * on a connected socket. I was just happy when I found this 8) --ANK
1643 /* Clean up data we have read: This will do ACK frames. */
1644 tcp_cleanup_rbuf(sk, copied);
1646 TCP_CHECK_TIMER(sk);
1651 TCP_CHECK_TIMER(sk);
1656 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1660 void tcp_set_state(struct sock *sk, int state)
1662 int oldstate = sk->sk_state;
1665 case TCP_ESTABLISHED:
1666 if (oldstate != TCP_ESTABLISHED)
1667 TCP_INC_STATS(TCP_MIB_CURRESTAB);
1671 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1672 TCP_INC_STATS(TCP_MIB_ESTABRESETS);
1674 sk->sk_prot->unhash(sk);
1675 if (inet_csk(sk)->icsk_bind_hash &&
1676 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1680 if (oldstate==TCP_ESTABLISHED)
1681 TCP_DEC_STATS(TCP_MIB_CURRESTAB);
1684 /* Change state AFTER socket is unhashed to avoid closed
1685 * socket sitting in hash tables.
1687 sk->sk_state = state;
1690 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
1693 EXPORT_SYMBOL_GPL(tcp_set_state);
1696 * State processing on a close. This implements the state shift for
1697 * sending our FIN frame. Note that we only send a FIN for some
1698 * states. A shutdown() may have already sent the FIN, or we may be
1702 static const unsigned char new_state[16] = {
1703 /* current state: new state: action: */
1704 /* (Invalid) */ TCP_CLOSE,
1705 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1706 /* TCP_SYN_SENT */ TCP_CLOSE,
1707 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1708 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1709 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1710 /* TCP_TIME_WAIT */ TCP_CLOSE,
1711 /* TCP_CLOSE */ TCP_CLOSE,
1712 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1713 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1714 /* TCP_LISTEN */ TCP_CLOSE,
1715 /* TCP_CLOSING */ TCP_CLOSING,
1718 static int tcp_close_state(struct sock *sk)
1720 int next = (int)new_state[sk->sk_state];
1721 int ns = next & TCP_STATE_MASK;
1723 tcp_set_state(sk, ns);
1725 return next & TCP_ACTION_FIN;
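/*
 * Worked example of the table above: for a socket in TCP_ESTABLISHED,
 * new_state[TCP_ESTABLISHED] is TCP_FIN_WAIT1 | TCP_ACTION_FIN, so
 * tcp_close_state() moves the socket to FIN_WAIT1 and returns non-zero,
 * telling the caller to send a FIN.  For TCP_SYN_SENT the entry is plain
 * TCP_CLOSE: the connection is dropped without sending anything.
 */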
1729 * Shutdown the sending side of a connection. Much like close except
1730 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
1733 void tcp_shutdown(struct sock *sk, int how)
1735 /* We need to grab some memory, and put together a FIN,
1736 * and then put it into the queue to be sent.
1737 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1739 if (!(how & SEND_SHUTDOWN))
1742 /* If we've already sent a FIN, or it's a closed state, skip this. */
1743 if ((1 << sk->sk_state) &
1744 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1745 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1746 /* Clear out any half completed packets. FIN if needed. */
1747 if (tcp_close_state(sk))
1752 void tcp_close(struct sock *sk, long timeout)
1754 struct sk_buff *skb;
1755 int data_was_unread = 0;
1759 sk->sk_shutdown = SHUTDOWN_MASK;
1761 if (sk->sk_state == TCP_LISTEN) {
1762 tcp_set_state(sk, TCP_CLOSE);
1765 inet_csk_listen_stop(sk);
1767 goto adjudge_to_death;
1770 /* We need to flush the recv. buffs. We do this only on the
1771 * descriptor close, not protocol-sourced closes, because the
1772 * reader process may not have drained the data yet!
1774 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1775 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1777 data_was_unread += len;
1783 /* As outlined in RFC 2525, section 2.17, we send a RST here because
1784 * data was lost. To witness the awful effects of the old behavior of
1785 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1786 * GET in an FTP client, suspend the process, wait for the client to
1787 * advertise a zero window, then kill -9 the FTP client, wheee...
1788 * Note: timeout is always zero in such a case.
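 *
 * An illustrative user-space sketch (simplified): the same "abort with a
 * RST" behaviour can be requested explicitly with a zero linger time, per
 * the Salvatore Sanfilippo changelog entry at the top of this file:
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(sock_fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(sock_fd);		// sends RST, no FIN, no TIME_WAIT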
1790 if (data_was_unread) {
1791 /* Unread data was tossed, zap the connection. */
1792 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1793 tcp_set_state(sk, TCP_CLOSE);
1794 tcp_send_active_reset(sk, GFP_KERNEL);
1795 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1796 /* Check zero linger _after_ checking for unread data. */
1797 sk->sk_prot->disconnect(sk, 0);
1798 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1799 } else if (tcp_close_state(sk)) {
1800 /* We FIN if the application ate all the data before
1801 * zapping the connection.
1804 /* RED-PEN. Formally speaking, we have broken TCP state
1805 * machine. State transitions:
1807 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1808 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1809 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1811 * are legal only when FIN has been sent (i.e. in window),
1812 * rather than queued out of window. Purists blame.
1814 * F.e. "RFC state" is ESTABLISHED,
1815 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1817 * The visible deviations are that sometimes
1818 * we enter the time-wait state when it is not really required
1819 * (harmless), and do not send active resets when they are
1820 * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1821 * they look like CLOSING or LAST_ACK to Linux).
1822 * Probably, I missed some more small holes.
1828 sk_stream_wait_close(sk, timeout);
1831 state = sk->sk_state;
1834 atomic_inc(sk->sk_prot->orphan_count);
1836 /* It is the last release_sock in its life. It will remove backlog. */
1840 /* Now socket is owned by kernel and we acquire BH lock
1841 to finish close. No need to check for user refs.
1845 BUG_TRAP(!sock_owned_by_user(sk));
1847 /* Have we already been destroyed by a softirq or backlog? */
1848 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1851 /* This is a (useful) BSD violation of the RFC. There is a
1852 * problem with TCP as specified in that the other end could
1853 * keep a socket open forever with no application left this end.
1854 * We use a 3 minute timeout (about the same as BSD) then kill
1855 * our end. If they send after that then tough - BUT: long enough
1856 * that we won't make the old 4*rto = almost no time - whoops
1859 * Nope, it was not a mistake. It is really the desired behaviour,
1860 * e.g. on HTTP servers, where such sockets are useless but
1861 * consume significant resources. Let's do it with a special
1862 * linger2 option. --ANK
1865 if (sk->sk_state == TCP_FIN_WAIT2) {
1866 struct tcp_sock *tp = tcp_sk(sk);
1867 if (tp->linger2 < 0) {
1868 tcp_set_state(sk, TCP_CLOSE);
1869 tcp_send_active_reset(sk, GFP_ATOMIC);
1870 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1872 const int tmo = tcp_fin_time(sk);
1874 if (tmo > TCP_TIMEWAIT_LEN) {
1875 inet_csk_reset_keepalive_timer(sk,
1876 tmo - TCP_TIMEWAIT_LEN);
1878 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1883 if (sk->sk_state != TCP_CLOSE) {
1885 if (tcp_too_many_orphans(sk,
1886 atomic_read(sk->sk_prot->orphan_count))) {
1887 if (net_ratelimit())
1888 printk(KERN_INFO "TCP: too many orphaned "
1890 tcp_set_state(sk, TCP_CLOSE);
1891 tcp_send_active_reset(sk, GFP_ATOMIC);
1892 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1896 if (sk->sk_state == TCP_CLOSE)
1897 inet_csk_destroy_sock(sk);
1898 /* Otherwise, socket is reprieved until protocol close. */
1906 /* These states need RST on ABORT according to RFC793 */
1908 static inline int tcp_need_reset(int state)
1910 return (1 << state) &
1911 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1912 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1915 int tcp_disconnect(struct sock *sk, int flags)
1917 struct inet_sock *inet = inet_sk(sk);
1918 struct inet_connection_sock *icsk = inet_csk(sk);
1919 struct tcp_sock *tp = tcp_sk(sk);
1921 int old_state = sk->sk_state;
1923 if (old_state != TCP_CLOSE)
1924 tcp_set_state(sk, TCP_CLOSE);
1926 /* ABORT function of RFC793 */
1927 if (old_state == TCP_LISTEN) {
1928 inet_csk_listen_stop(sk);
1929 } else if (tcp_need_reset(old_state) ||
1930 (tp->snd_nxt != tp->write_seq &&
1931 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1932 /* The last check adjusts for discrepancy of Linux wrt. RFC
1935 tcp_send_active_reset(sk, gfp_any());
1936 sk->sk_err = ECONNRESET;
1937 } else if (old_state == TCP_SYN_SENT)
1938 sk->sk_err = ECONNRESET;
1940 tcp_clear_xmit_timers(sk);
1941 __skb_queue_purge(&sk->sk_receive_queue);
1942 tcp_write_queue_purge(sk);
1943 __skb_queue_purge(&tp->out_of_order_queue);
1944 #ifdef CONFIG_NET_DMA
1945 __skb_queue_purge(&sk->sk_async_wait_queue);
1950 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1951 inet_reset_saddr(sk);
1953 sk->sk_shutdown = 0;
1954 sock_reset_flag(sk, SOCK_DONE);
1956 if ((tp->write_seq += tp->max_window + 2) == 0)
1958 icsk->icsk_backoff = 0;
1960 icsk->icsk_probes_out = 0;
1961 tp->packets_out = 0;
1962 tp->snd_ssthresh = 0x7fffffff;
1963 tp->snd_cwnd_cnt = 0;
1964 tp->bytes_acked = 0;
1965 tcp_set_ca_state(sk, TCP_CA_Open);
1966 tcp_clear_retrans(tp);
1967 inet_csk_delack_init(sk);
1968 tcp_init_send_head(sk);
1969 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
1972 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1974 sk->sk_error_report(sk);
1979 * Socket option code for TCP.
1981 static int do_tcp_setsockopt(struct sock *sk, int level,
1982 int optname, char __user *optval, int optlen)
1984 struct tcp_sock *tp = tcp_sk(sk);
1985 struct inet_connection_sock *icsk = inet_csk(sk);
1989 /* This is a string value; all the others are int's */
1990 if (optname == TCP_CONGESTION) {
1991 char name[TCP_CA_NAME_MAX];
1996 val = strncpy_from_user(name, optval,
1997 min(TCP_CA_NAME_MAX-1, optlen));
2003 err = tcp_set_congestion_control(sk, name);
2008 if (optlen < sizeof(int))
2011 if (get_user(val, (int __user *)optval))
2018 /* Values greater than interface MTU won't take effect. However
2019 * at the point when this call is done we typically don't yet
2020 * know which interface is going to be used */
2021 if (val < 8 || val > MAX_TCP_WINDOW) {
2025 tp->rx_opt.user_mss = val;
2030 /* TCP_NODELAY is weaker than TCP_CORK, so that
2031 * this option on corked socket is remembered, but
2032 * it is not activated until cork is cleared.
2034 * However, when TCP_NODELAY is set we make
2035 * an explicit push, which overrides even TCP_CORK
2036 * for currently queued segments.
2038 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2039 tcp_push_pending_frames(sk);
2041 tp->nonagle &= ~TCP_NAGLE_OFF;
2046 /* When set, this tells TCP to always queue non-full frames.
2047 * Later the user clears this option and we transmit
2048 * any pending partial frames in the queue. This is
2049 * meant to be used alongside sendfile() to get properly
2050 * filled frames when the user (for example) must write
2051 * out headers with a write() call first and then use
2052 * sendfile to send out the data parts.
2054 * TCP_CORK can be set together with TCP_NODELAY and it is
2055 * stronger than TCP_NODELAY.
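 *
 * An illustrative user-space sketch (simplified) of the header +
 * sendfile() pattern described above:
 *
 *	#include <sys/socket.h>
 *	#include <sys/sendfile.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <unistd.h>
 *
 *	int one = 1, zero = 0;
 *
 *	setsockopt(sock_fd, IPPROTO_TCP, TCP_CORK, &one, sizeof(one));
 *	write(sock_fd, hdr, hdr_len);			// headers queued
 *	sendfile(sock_fd, file_fd, NULL, file_len);	// body queued
 *	setsockopt(sock_fd, IPPROTO_TCP, TCP_CORK, &zero, sizeof(zero));
 *	// uncorking transmits the pending partial frame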
2058 tp->nonagle |= TCP_NAGLE_CORK;
2060 tp->nonagle &= ~TCP_NAGLE_CORK;
2061 if (tp->nonagle&TCP_NAGLE_OFF)
2062 tp->nonagle |= TCP_NAGLE_PUSH;
2063 tcp_push_pending_frames(sk);
2068 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2071 tp->keepalive_time = val * HZ;
2072 if (sock_flag(sk, SOCK_KEEPOPEN) &&
2073 !((1 << sk->sk_state) &
2074 (TCPF_CLOSE | TCPF_LISTEN))) {
2075 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
2076 if (tp->keepalive_time > elapsed)
2077 elapsed = tp->keepalive_time - elapsed;
2080 inet_csk_reset_keepalive_timer(sk, elapsed);
2085 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2088 tp->keepalive_intvl = val * HZ;
2091 if (val < 1 || val > MAX_TCP_KEEPCNT)
2094 tp->keepalive_probes = val;
2097 if (val < 1 || val > MAX_TCP_SYNCNT)
2100 icsk->icsk_syn_retries = val;
2106 else if (val > sysctl_tcp_fin_timeout / HZ)
2109 tp->linger2 = val * HZ;
2112 case TCP_DEFER_ACCEPT:
2113 icsk->icsk_accept_queue.rskq_defer_accept = 0;
2115 /* Translate value in seconds to number of
2117 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
2118 val > ((TCP_TIMEOUT_INIT / HZ) <<
2119 icsk->icsk_accept_queue.rskq_defer_accept))
2120 icsk->icsk_accept_queue.rskq_defer_accept++;
2121 icsk->icsk_accept_queue.rskq_defer_accept++;
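/*
 * Worked example, assuming TCP_TIMEOUT_INIT is 3*HZ (a 3 second initial
 * retransmission period, as in kernels of this vintage): a request of
 * val = 10 seconds runs the loop above as 10 > 3, 10 > 6, then 10 > 12
 * fails, leaving the counter at 2; the final increment stores 3.  The
 * deferral is therefore kept as a count of SYN-ACK retransmission periods
 * whose exponential backoff (3 + 6 + 12 seconds) covers the requested time.
 */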
2125 case TCP_WINDOW_CLAMP:
2127 if (sk->sk_state != TCP_CLOSE) {
2131 tp->window_clamp = 0;
2133 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2134 SOCK_MIN_RCVBUF / 2 : val;
2139 icsk->icsk_ack.pingpong = 1;
2141 icsk->icsk_ack.pingpong = 0;
2142 if ((1 << sk->sk_state) &
2143 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2144 inet_csk_ack_scheduled(sk)) {
2145 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2146 tcp_cleanup_rbuf(sk, 1);
2148 icsk->icsk_ack.pingpong = 1;
2153 #ifdef CONFIG_TCP_MD5SIG
2155 /* Read the IP->Key mappings from userspace */
2156 err = tp->af_specific->md5_parse(sk, optval, optlen);
2169 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2172 struct inet_connection_sock *icsk = inet_csk(sk);
2174 if (level != SOL_TCP)
2175 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2177 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2180 #ifdef CONFIG_COMPAT
2181 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2182 char __user *optval, int optlen)
2184 if (level != SOL_TCP)
2185 return inet_csk_compat_setsockopt(sk, level, optname,
2187 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2190 EXPORT_SYMBOL(compat_tcp_setsockopt);
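/*
 * An illustrative user-space sketch (simplified): TCP_CONGESTION is the one
 * string-valued option handled specially at the top of do_tcp_setsockopt()
 * above.  The header providing it may be <netinet/tcp.h> or <linux/tcp.h>
 * depending on the libc.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	char ca[16] = "reno";	// any congestion control the kernel has loaded
 *	setsockopt(sock_fd, IPPROTO_TCP, TCP_CONGESTION, ca, strlen(ca));
 *
 *	socklen_t len = sizeof(ca);
 *	getsockopt(sock_fd, IPPROTO_TCP, TCP_CONGESTION, ca, &len);
 */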
2193 /* Return information about the state of a TCP endpoint in API format. */
2194 void tcp_get_info(struct sock *sk, struct tcp_info *info)
2196 struct tcp_sock *tp = tcp_sk(sk);
2197 const struct inet_connection_sock *icsk = inet_csk(sk);
2198 u32 now = tcp_time_stamp;
2200 memset(info, 0, sizeof(*info));
2202 info->tcpi_state = sk->sk_state;
2203 info->tcpi_ca_state = icsk->icsk_ca_state;
2204 info->tcpi_retransmits = icsk->icsk_retransmits;
2205 info->tcpi_probes = icsk->icsk_probes_out;
2206 info->tcpi_backoff = icsk->icsk_backoff;
2208 if (tp->rx_opt.tstamp_ok)
2209 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2210 if (tcp_is_sack(tp))
2211 info->tcpi_options |= TCPI_OPT_SACK;
2212 if (tp->rx_opt.wscale_ok) {
2213 info->tcpi_options |= TCPI_OPT_WSCALE;
2214 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2215 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2218 if (tp->ecn_flags&TCP_ECN_OK)
2219 info->tcpi_options |= TCPI_OPT_ECN;
2221 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2222 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2223 info->tcpi_snd_mss = tp->mss_cache;
2224 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2226 if (sk->sk_state == TCP_LISTEN) {
2227 info->tcpi_unacked = sk->sk_ack_backlog;
2228 info->tcpi_sacked = sk->sk_max_ack_backlog;
2230 info->tcpi_unacked = tp->packets_out;
2231 info->tcpi_sacked = tp->sacked_out;
2233 info->tcpi_lost = tp->lost_out;
2234 info->tcpi_retrans = tp->retrans_out;
2235 info->tcpi_fackets = tp->fackets_out;
2237 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2238 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2239 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2241 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2242 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2243 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2244 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2245 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2246 info->tcpi_snd_cwnd = tp->snd_cwnd;
2247 info->tcpi_advmss = tp->advmss;
2248 info->tcpi_reordering = tp->reordering;
2250 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2251 info->tcpi_rcv_space = tp->rcvq_space.space;
2253 info->tcpi_total_retrans = tp->total_retrans;
2256 EXPORT_SYMBOL_GPL(tcp_get_info);
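/*
 * An illustrative user-space sketch (simplified): the structure filled in
 * by tcp_get_info() is what getsockopt(TCP_INFO) below returns to user
 * space.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(sock_fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %uus cwnd %u retrans %u\n",
 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 */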
2258 static int do_tcp_getsockopt(struct sock *sk, int level,
2259 int optname, char __user *optval, int __user *optlen)
2261 struct inet_connection_sock *icsk = inet_csk(sk);
2262 struct tcp_sock *tp = tcp_sk(sk);
2265 if (get_user(len, optlen))
2268 len = min_t(unsigned int, len, sizeof(int));
2275 val = tp->mss_cache;
2276 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2277 val = tp->rx_opt.user_mss;
2280 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2283 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2286 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2289 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2292 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2295 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2300 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2302 case TCP_DEFER_ACCEPT:
2303 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2304 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
2306 case TCP_WINDOW_CLAMP:
2307 val = tp->window_clamp;
2310 struct tcp_info info;
2312 if (get_user(len, optlen))
2315 tcp_get_info(sk, &info);
2317 len = min_t(unsigned int, len, sizeof(info));
2318 if (put_user(len, optlen))
2320 if (copy_to_user(optval, &info, len))
2325 val = !icsk->icsk_ack.pingpong;
2328 case TCP_CONGESTION:
2329 if (get_user(len, optlen))
2331 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2332 if (put_user(len, optlen))
2334 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2338 return -ENOPROTOOPT;
2341 if (put_user(len, optlen))
2343 if (copy_to_user(optval, &val, len))
2348 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2351 struct inet_connection_sock *icsk = inet_csk(sk);
2353 if (level != SOL_TCP)
2354 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2356 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2359 #ifdef CONFIG_COMPAT
2360 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2361 char __user *optval, int __user *optlen)
2363 if (level != SOL_TCP)
2364 return inet_csk_compat_getsockopt(sk, level, optname,
2366 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2369 EXPORT_SYMBOL(compat_tcp_getsockopt);
2372 struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2374 struct sk_buff *segs = ERR_PTR(-EINVAL);
2379 unsigned int oldlen;
2382 if (!pskb_may_pull(skb, sizeof(*th)))
2386 thlen = th->doff * 4;
2387 if (thlen < sizeof(*th))
2390 if (!pskb_may_pull(skb, thlen))
2393 oldlen = (u16)~skb->len;
2394 __skb_pull(skb, thlen);
2396 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2397 /* Packet is from an untrusted source, reset gso_segs. */
2398 int type = skb_shinfo(skb)->gso_type;
2407 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2410 mss = skb_shinfo(skb)->gso_size;
2411 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	len = skb_shinfo(skb)->gso_size;
	delta = htonl(oldlen + (thlen + len));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	do {
		th->fin = th->psh = 0;

		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				       (__force u32)delta));
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += len;
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));

out:
	return segs;
}

EXPORT_SYMBOL(tcp_tso_segment);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

int tcp_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
		      int bplen,
		      struct tcphdr *th, unsigned int tcplen,
		      struct tcp_md5sig_pool *hp)
{
	struct scatterlist sg[4];
	__u16 data_len;
	int block = 0;
	__sum16 old_checksum;
	struct hash_desc *desc = &hp->md5_desc;
	int err;
	unsigned int nbytes = 0;

	sg_init_table(sg, 4);

	/* 1. The TCP pseudo-header */
	sg_set_buf(&sg[block++], &hp->md5_blk, bplen);
	nbytes += bplen;

	/* 2. The TCP header, excluding options, and assuming a
	 * checksum of zero
	 */
	old_checksum = th->check;
	th->check = 0;
	sg_set_buf(&sg[block++], th, sizeof(*th));
	nbytes += sizeof(*th);

	/* 3. The TCP segment data (if any) */
	data_len = tcplen - (th->doff << 2);
	if (data_len > 0) {
		u8 *data = (u8 *)th + (th->doff << 2);
		sg_set_buf(&sg[block++], data, data_len);
		nbytes += data_len;
	}

	/* 4. an independently-specified key or password, known to both
	 * TCPs and presumably connection-specific
	 */
	sg_set_buf(&sg[block++], key->key, key->keylen);
	nbytes += key->keylen;

	sg_mark_end(&sg[block - 1]);

	/* Now store the hash into the packet */
	err = crypto_hash_init(desc);
	if (err) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
		return -1;
	}
	err = crypto_hash_update(desc, sg, nbytes);
	if (err) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
		return -1;
	}
	err = crypto_hash_final(desc, md5_hash);
	if (err) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
		return -1;
	}

	th->check = old_checksum;

	return 0;
}

EXPORT_SYMBOL(tcp_calc_md5_hash);
static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
		if (p) {
			if (p->md5_desc.tfm)
				crypto_free_hash(p->md5_desc.tfm);
			kfree(p);
		}
	}
	free_percpu(pool);
}
void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool = NULL;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}

EXPORT_SYMBOL(tcp_free_md5sig_pool);
static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
{
	int cpu;
	struct tcp_md5sig_pool **pool;

	pool = alloc_percpu(struct tcp_md5sig_pool *);
	if (!pool)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p;
		struct crypto_hash *hash;

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			goto out_free;
		*per_cpu_ptr(pool, cpu) = p;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (!hash || IS_ERR(hash))
			goto out_free;

		p->md5_desc.tfm = hash;
	}
	return pool;
out_free:
	__tcp_free_md5sig_pool(pool);
	return NULL;
}
struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool;
	int alloc = 0;

retry:
	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		alloc = 1;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		tcp_md5sig_users--;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock_bh(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* we cannot hold spinlock here because this may sleep. */
		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
		spin_lock_bh(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* oops, it has already been assigned. */
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}

EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
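/*
 * Illustrative usage of the MD5 pool helpers (a sketch, not a verbatim
 * copy of any caller): a user takes one reference with
 * tcp_alloc_md5sig_pool() when the first MD5 key is installed, then per
 * packet does roughly
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *	if (hp) {
 *		... fill hp->md5_blk with the pseudo-header, then ...
 *		... tcp_calc_md5_hash(md5_hash, key, bplen, th, tcplen, hp);
 *		tcp_put_md5sig_pool();
 *	}
 *
 * and drops the reference with tcp_free_md5sig_pool() when the last key
 * goes away.  tcp_get_md5sig_pool()/tcp_put_md5sig_pool() are the
 * preemption-safe wrappers around __tcp_get_md5sig_pool() and
 * __tcp_put_md5sig_pool() declared in <net/tcp.h>.
 */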
struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
	struct tcp_md5sig_pool **p;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	p = tcp_md5sig_pool;
	if (p)
		tcp_md5sig_users++;
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	return (p ? *per_cpu_ptr(p, cpu) : NULL);
}

EXPORT_SYMBOL(__tcp_get_md5sig_pool);
void __tcp_put_md5sig_pool(void)
{
	tcp_free_md5sig_pool();
}

EXPORT_SYMBOL(__tcp_put_md5sig_pool);
#endif
void tcp_done(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
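/*
 * Example (illustrative): booting with "thash_entries=16384" on the kernel
 * command line asks tcp_init() below to size the established hash for
 * roughly that many buckets instead of using the memory-based default.
 */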
void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long limit;
	int order, i, max_share;

	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.ehash_size,
					NULL,
					thash_entries ? 0 : 512 * 1024);
	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
	}
	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
			order++)
		;
	if (order >= 4) {
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}
	/* Set the pressure threshold to be a fraction of global memory that
	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
	 * memory, with a floor of 128 pages.
	 */
	limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
	limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;
	sysctl_tcp_mem[1] = limit;
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
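	/*
	 * Worked example (illustrative, assuming 4 KiB pages and roughly
	 * 1 GiB of memory, i.e. nr_all_pages ~= 262144): the first step
	 * gives min(262144, 65536) >> 8 = 256, the second
	 * (256 * 1024) >> 1 = 131072 pages, so tcp_mem comes out around
	 * { 98304, 131072, 196608 } pages.
	 */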
	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);
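	/*
	 * Continuing the example above: limit = 131072 << (12 - 7) bytes
	 * = 4 MiB, so max_share = 4 MiB and both tcp_rmem[2] and
	 * tcp_wmem[2] come out at 4 MiB on such a machine.
	 */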
	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}
EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_splice_read);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);