/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	       and the ->sk_info_authunix cache.
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued
 *	       multiple times.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted;
 *		 if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 *	sk_inuse contains a bias of '1' until SK_DEAD is set,
 *	       so when sk_inuse hits zero, we know the socket is dead
 *	       and no-one is using it.
 *	SK_DEAD can only be set while SK_BUSY is held, which ensures
 *	       no other thread will be using the socket or will try to
 *	       set SK_DEAD.
 */
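/*
 * For example, the UDP data_ready callback below follows exactly this
 * protocol -- it publishes the event and then kicks a server thread:
 *
 *	set_bit(SK_DATA, &svsk->sk_flags);
 *	svc_sock_enqueue(svsk);
 *
 * while the receive paths clear SK_DATA before reading and re-set it
 * only when a read actually finds data, so no wakeup can be lost
 * in between.
 */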
#define RPCDBG_FACILITY	RPCDBG_SVCXPRT


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_delete_socket(struct svc_sock *svsk);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);
static void		svc_close_socket(struct svc_sock *svsk);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sock_owned_by_user(sk));
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif
static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
{
	switch (addr->sa_family) {
	case AF_INET:
		snprintf(buf, len, "%u.%u.%u.%u, port=%u",
			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
			ntohs(((struct sockaddr_in *) addr)->sin_port));
		break;

	case AF_INET6:
		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
			ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
		break;

	default:
		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
		break;
	}
	return buf;
}
/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);
/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static void
svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_skbuff;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_skbuff = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}
/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
	int wspace;

	if (svsk->sk_sock->type == SOCK_STREAM)
		wspace = sk_stream_wspace(svsk->sk_sk);
	else
		wspace = sock_wspace(svsk->sk_sk);

	return wspace;
}
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
			"svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
			svc_sock_wspace(svsk));
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}
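/*
 * A worked example of the wspace test above: with sv_max_mesg at 32K
 * (an illustrative value; it is configured elsewhere) and no reply
 * space reserved yet, the socket is enqueued only while
 * (0 + 32K) * 2 <= wspace, i.e. while at least 64K of the send buffer
 * is free, so a worst-case reply cannot block the sending thread.
 */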
/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_inuse));

	return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}
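/*
 * A typical caller shrinks its reservation as soon as the real reply
 * size is known -- roughly (a sketch; the exact call sites live in
 * nfsd, and the names here are illustrative):
 *
 *	svc_reserve(rqstp, sizeof(reply_header) + payload_bytes);
 *
 * which releases the surplus from sk_reserved and may re-enqueue the
 * socket now that more output space is available.
 */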
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse)) {
		BUG_ON(!test_bit(SK_DEAD, &svsk->sk_flags));

		dprintk("svc: releasing dead socket\n");
		if (svsk->sk_sock->file)
			sockfd_put(svsk->sk_sock);
		else
			sock_release(svsk->sk_sock);
		if (svsk->sk_info_authunix != NULL)
			svcauth_unix_info_release(svsk->sk_info_authunix);
		kfree(svsk);
	}
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	svc_release_skb(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}
/*
 * External function to wake up a server waiting for data
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}
union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
	CMSG_SPACE(sizeof(union svc_pktinfo_u))
static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;

	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			pki->ipi6_ifindex = 0;
			ipv6_addr_copy(&pki->ipi6_addr,
					&rqstp->rq_daddr.addr6);
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
	}
}
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;
	char		buf[RPC_MAX_ADDRBUFLEN];

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		struct msghdr msg = {
			.msg_name	= &rqstp->rq_addr,
			.msg_namelen	= rqstp->rq_addrlen,
			.msg_control	= cmh,
			.msg_controllen	= sizeof(buffer),
			.msg_flags	= MSG_MORE,
		};

		svc_set_cmsg_data(rqstp, cmh);

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
				  xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					     ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					     xdr->tail[0].iov_len, 0);
		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

	return len;
}
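/*
 * Note on the flags handling above: every chunk but the last is sent
 * with MSG_MORE, so head, page data and tail are coalesced into a
 * single datagram (UDP) or an uninterrupted run of stream bytes (TCP);
 * flags drops to 0 exactly when the remaining length equals the size
 * of the chunk being sent.
 */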
/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			       svsk->sk_sk->sk_family);
	}
	return len;
}

int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct svc_sock *svsk = rqstp->rq_sock;
	struct msghdr msg = {
		.msg_flags	= MSG_DONTWAIT,
	};
	struct sockaddr *sin;
	int len;

	len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
				msg.msg_flags);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 */
	memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
	rqstp->rq_addrlen = svsk->sk_remotelen;

	/* Destination address in request is needed for binding the
	 * source address in RPC callbacks later.
	 */
	sin = (struct sockaddr *)&svsk->sk_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		svsk, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
	set_fs(oldfs);
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
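/*
 * The "* 2" above mirrors what SO_SNDBUF/SO_RCVBUF would have done:
 * setsockopt doubles the user-requested size to leave room for
 * struct sk_buff and other bookkeeping overhead, so writing snd * 2
 * directly keeps the effective payload capacity at roughly snd bytes.
 */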
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
		       svsk);
		wake_up_interruptible(sk->sk_sleep);
	}
}
static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
					    struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
		struct in_pktinfo *pki = CMSG_DATA(cmh);
		rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
		break;
		}
	case AF_INET6: {
		struct in6_pktinfo *pki = CMSG_DATA(cmh);
		ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
		break;
		}
	}
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		err, len;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
			       (serv->sv_nrthreads+3) * serv->sv_max_mesg,
			       (serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	skb = NULL;
	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
			     0, 0, MSG_PEEK | MSG_DONTWAIT);
	if (err >= 0)
		skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);

	if (skb == NULL) {
		if (err != -EAGAIN) {
			/* possibly an icmp error */
			dprintk("svc: recvfrom returned error %d\n", -err);
			set_bit(SK_DATA, &svsk->sk_flags);
		}
		svc_sock_received(svsk);
		return -EAGAIN;
	}
	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	if (skb->tstamp.tv64 == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	svsk->sk_sk->sk_stamp = skb->tstamp;
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	if (cmh->cmsg_level != IPPROTO_IP ||
	    cmh->cmsg_type != IP_PKTINFO) {
		if (net_ratelimit())
			printk("rpcsvc: received unknown control message:"
			       "%d/%d\n",
			       cmh->cmsg_level, cmh->cmsg_type);
		skb_free_datagram(svsk->sk_sk, skb);
		return 0;
	}
	svc_udp_get_dest_address(rqstp, cmh);

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;

		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_skbuff = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}
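/*
 * On return, rq_arg is laid out as an xdr_buf over the datagram:
 *
 *	head[0]		the first PAGE_SIZE (or fewer) bytes
 *	pages/page_len	any overflow beyond head[0]
 *	rq_respages	the first page free for the reply
 *
 * e.g. a 5000-byte request with 4096-byte pages keeps head[0] full,
 * sets page_len = 5000 - head[0].iov_len, and points rq_respages one
 * page past the last request page.
 */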
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}
static struct svc_xprt_ops svc_udp_ops = {
};

static struct svc_xprt_class svc_udp_class = {
	.xcl_name = "udp",
	.xcl_ops = &svc_udp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
};
static void
svc_udp_init(struct svc_sock *svsk)
{
	int one = 1;
	mm_segment_t oldfs;

	svc_xprt_init(&svc_udp_class, &svsk->sk_xprt);
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;
	svsk->sk_recvfrom = svc_udp_recvfrom;
	svsk->sk_sendto = svc_udp_sendto;

	/* Initially set the buffer sizes so we have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* make sure we get destination address info */
	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
				       (char __user *)&one, sizeof(one));
	set_fs(oldfs);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}
static inline int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}
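/*
 * PROT_SOCK is 1024, so this reports whether the peer bound a
 * "privileged" port. svc_recv() stores the result in rq_secure, which
 * services such as nfsd consult when enforcing secure-port export
 * options.
 */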
/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return;
	}

	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	err = kernel_getpeername(newsock, sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
	 */
	if (!svc_port_is_privileged(sin)) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %s\n",
			serv->sv_name,
			__svc_print_addr(sin, buf, sizeof(buf)));
	}
	dprintk("%s: connect from %s\n", serv->sv_name,
		__svc_print_addr(sin, buf, sizeof(buf)));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	memcpy(&newsvsk->sk_remote, sin, slen);
	newsvsk->sk_remotelen = slen;
	err = kernel_getsockname(newsock, sin, &slen);
	if (unlikely(err < 0)) {
		dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
		slen = offsetof(struct sockaddr, sa_data);
	}
	memcpy(&newsvsk->sk_local, sin, slen);

	svc_sock_received(newsvsk);

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. NFS clients do one reconnect every
	 * 15 seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
				       "sockets, consider increasing the "
				       "number of nfsd threads\n",
				       serv->sv_name);
				printk(KERN_NOTICE
				       "%s: last TCP connect from %s\n",
				       serv->sv_name, __svc_print_addr(sin,
							buf, sizeof(buf)));
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}
	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}
/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (non-terminal)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (large)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_skbuff      = NULL;
	rqstp->rq_prot	      = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

err_delete:
	svc_delete_socket(svsk);
	return -EAGAIN;

error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}
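/*
 * Record marker example: a client sending a 100-byte RPC writes the
 * 4-byte marker 0x80000064 first -- top bit set for "last fragment",
 * low 31 bits carrying the length -- which is why the code above
 * insists on the 0x80000000 bit and then masks it off to get reclen.
 */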
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len ) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
		svc_sock_enqueue(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}
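/*
 * The mirror image of the receive side: for a reply whose xdr_buf is
 * 104 bytes including the 4-byte marker slot at the front of head[0],
 * reclen becomes htonl(0x80000000 | 100), so the peer sees a single
 * final fragment of 100 payload bytes.
 */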
static struct svc_xprt_ops svc_tcp_ops = {
};

static struct svc_xprt_class svc_tcp_class = {
	.xcl_name = "tcp",
	.xcl_ops = &svc_tcp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};
void svc_init_xprt_sock(void)
{
	svc_reg_xprt_class(&svc_tcp_class);
	svc_reg_xprt_class(&svc_udp_class);
}

void svc_cleanup_xprt_sock(void)
{
	svc_unreg_xprt_class(&svc_tcp_class);
	svc_unreg_xprt_class(&svc_udp_class);
}
static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt);
	svsk->sk_recvfrom = svc_tcp_recvfrom;
	svsk->sk_sendto = svc_tcp_sendto;

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;        /* disable Nagle's algorithm */

		/* Initially set the buffer sizes so we have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_max_mesg,
				    3 * svsk->sk_server->sv_max_mesg);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}
/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);

	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
		rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
	len = svsk->sk_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
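/*
 * Callers (the nfsd and lockd main loops) drive this in the obvious
 * way -- roughly:
 *
 *	err = svc_recv(rqstp, timeout);
 *	if (err == -EAGAIN || err == -EINTR)
 *		handle idle time or shutdown;
 *	else
 *		svc_process(rqstp);
 *
 * so -EAGAIN is the normal "nothing complete yet" case, not an error.
 */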
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
				__FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	svc_release_skb(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}
/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		if (atomic_read(&svsk->sk_inuse) > 1 || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		atomic_inc(&svsk->sk_inuse);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_sock_put(svsk);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
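/*
 * Mark-and-sweep in practice: a temporary socket must be found with
 * SK_OLD already set -- i.e. it survived a whole svc_conn_age_period
 * without svc_recv() clearing the bit -- before it is moved to
 * to_be_aged and closed, so one round trip of traffic always earns a
 * connection another full period.
 */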
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
						struct socket *sock,
						int *errp, int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
	int		is_temporary = flags & SVC_SOCK_TEMPORARY;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	atomic_set(&svsk->sk_inuse, 1);
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (is_temporary) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
					(unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
					jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
				svsk, svsk->sk_sk);

	return svsk;
}
int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err = -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err = -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
		if (svsk) {
			svc_sock_received(svsk);
			err = 0;
		}
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);
/*
 * Create socket for RPC service.
 */
static int svc_create_socket(struct svc_serv *serv, int protocol,
				struct sockaddr *sin, int len, int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: svc_create_socket(%s, %d, %s)\n",
			serv->sv_program->pg_name, protocol,
			__svc_print_addr(sin, buf, sizeof(buf)));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
				"sockets supported\n");
		return -EINVAL;
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	error = sock_create_kern(sin->sa_family, type, protocol, &sock);
	if (error < 0)
		return error;

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
		svc_sock_received(svsk);
		return ntohs(inet_sk(svsk->sk_sk)->sport);
	}

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return error;
}
/*
 * Remove a dead socket
 */
static void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;
	struct sock	*sk;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;
	sk = svsk->sk_sk;

	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
		BUG_ON(atomic_read(&svsk->sk_inuse) < 2);
		atomic_dec(&svsk->sk_inuse);
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;
	}

	spin_unlock_bh(&serv->sv_lock);
}
static void svc_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
		/* someone else will have to effect the close */
		return;

	atomic_inc(&svsk->sk_inuse);
	svc_delete_socket(svsk);
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_put(svsk);
}
void svc_force_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Waiting to be processed, but no threads left,
		 * so just remove it from the waiting list
		 */
		list_del_init(&svsk->sk_ready);
		clear_bit(SK_BUSY, &svsk->sk_flags);
	}
	svc_close_socket(svsk);
}
/**
 * svc_makesock - Make a socket for nfsd and lockd
 * @serv: RPC server structure
 * @protocol: transport protocol to use
 * @port: port to use
 * @flags: requested socket characteristics
 *
 */
int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port,
			int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= INADDR_ANY,
		.sin_port		= htons(port),
	};

	dprintk("svc: creating socket proto = %d\n", protocol);
	return svc_create_socket(serv, protocol, (struct sockaddr *) &sin,
							sizeof(sin), flags);
}
/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_sock_put(dr->svsk);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock(&svsk->sk_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock(&svsk->sk_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_sock_put(svsk);
}
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	atomic_inc(&rqstp->rq_sock->sk_inuse);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}
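/*
 * Deferral round trip: svc_defer() snapshots the undecoded request
 * (including the 'skip' bytes already consumed from head[0]),
 * svc_revisit() later queues that snapshot on sk_deferred, and
 * svc_deferred_recv() below replays it into a fresh rqstp as if it
 * had just arrived off the wire.
 */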
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return dr->argslen<<2;
}
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock(&svsk->sk_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock(&svsk->sk_lock);
	return dr;
}