static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
int *errp, int flags);
-static void svc_delete_socket(struct svc_sock *svsk);
+static void svc_delete_xprt(struct svc_xprt *xprt);
static void svc_udp_data_ready(struct sock *, int);
static int svc_udp_recvfrom(struct svc_rqst *);
static int svc_udp_sendto(struct svc_rqst *);
-static void svc_close_socket(struct svc_sock *svsk);
+static void svc_close_xprt(struct svc_xprt *xprt);
static void svc_sock_detach(struct svc_xprt *);
static void svc_sock_free(struct svc_xprt *);
wake_up(&rqstp->rq_wait);
} else {
dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
- list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
+ list_add_tail(&svsk->sk_xprt.xpt_ready, &pool->sp_sockets);
BUG_ON(svsk->sk_xprt.xpt_pool != pool);
}
return NULL;
svsk = list_entry(pool->sp_sockets.next,
- struct svc_sock, sk_ready);
- list_del_init(&svsk->sk_ready);
+ struct svc_sock, sk_xprt.xpt_ready);
+ list_del_init(&svsk->sk_xprt.xpt_ready);
dprintk("svc: socket %p dequeued, inuse=%d\n",
svsk->sk_sk, atomic_read(&svsk->sk_xprt.xpt_ref.refcount));
if (!serv)
return 0;
spin_lock_bh(&serv->sv_lock);
- list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
+ list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) {
int onelen = one_sock_name(buf+len, svsk);
if (toclose && strcmp(toclose, buf+len) == 0)
closesk = svsk;
/* Should unregister with portmap, but you cannot
* unregister just one protocol...
*/
- svc_close_socket(closesk);
+ svc_close_xprt(&closesk->sk_xprt);
else if (toclose)
return -ENOENT;
return len;
spin_lock_bh(&serv->sv_lock);
list_for_each(le, &serv->sv_permsocks) {
struct svc_sock *svsk =
- list_entry(le, struct svc_sock, sk_list);
+ list_entry(le, struct svc_sock, sk_xprt.xpt_list);
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
}
list_for_each(le, &serv->sv_tempsocks) {
struct svc_sock *svsk =
- list_entry(le, struct svc_sock, sk_list);
+ list_entry(le, struct svc_sock, sk_xprt.xpt_list);
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
}
spin_unlock_bh(&serv->sv_lock);
*/
svsk = list_entry(serv->sv_tempsocks.prev,
struct svc_sock,
- sk_list);
+ sk_xprt.xpt_list);
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
svc_xprt_get(&svsk->sk_xprt);
}
len = 0;
if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)) {
dprintk("svc_recv: found XPT_CLOSE\n");
- svc_delete_socket(svsk);
+ svc_delete_xprt(&svsk->sk_xprt);
} else if (test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags)) {
struct svc_xprt *newxpt;
newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
}
list_for_each_safe(le, next, &serv->sv_tempsocks) {
- svsk = list_entry(le, struct svc_sock, sk_list);
+ svsk = list_entry(le, struct svc_sock, sk_xprt.xpt_list);
if (!test_and_set_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags))
continue;
while (!list_empty(&to_be_aged)) {
le = to_be_aged.next;
- /* fiddling the sk_list node is safe 'cos we're XPT_DETACHED */
+ /* fiddling the sk_xprt.xpt_list node is safe 'cos we're XPT_DETACHED */
list_del_init(le);
- svsk = list_entry(le, struct svc_sock, sk_list);
+ svsk = list_entry(le, struct svc_sock, sk_xprt.xpt_list);
dprintk("queuing svsk %p for closing, %lu seconds old\n",
svsk, get_seconds() - svsk->sk_lastrecv);
svsk->sk_lastrecv = get_seconds();
spin_lock_init(&svsk->sk_lock);
INIT_LIST_HEAD(&svsk->sk_deferred);
- INIT_LIST_HEAD(&svsk->sk_ready);
mutex_init(&svsk->sk_mutex);
/* Initialize the socket */
spin_lock_bh(&serv->sv_lock);
if (is_temporary) {
set_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
- list_add(&svsk->sk_list, &serv->sv_tempsocks);
+ list_add(&svsk->sk_xprt.xpt_list, &serv->sv_tempsocks);
serv->sv_tmpcnt++;
if (serv->sv_temptimer.function == NULL) {
/* setup timer to age temp sockets */
}
} else {
clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
- list_add(&svsk->sk_list, &serv->sv_permsocks);
+ list_add(&svsk->sk_xprt.xpt_list, &serv->sv_permsocks);
}
spin_unlock_bh(&serv->sv_lock);
}
/*
- * Remove a dead socket
+ * Remove a dead transport
*/
-static void
-svc_delete_socket(struct svc_sock *svsk)
+static void svc_delete_xprt(struct svc_xprt *xprt)
{
- struct svc_serv *serv;
- struct sock *sk;
-
- dprintk("svc: svc_delete_socket(%p)\n", svsk);
-
- serv = svsk->sk_xprt.xpt_server;
- sk = svsk->sk_sk;
+ struct svc_serv *serv = xprt->xpt_server;
- svsk->sk_xprt.xpt_ops->xpo_detach(&svsk->sk_xprt);
+ dprintk("svc: svc_delete_xprt(%p)\n", xprt);
+ xprt->xpt_ops->xpo_detach(xprt);
spin_lock_bh(&serv->sv_lock);
-
- if (!test_and_set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags))
- list_del_init(&svsk->sk_list);
+ if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
+ list_del_init(&xprt->xpt_list);
/*
- * We used to delete the svc_sock from whichever list
- * it's sk_ready node was on, but we don't actually
+ * We used to delete the transport from whichever list
+ * its sk_xprt.xpt_ready node was on, but we don't actually
* need to. This is because the only time we're called
* while still attached to a queue, the queue itself
* is about to be destroyed (in svc_destroy).
*/
- if (!test_and_set_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
- BUG_ON(atomic_read(&svsk->sk_xprt.xpt_ref.refcount) < 2);
- if (test_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags))
+ if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) {
+ BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2);
+ if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
- svc_xprt_put(&svsk->sk_xprt);
+ svc_xprt_put(xprt);
}
-
spin_unlock_bh(&serv->sv_lock);
}
-static void svc_close_socket(struct svc_sock *svsk)
+static void svc_close_xprt(struct svc_xprt *xprt)
{
- set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
/* someone else will have to effect the close */
return;
- svc_xprt_get(&svsk->sk_xprt);
- svc_delete_socket(svsk);
- clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
- svc_xprt_put(&svsk->sk_xprt);
+ svc_xprt_get(xprt);
+ svc_delete_xprt(xprt);
+ clear_bit(XPT_BUSY, &xprt->xpt_flags);
+ svc_xprt_put(xprt);
}
-void svc_force_close_socket(struct svc_sock *svsk)
+void svc_close_all(struct list_head *xprt_list)
{
- set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- if (test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
- /* Waiting to be processed, but no threads left,
- * So just remove it from the waiting list
- */
- list_del_init(&svsk->sk_ready);
- clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
+ struct svc_xprt *xprt;
+ struct svc_xprt *tmp;
+
+ list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
+ /* Waiting to be processed, but no threads left,
+ * so just remove it from the waiting list
+ */
+ list_del_init(&xprt->xpt_ready);
+ clear_bit(XPT_BUSY, &xprt->xpt_flags);
+ }
+ svc_close_xprt(xprt);
}
- svc_close_socket(svsk);
}
/*