void rpc_exit_task(struct rpc_task *);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
void rpc_killall_tasks(struct rpc_clnt *);
-int rpc_execute(struct rpc_task *);
+void rpc_execute(struct rpc_task *);
void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
					rpc_action action, rpc_action timer);
	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);
-	rpc_call_setup(task, msg, 0);
-
	/* Set up the call info struct and execute the task */
+	rpc_call_setup(task, msg, 0);
+	if (task->tk_status == 0) {
+		atomic_inc(&task->tk_count);
+		rpc_execute(task);
+	}
	status = task->tk_status;
-	if (status != 0)
-		goto out;
-	atomic_inc(&task->tk_count);
-	status = rpc_execute(task);
-	if (status == 0)
-		status = task->tk_status;
-out:
	rpc_put_task(task);
	rpc_restore_sigmask(&oldset);
	return status;
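
The reordered logic above depends on two things: rpc_call_setup() records any setup failure in task->tk_status rather than returning it, and the atomic_inc(&task->tk_count) pins the task so that tk_status is still readable after rpc_execute() returns, because execution drops its own reference when it releases the task. A minimal user-space model of that pinning pattern (the toy_* names are invented for illustration; this is not kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct rpc_task: just a status and a refcount. */
struct toy_task {
	int tk_status;
	atomic_int tk_count;
};

static struct toy_task *toy_task_new(void)
{
	struct toy_task *t = calloc(1, sizeof(*t));
	atomic_init(&t->tk_count, 1);	/* the caller's initial reference */
	return t;
}

static void toy_put_task(struct toy_task *t)
{
	/* Free only when the last reference is dropped. */
	if (atomic_fetch_sub(&t->tk_count, 1) == 1)
		free(t);
}

static void toy_execute(struct toy_task *t)
{
	t->tk_status = 0;	/* pretend the call succeeded */
	toy_put_task(t);	/* models the task release dropping its ref */
}

/* Mirrors the patched rpc_call_sync() flow: no return value from
 * execute; the result is read back from the still-pinned task. */
static int toy_call_sync(struct toy_task *t)
{
	int status;

	if (t->tk_status == 0) {
		atomic_fetch_add(&t->tk_count, 1);	/* pin across execute */
		toy_execute(t);
	}
	status = t->tk_status;	/* safe: we still hold a reference */
	toy_put_task(t);
	return status;
}

int main(void)
{
	struct toy_task *t = toy_task_new();

	printf("status = %d\n", toy_call_sync(t));
	return 0;
}

Without the extra pin, the reference dropped inside execution could free the task before the caller reads tk_status, which is exactly the lifetime rule the comment above rpc_execute() warns about.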
/*
* This is the RPC `scheduler' (or rather, the finite state machine).
*/
-static int __rpc_execute(struct rpc_task *task)
+static void __rpc_execute(struct rpc_task *task)
{
	int status = 0;
		if (RPC_IS_ASYNC(task)) {
			/* Careful! we may have raced... */
			if (RPC_IS_QUEUED(task))
-				return 0;
+				return;
			if (rpc_test_and_set_running(task))
-				return 0;
+				return;
			continue;
		}
	dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid,
			status, task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
-	return status;
}
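
With no status to propagate, the async bail-out above reads plainly: a woken async task may have been re-queued in the meantime, or another context may already have claimed it, and in both cases this caller simply walks away. A rough user-space model of the rpc_test_and_set_running() claim (bit positions and toy_* names are illustrative, not the kernel's implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_TASK_RUNNING	0
#define TOY_TASK_QUEUED		1

struct toy_task {
	atomic_uint tk_runstate;	/* bitmask of the flags above */
};

/* Atomically set the RUNNING bit and report whether it was already
 * set; exactly one racing caller sees false and owns the task. */
static bool toy_test_and_set_running(struct toy_task *t)
{
	unsigned int old = atomic_fetch_or(&t->tk_runstate,
					   1U << TOY_TASK_RUNNING);
	return old & (1U << TOY_TASK_RUNNING);
}

static bool toy_is_queued(struct toy_task *t)
{
	return atomic_load(&t->tk_runstate) & (1U << TOY_TASK_QUEUED);
}

/* The async bail-out from __rpc_execute(), minus the enclosing loop. */
static void toy_resume_async(struct toy_task *t)
{
	if (toy_is_queued(t))
		return;		/* raced: the task was re-queued */
	if (toy_test_and_set_running(t))
		return;		/* raced: another context owns it */
	printf("this context runs the state machine\n");
}

int main(void)
{
	struct toy_task t = { .tk_runstate = 0 };

	toy_resume_async(&t);	/* claims RUNNING, runs the task */
	toy_resume_async(&t);	/* loses the test-and-set, bails */
	return 0;
}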
/*
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
-int
-rpc_execute(struct rpc_task *task)
+void rpc_execute(struct rpc_task *task)
{
	rpc_set_active(task);
	rpc_set_running(task);
-	return __rpc_execute(task);
+	__rpc_execute(task);
}
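
Given the guarantee above, a caller now has exactly two ways to learn a task's fate: read task->tk_status while holding its own reference, as the reworked rpc_call_sync() does, or receive it in the rpc_call_ops completion callbacks. A small user-space sketch of the callback route (toy_* types are stand-ins, and -5 plays the role of -EIO):

#include <stdio.h>

struct toy_task;

/* Modeled on the shape of struct rpc_call_ops: completion arrives
 * through a callback, not through a return value. */
struct toy_call_ops {
	void (*call_done)(struct toy_task *, void *);
};

struct toy_task {
	int tk_status;
	const struct toy_call_ops *tk_ops;
	void *tk_calldata;
};

static void toy_execute_async(struct toy_task *t)
{
	t->tk_status = -5;	/* pretend the transport failed */
	if (t->tk_ops && t->tk_ops->call_done)
		t->tk_ops->call_done(t, t->tk_calldata);
}

static void demo_done(struct toy_task *t, void *calldata)
{
	printf("%s: status %d\n", (const char *)calldata, t->tk_status);
}

int main(void)
{
	static const struct toy_call_ops ops = { .call_done = demo_done };
	struct toy_task t = { .tk_ops = &ops, .tk_calldata = "demo" };

	toy_execute_async(&t);	/* void: the result arrives via call_done */
	return 0;
}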
static void rpc_async_schedule(struct work_struct *work)