set_task_state(tsk, TASK_UNINTERRUPTIBLE);
while (ctx->reqs_active) {
spin_unlock_irq(&ctx->ctx_lock);
- schedule();
+ io_schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
spin_lock_irq(&ctx->ctx_lock);
}
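The change from schedule() to io_schedule() makes this teardown wait, which is blocked on in-flight I/O, count as I/O wait time rather than plain sleep. As a rough sketch of the difference, io_schedule() in the scheduler of this era wraps the context switch in iowait accounting (simplified from kernel/sched.c, not verbatim):

	void __sched io_schedule(void)
	{
		struct rq *rq = &__raw_get_cpu_var(runqueues);

		delayacct_blkio_start();
		atomic_inc(&rq->nr_iowait);	/* sleepers here show up as iowait */
		schedule();
		atomic_dec(&rq->nr_iowait);
		delayacct_blkio_end();
	}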
/* wait_on_sync_kiocb:
* Waits on the given sync kiocb to complete.
*/
-ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
+ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
while (iocb->ki_users) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!iocb->ki_users)
break;
- schedule();
+ io_schedule();
}
__set_current_state(TASK_RUNNING);
return iocb->ki_user_data;
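The fastcall annotations throughout this file can simply be dropped: on i386 they selected a register-based calling convention that the kernel later made the default, leaving the macros as no-ops on every architecture. For reference, the old i386 definitions were roughly:

	/* include/asm-i386/linkage.h, before the annotation was retired */
	#define fastcall	__attribute__((regparm(3)))
	#define FASTCALL(x)	x __attribute__((regparm(3)))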
* go away, they will call put_ioctx and release any pinned memory
* associated with the request (held via struct page * references).
*/
-void fastcall exit_aio(struct mm_struct *mm)
+void exit_aio(struct mm_struct *mm)
{
struct kioctx *ctx = mm->ioctx_list;
mm->ioctx_list = NULL;
* Called when the last user of an aio context has gone away,
* and the struct needs to be freed.
*/
-void fastcall __put_ioctx(struct kioctx *ctx)
+void __put_ioctx(struct kioctx *ctx)
{
unsigned nr_events = ctx->max_reqs;
* This prevents races between the aio code path referencing the
* req (after submitting it) and aio_complete() freeing the req.
*/
-static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
-static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
+static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
struct kiocb *req = NULL;
struct aio_ring *ring;
* Returns true if this put was the last user of the kiocb,
* false if the request is still in use.
*/
-int fastcall aio_put_req(struct kiocb *req)
+int aio_put_req(struct kiocb *req)
{
struct kioctx *ctx = req->ki_ctx;
int ret;
/*
* Now we are all set to call the retry method in async
- * context. By setting this thread's io_wait context
- * to point to the wait queue entry inside the currently
- * running iocb for the duration of the retry, we ensure
- * that async notification wakeups are queued by the
- * operation instead of blocking waits, and when notified,
- * cause the iocb to be kicked for continuation (through
- * the aio_wake_function callback).
+ * context.
*/
- BUG_ON(current->io_wait != NULL);
- current->io_wait = &iocb->ki_wait;
ret = retry(iocb);
- current->io_wait = NULL;
if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
BUG_ON(!list_empty(&iocb->ki_wait.task_list));
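With the per-task io_wait plumbing gone, retry() is invoked with no special wait context: a retry method that cannot make progress is expected to return -EIOCBRETRY (or -EIOCBQUEUED) and arrange its own wakeup via kick_iocb(). A hedged sketch of that contract, with data_ready() and copy_out() as hypothetical helpers:

	static ssize_t example_retry(struct kiocb *iocb)
	{
		if (!data_ready(iocb))		/* hypothetical readiness check */
			return -EIOCBRETRY;	/* kick_iocb() re-runs us later */
		return copy_out(iocb);		/* hypothetical completion path */
	}

Any other return value means the iocb is finished, which is why the caller can BUG_ON() a still-queued wait entry.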
* The retry is usually executed by aio workqueue
* threads (See aio_kick_handler).
*/
-void fastcall kick_iocb(struct kiocb *iocb)
+void kick_iocb(struct kiocb *iocb)
{
/* sync iocbs are easy: they can only ever be executing from a
* single context. */
* Returns true if this is the last user of the request. The
* only other user of the request can be the cancellation code.
*/
-int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
+int aio_complete(struct kiocb *iocb, long res, long res2)
{
struct kioctx *ctx = iocb->ki_ctx;
struct aio_ring_info *info;
ret = 0;
if (to.timed_out) /* Only check after read evt */
break;
- schedule();
+ /* Only account this sleep as I/O wait if there are
+ * requests in flight. */
+ if (ctx->reqs_active)
+ io_schedule();
+ else
+ schedule();
if (signal_pending(tsk)) {
ret = -EINTR;
break;
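The waiter now charges its sleep to iowait only when requests are actually outstanding, so a task parked in io_getevents() with nothing submitted sleeps as ordinary idle time. The effect is visible in the iowait column of top(1) or vmstat(8); a minimal userspace illustration using libaio (assuming <libaio.h> and its usual wrappers):

	io_context_t ioctx = 0;
	struct io_event ev;

	io_setup(1, &ioctx);
	/* Nothing submitted: this sleep no longer inflates iowait. */
	io_getevents(ioctx, 1, 1, &ev, NULL);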
opcode = IOCB_CMD_PWRITEV;
}
+ /* This matches the pread()/pwrite() logic */
+ if (iocb->ki_pos < 0)
+ return -EINVAL;
+
do {
ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
iocb->ki_nr_segs - iocb->ki_cur_seg,
if ((ret == 0) || (iocb->ki_left == 0))
ret = iocb->ki_nbytes - iocb->ki_left;
+ /* If we managed to write some data, return the byte count
+ * rather than the eventual error. */
+ if (opcode == IOCB_CMD_PWRITEV
+ && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
+ && iocb->ki_nbytes - iocb->ki_left)
+ ret = iocb->ki_nbytes - iocb->ki_left;
+
return ret;
}
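Both additions align the aio path with plain pread()/pwrite() semantics: a negative position fails with -EINVAL up front, and a write that errors part-way reports the bytes already written rather than the error. For example, with libaio (fd assumed to be an open file descriptor):

	struct iocb cb;
	char buf[4096];

	/* A negative offset now fails the request with -EINVAL in the
	 * completion event, matching pwrite(2). */
	io_prep_pwrite(&cb, fd, buf, sizeof(buf), -1);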
* Simply triggers a retry of the operation via kick_iocb.
*
* This callback is specified in the wait queue entry in
- * a kiocb (current->io_wait points to this wait queue
- * entry when an aio operation executes; it is used
- * instead of a synchronous wait when an i/o blocking
- * condition is encountered during aio).
+ * a kiocb.
*
* Note:
* This routine is executed with the wait queue lock held.
return 1;
}
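For context, the callback whose tail is shown above is short; roughly (a sketch of the fs/aio.c function of this era, not verbatim):

	static int aio_wake_function(wait_queue_t *wait, unsigned mode,
				     int sync, void *key)
	{
		struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);

		/* Dequeue first so the entry can be re-armed on the next wait. */
		list_del_init(&wait->task_list);
		kick_iocb(iocb);
		return 1;
	}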
-int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb)
{
struct kiocb *req;