X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=fs%2Faio.c;h=4f641abac3c0927f2036f81f3f70106a57a09e22;hb=580b2e3c0183818adf6151e60270405b02ea8504;hp=9f807a541fbeb0fd0811e331e1088d4bd752860d;hpb=212079cf4ee99e492a57b817e796825d423a30bb;p=linux-2.6 diff --git a/fs/aio.c b/fs/aio.c index 9f807a541f..4f641abac3 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -40,9 +40,6 @@ #define dprintk(x...) do { ; } while (0) #endif -static long aio_run = 0; /* for testing only */ -static long aio_wakeups = 0; /* for testing only */ - /*------ sysctl variables----*/ atomic_t aio_nr = ATOMIC_INIT(0); /* current system wide number of aio requests */ unsigned aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ @@ -61,6 +58,7 @@ static DEFINE_SPINLOCK(fput_lock); static LIST_HEAD(fput_head); static void aio_kick_handler(void *); +static void aio_queue_work(struct kioctx *); /* aio_setup * Creates the slab caches used by the aio routines, panic on @@ -569,6 +567,10 @@ static void use_mm(struct mm_struct *mm) atomic_inc(&mm->mm_count); tsk->mm = mm; tsk->active_mm = mm; + /* + * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise + * it won't work. Update it accordingly if you change it here + */ activate_mm(active_mm, mm); task_unlock(tsk); @@ -617,7 +619,6 @@ static inline int __queue_kicked_iocb(struct kiocb *iocb) if (list_empty(&iocb->ki_run_list)) { list_add_tail(&iocb->ki_run_list, &ctx->run_list); - iocb->ki_queued++; return 1; } return 0; @@ -658,10 +659,8 @@ static ssize_t aio_run_iocb(struct kiocb *iocb) } if (!(iocb->ki_retried & 0xff)) { - pr_debug("%ld retry: %d of %d (kick %ld, Q %ld run %ld, wake %ld)\n", - iocb->ki_retried, - iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes, - iocb->ki_kicked, iocb->ki_queued, aio_run, aio_wakeups); + pr_debug("%ld retry: %d of %d\n", iocb->ki_retried, + iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes); } if (!(retry = iocb->ki_retry)) { @@ -753,6 +752,14 @@ out: * has already been kicked */ if (kiocbIsKicked(iocb)) { __queue_kicked_iocb(iocb); + + /* + * __queue_kicked_iocb will always return 1 here, because + * iocb->ki_run_list is empty at this point so it should + * be safe to unconditionally queue the context into the + * work queue. 
+ */ + aio_queue_work(ctx); } } return ret; @@ -768,7 +775,6 @@ out: static int __aio_run_iocbs(struct kioctx *ctx) { struct kiocb *iocb; - int count = 0; LIST_HEAD(run_list); list_splice_init(&ctx->run_list, &run_list); @@ -783,9 +789,7 @@ static int __aio_run_iocbs(struct kioctx *ctx) aio_run_iocb(iocb); if (__aio_put_req(ctx, iocb)) /* drop extra ref */ put_ioctx(ctx); - count++; } - aio_run++; if (!list_empty(&ctx->run_list)) return 1; return 0; @@ -884,10 +888,8 @@ static void queue_kicked_iocb(struct kiocb *iocb) spin_lock_irqsave(&ctx->ctx_lock, flags); run = __queue_kicked_iocb(iocb); spin_unlock_irqrestore(&ctx->ctx_lock, flags); - if (run) { + if (run) aio_queue_work(ctx); - aio_wakeups++; - } } /* @@ -907,7 +909,6 @@ void fastcall kick_iocb(struct kiocb *iocb) return; } - iocb->ki_kicked++; /* If its already kicked we shouldn't queue it again */ if (!kiocbTryKick(iocb)) { queue_kicked_iocb(iocb); @@ -978,7 +979,8 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2) tail = info->tail; event = aio_ring_event(info, tail, KM_IRQ0); - tail = (tail + 1) % info->nr; + if (++tail >= info->nr) + tail = 0; event->obj = (u64)(unsigned long)iocb->ki_obj.user; event->data = iocb->ki_user_data; @@ -1002,10 +1004,8 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2) pr_debug("added to ring %p at [%lu]\n", iocb, tail); - pr_debug("%ld retries: %d of %d (kicked %ld, Q %ld run %ld wake %ld)\n", - iocb->ki_retried, - iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes, - iocb->ki_kicked, iocb->ki_queued, aio_run, aio_wakeups); + pr_debug("%ld retries: %d of %d\n", iocb->ki_retried, + iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes); put_rq: /* everything turned out well, dispose of the aiocb. */ ret = __aio_put_req(ctx, iocb); @@ -1113,7 +1113,6 @@ static int read_events(struct kioctx *ctx, int i = 0; struct io_event ent; struct aio_timeout to; - int event_loop = 0; /* testing only */ int retry = 0; /* needed to zero any padding within an entry (there shouldn't be @@ -1180,7 +1179,6 @@ retry: if (to.timed_out) /* Only check after read evt */ break; schedule(); - event_loop++; if (signal_pending(tsk)) { ret = -EINTR; break; @@ -1208,9 +1206,6 @@ retry: if (timeout) clear_timeout(&to); out: - pr_debug("event loop executed %d times\n", event_loop); - pr_debug("aio_run %ld\n", aio_run); - pr_debug("aio_wakeups %ld\n", aio_wakeups); return i ? i : ret; } @@ -1525,10 +1520,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, init_waitqueue_func_entry(&req->ki_wait, aio_wake_function); INIT_LIST_HEAD(&req->ki_wait.task_list); req->ki_retried = 0; - req->ki_kicked = 0; - req->ki_queued = 0; - aio_run = 0; - aio_wakeups = 0; ret = aio_setup_iocb(req); @@ -1536,10 +1527,14 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, goto out_put_req; spin_lock_irq(&ctx->ctx_lock); - list_add_tail(&req->ki_run_list, &ctx->run_list); - /* drain the run list */ - while (__aio_run_iocbs(ctx)) - ; + if (likely(list_empty(&ctx->run_list))) { + aio_run_iocb(req); + } else { + list_add_tail(&req->ki_run_list, &ctx->run_list); + /* drain the run list */ + while (__aio_run_iocbs(ctx)) + ; + } spin_unlock_irq(&ctx->ctx_lock); aio_put_req(req); /* drop extra ref to req */ return 0;
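
A note on the aio_complete() hunk above: the ring index advance is changed
from a modulo ("tail = (tail + 1) % info->nr") to a compare-and-reset, which
avoids an integer division on the completion path; info->nr need not be a
power of two, so a simple mask will not do. Below is a minimal userspace
sketch of the same technique -- not kernel code; the struct and function
names (toy_ring, ring_add) are hypothetical, invented for this illustration.

#include <assert.h>
#include <stdio.h>

struct toy_ring {
        unsigned nr;            /* slot count, not necessarily a power of two */
        unsigned tail;          /* next slot to fill */
        long slot[8];
};

static void ring_add(struct toy_ring *r, long ev)
{
        r->slot[r->tail] = ev;
        /* same result as r->tail = (r->tail + 1) % r->nr, minus the division */
        if (++r->tail >= r->nr)
                r->tail = 0;
}

int main(void)
{
        struct toy_ring r = { .nr = 3 };
        long i;

        for (i = 0; i < 7; i++)
                ring_add(&r, i);
        assert(r.tail == 7 % 3);        /* wrapped twice, landed on slot 1 */
        printf("tail wrapped to %u\n", r.tail);
        return 0;
}

The compare-and-reset is correct here because the index is only ever
advanced by one, so after the increment it can be at most nr and a single
check suffices.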
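Similarly, the io_submit_one() hunk replaces the unconditional
queue-then-drain with a fast path: when ctx->run_list is empty the new iocb
is run directly, skipping the list manipulation entirely; only when earlier
retries are already pending is it queued behind them, so FIFO ordering is
preserved. A userspace sketch of that shape follows, again with hypothetical
names (toy_ctx, toy_req, submit) and no claim to match the kernel's locking.

#include <stdio.h>

struct toy_req {
        int id;
        struct toy_req *next;
};

struct toy_ctx {
        struct toy_req *head, *tail;    /* pending run list */
};

static void run_one(struct toy_req *req)
{
        printf("running request %d\n", req->id);
}

/* drain the run list in submission order */
static void run_all(struct toy_ctx *ctx)
{
        while (ctx->head) {
                struct toy_req *req = ctx->head;

                ctx->head = req->next;
                if (!ctx->head)
                        ctx->tail = NULL;
                run_one(req);
        }
}

static void submit(struct toy_ctx *ctx, struct toy_req *req)
{
        req->next = NULL;
        if (!ctx->head) {
                run_one(req);           /* fast path: nothing pending */
                return;
        }
        ctx->tail->next = req;          /* slow path: keep FIFO order */
        ctx->tail = req;
        run_all(ctx);
}

int main(void)
{
        struct toy_ctx ctx = { 0 };
        struct toy_req a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

        submit(&ctx, &a);               /* fast path */
        ctx.head = ctx.tail = &b;       /* pretend b was kicked for retry */
        submit(&ctx, &c);               /* slow path: runs b, then c */
        return 0;
}

The common case at submission time is an empty run list, so in that case
the patch trades a list add, a list splice, and a list walk for a single
direct call.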