/*
 * Deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */
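/*
 * These are only the module defaults; each queue gets its own copy, and
 * all of them are runtime-tunable through the sysfs attributes defined at
 * the bottom of this file. For example (path assumes the usual sysfs
 * layout, with a disk named sda using this scheduler):
 *
 *	echo 100 > /sys/block/sda/queue/iosched/read_expire
 */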
struct deadline_data {
	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct deadline_rq *next_drq[2];
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */
	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	mempool_t *drq_pool;
};
/*
 * pre-request data.
 */
struct deadline_rq {
	struct request *request;

	/*
	 * expire fifo
	 */
	struct list_head fifo;
	unsigned long expires;
};
static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)		((struct deadline_rq *) (rq)->elevator_private)

#define RQ_RB_ROOT(dd, rq)	(&(dd)->sort_list[rq_data_dir((rq))])
#define DRQ_RB_ROOT(dd, drq)	RQ_RB_ROOT((dd), (drq)->request)
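/*
 * Add a request to the rb-tree for its data direction. elv_rb_add()
 * returns an existing request ("alias") if one is already queued at the
 * same sector; since the tree cannot hold two nodes with the same key,
 * we dispatch the alias and retry until the insert succeeds.
 */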
static void
deadline_add_drq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = RQ_RB_ROOT(dd, rq);
	struct request *__alias;

retry:
	__alias = elv_rb_add(root, rq);
	if (unlikely(__alias)) {
		deadline_move_request(dd, RQ_DATA(__alias));
		goto retry;
	}
}
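/*
 * Remove a request from the rb-tree. next_drq[] caches the next request
 * in sector order for each direction, so if the cached request is the
 * one being deleted, the cache must be advanced to its rb-tree successor
 * first.
 */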
static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;
	const int data_dir = rq_data_dir(rq);

	if (dd->next_drq[data_dir] == drq) {
		struct rb_node *rbnext = rb_next(&rq->rb_node);

		dd->next_drq[data_dir] = NULL;
		if (rbnext)
			dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
	}

	elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
}
/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);
	const int data_dir = rq_data_dir(drq->request);

	deadline_add_drq_rb(dd, rq);

	/*
	 * set expire time and add to fifo list
	 */
	drq->expires = jiffies + dd->fifo_expire[data_dir];
	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
}
/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
	struct deadline_rq *drq = RQ_DATA(rq);
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&drq->fifo);
	deadline_del_drq_rb(dd, drq);
}
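/*
 * See if a bio can be front merged with a queued request: if the bio
 * ends exactly where a queued request begins, the two are contiguous.
 * elv_rb_find() looks the candidate up by the bio's end sector, which is
 * precisely the start sector a front-mergeable request would have.
 */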
static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
		if (__rq) {
			BUG_ON(sector != __rq->sector);

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	*req = __rq;
	return ret;
}
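/*
 * A front merge changes a request's start sector, which is its key in
 * the sort rb-tree, so the request must be deleted and re-inserted. Back
 * merges only grow the tail and need no repositioning.
 */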
static void deadline_merged_request(request_queue_t *q, struct request *req,
				    int type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(RQ_RB_ROOT(dd, req), req);
		deadline_add_drq_rb(dd, req);
	}
}
static void
deadline_merged_requests(request_queue_t *q, struct request *req,
			 struct request *next)
{
	struct deadline_rq *drq = RQ_DATA(req);
	struct deadline_rq *dnext = RQ_DATA(next);

	BUG_ON(!drq);
	BUG_ON(!dnext);

	/*
	 * if dnext expires before drq, assign its expire time to drq
	 * and move into dnext position (dnext will be deleted) in fifo
	 */
	if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
		if (time_before(dnext->expires, drq->expires)) {
			list_move(&drq->fifo, &dnext->fifo);
			drq->expires = dnext->expires;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}
/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
	request_queue_t *q = drq->request->q;

	deadline_remove_request(q, drq->request);
	elv_dispatch_add_tail(q, drq->request);
}
/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
	struct request *rq = drq->request;
	const int data_dir = rq_data_dir(rq);
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	dd->next_drq[READ] = NULL;
	dd->next_drq[WRITE] = NULL;

	if (rbnext)
		dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));

	dd->last_sector = drq->request->sector + drq->request->nr_sectors;

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, drq);
}
#define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)
/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo, 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir]).
 */
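/*
 * Note: time_after() compares jiffies values safely across counter
 * wraparound, so a fifo deadline stays valid even if jiffies overflows.
 */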
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * drq is expired!
	 */
	if (time_after(jiffies, drq->expires))
		return 1;

	return 0;
}
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
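/*
 * In outline, the selection order below is:
 *
 *	1) if a batch is in progress and the cached next request is
 *	   sequential, keep batching until fifo_batch is used up;
 *	2) otherwise pick a direction, letting reads win unless writes
 *	   have already been starved writes_starved times;
 *	3) within that direction, serve the oldest fifo entry if it has
 *	   expired, else continue from the cached next request in sector
 *	   order, else restart from the lowest sector (one-way elevator).
 */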
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct deadline_rq *drq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_drq[WRITE])
		drq = dd->next_drq[WRITE];
	else
		drq = dd->next_drq[READ];

	if (drq) {
		/* we have a "next request" */

		if (dd->last_sector != drq->request->sector)
			/* end the batch on a non sequential request */
			dd->batching += dd->fifo_batch;

		if (dd->batching < dd->fifo_batch)
			/* we are still entitled to batch */
			goto dispatch_request;
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir)) {
		/* An expired request exists - satisfy it */
		dd->batching = 0;
		drq = list_entry_fifo(dd->fifo_list[data_dir].next);

	} else if (dd->next_drq[data_dir]) {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		drq = dd->next_drq[data_dir];
	} else {
		struct rb_node *n;

		/*
		 * The last req was the other direction or we have run out of
		 * higher-sectored requests. Go back to the lowest sectored
		 * request (1 way elevator) and start a new batch.
		 */
		dd->batching = 0;

		n = rb_first(&dd->sort_list[data_dir]);
		if (n)
			drq = RQ_DATA(rb_entry_rq(n));
	}

dispatch_request:
	/*
	 * drq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, drq);

	return 1;
}
static int deadline_queue_empty(request_queue_t *q)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	return list_empty(&dd->fifo_list[WRITE])
		&& list_empty(&dd->fifo_list[READ]);
}
static void deadline_exit_queue(elevator_t *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	mempool_destroy(dd->drq_pool);
	kfree(dd);
}
/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
	struct deadline_data *dd;

	if (!drq_pool)
		return NULL;

	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		return NULL;
	memset(dd, 0, sizeof(*dd));

	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					   mempool_free_slab, drq_pool, q->node);
	if (!dd->drq_pool) {
		kfree(dd);
		return NULL;
	}

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	return dd;
}
static void deadline_put_request(request_queue_t *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq = RQ_DATA(rq);

	mempool_free(drq, dd->drq_pool);
	rq->elevator_private = NULL;
}
static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		     gfp_t gfp_mask)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct deadline_rq *drq;

	drq = mempool_alloc(dd->drq_pool, gfp_mask);
	if (drq) {
		memset(drq, 0, sizeof(*drq));
		drq->request = rq;

		INIT_LIST_HEAD(&drq->fifo);

		rq->elevator_private = drq;
		return 0;
	}

	return 1;
}
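/*
 * Because drq objects come from a mempool, allocation above can make
 * forward progress even under memory pressure: the pool keeps
 * BLKDEV_MIN_RQ elements in reserve for exactly this path.
 */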
/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
	return count;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	int ret = deadline_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
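/*
 * Note the __CONV flag above: the expire values live in jiffies
 * internally but are shown and stored as milliseconds through sysfs;
 * the other tunables are plain integers in both places.
 */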
#define DD_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
				      deadline_##name##_store)
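/*
 * Each DD_ATTR(name) becomes a mode 0644 sysfs file named <name> under
 * the queue's iosched/ directory, wired to the show/store pair generated
 * above; e.g. DD_ATTR(read_expire) expands to
 * __ATTR(read_expire, S_IRUGO|S_IWUSR, deadline_read_expire_show,
 *	  deadline_read_expire_store).
 */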
static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};
static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn =		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_dispatch_fn =		deadline_dispatch_requests,
		.elevator_add_req_fn =		deadline_add_request,
		.elevator_queue_empty_fn =	deadline_queue_empty,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		deadline_set_request,
		.elevator_put_req_fn =		deadline_put_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_attrs = deadline_attrs,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};
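/*
 * Module init creates the slab cache for deadline_rq objects first and
 * only then registers the elevator; if registration fails, the cache is
 * torn down again so a failed load leaves no state behind.
 */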
static int __init deadline_init(void)
{
	int ret;

	drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
				     0, 0, NULL, NULL);
	if (!drq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_deadline);
	if (ret)
		kmem_cache_destroy(drq_pool);

	return ret;
}
static void __exit deadline_exit(void)
{
	kmem_cache_destroy(drq_pool);
	elv_unregister(&iosched_deadline);
}
module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");