5 * This is the per-process anticipatory I/O scheduler state.
10 void (*dtor)(struct as_io_context *aic); /* destructor */
11 void (*exit)(struct as_io_context *aic); /* called on task exit */
14 atomic_t nr_queued; /* queued reads & sync writes */
15 atomic_t nr_dispatched; /* number of requests gone to the drivers */
17 /* IO History tracking */
19 unsigned long last_end_request;
20 unsigned long ttime_total;
21 unsigned long ttime_samples;
22 unsigned long ttime_mean;
24 unsigned int seek_samples;
25 sector_t last_request_pos;
31 struct cfq_io_context {
32 struct rb_node rb_node;
35 struct cfq_queue *cfqq[2];
37 struct io_context *ioc;
39 unsigned long last_end_request;
40 sector_t last_request_pos;
42 unsigned long ttime_total;
43 unsigned long ttime_samples;
44 unsigned long ttime_mean;
46 unsigned int seek_samples;
50 struct list_head queue_list;
52 void (*dtor)(struct io_context *); /* destructor */
53 void (*exit)(struct io_context *); /* called on task exit */
57 * This is the per-process I/O subsystem state. It is refcounted and
58 * kmalloc'ed. Currently all fields are modified in process io context
59 * (apart from the atomic refcount), so they require no locking.
63 struct task_struct *task;
65 unsigned short ioprio;
66 unsigned short ioprio_changed;
69 * For request batching
71 unsigned long last_waited; /* Time last woken after wait for request */
72 int nr_batch_requests; /* Number of requests left in the batch */
74 struct as_io_context *aic;
75 struct rb_root cic_root;