#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 14
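
/*
 * Illustrative sketch of the layout produced below (this comment is an
 * addition for clarity, not part of the original file): after "version"
 * and "timestamp" header lines, each online cpu gets one line of the form
 *
 *	cpu<N> yld_both_empty yld_act_empty yld_exp_empty yld_count
 *	       sched_switch sched_count sched_goidle ttwu_count ttwu_local
 *	       rq_cpu_time rq_run_delay rq_pcount
 *
 * followed (on SMP) by one "domain<N> <cpumask> ..." line per sched_domain.
 */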

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = NR_CPUS/32 * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_both_empty,
		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_sched_info.cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");
#ifdef CONFIG_SMP
		/* domain-specific stats */
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len, sd->span);
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
			    " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
#endif
	}
	kfree(mask_str);
	return 0;
}

static int schedstat_open(struct inode *inode, struct file *file)
{
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
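
/*
 * Usage sketch (an addition for illustration; the registration itself lives
 * outside this file): a proc entry is typically wired to these operations
 * with something like
 *
 *	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
 *
 * after which the counters can be read from userspace, e.g. with
 * "cat /proc/schedstat".
 */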

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.cpu_time += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
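
/*
 * Usage sketch (an addition for illustration; the callers live in the
 * scheduler proper): because the !CONFIG_SCHEDSTATS variants below compile
 * to nothing, hot paths can bump counters unconditionally, e.g.
 *
 *	schedstat_inc(rq, ttwu_count);
 *	schedstat_add(sd, lb_imbalance[idle], imbalance);
 *
 * without adding any cost to a build with schedstats disabled.
 */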
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
/*
 * Called when a process is dequeued from the active array and given
 * the cpu.  We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue.  (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * This function is only called from sched_info_arrive(), rather than
 * dequeue_task().  Even though a task may be queued and dequeued multiple
 * times as it is shuffled about, we're really interested in knowing how
 * long it was from the *first* time it was queued to the time that it
 * finally hit a cpu.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * Called when a process is queued into either the active or expired
 * array.  The time is noted and later used to determine how long the
 * task had to wait to reach the cpu.  Since the expired queue will
 * become the active queue after the active queue is empty, without
 * dequeuing and requeuing any tasks, we are interested in queuing to
 * either.  It is unusual but not impossible for tasks to be dequeued
 * and immediately requeued in the same or another array: this can happen
 * in sched_yield(), set_user_nice(), and even load_balance() as it moves
 * tasks from runqueue to runqueue.
 *
 * This function is only called from enqueue_task(), but it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	t->sched_info.cpu_time += delta;
	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}

/*
 * Called when tasks are switched involuntarily, typically because they
 * have expired their time slice.  (This may also be called when switching
 * to or from the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}

static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
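
/*
 * Worked example (an addition for illustration, not part of the original
 * file): a task is made runnable at time t0, so sched_info_queued() stores
 * last_queued = t0.  It first gets the cpu at t1: sched_info_arrive() adds
 * t1 - t0 to run_delay, clears last_queued and records last_arrival = t1.
 * When it is switched out at t2, sched_info_depart() adds t2 - t1 to
 * cpu_time and, if the task is still TASK_RUNNING, calls
 * sched_info_queued() again so the next wait starts counting from t2.
 */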
#else
#define sched_info_queued(t)		do { } while (0)
#define sched_info_switch(t, next)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */