kernel/trace/trace_sched_switch.c (linux-2.6, commit "ftrace: sched special")
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;

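/*
 * ctx_switch_func() records one context-switch event into the per-CPU
 * trace buffer.  Interrupts are disabled and data->disabled acts as a
 * recursion counter: the event is only written when this is the sole
 * active trace path on this CPU (disabled == 1).
 */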
static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(prev);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		tracing_sched_switch_trace(tr, data, prev, next, flags);
		if (trace_flags & TRACE_ITER_SCHED_TREE)
			ftrace_all_fair_tasks(__rq, tr, data);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

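/*
 * wakeup_func() is the wakeup counterpart of ctx_switch_func(): it logs a
 * sched_wakeup event for @wakee from the context of @curr, using the same
 * per-CPU recursion protection and irq disabling.
 */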
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(curr);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
		if (trace_flags & TRACE_ITER_SCHED_TREE)
			ftrace_all_fair_tasks(__rq, tr, data);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

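/*
 * Hook called from the scheduler's context-switch path.  It records the
 * switch for this tracer and then chains to the wakeup-latency tracer,
 * which is a NOP when that tracer is not active.
 */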
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(__rq, prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}

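/*
 * Hook called from the scheduler's wakeup path; mirrors ftrace_ctx_switch()
 * and likewise chains the event to the wakeup-latency tracer.
 */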
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_wakeup(wakee, curr);
}

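/*
 * ftrace_special() lets ad-hoc instrumentation drop three arbitrary values
 * into the trace buffer; it follows the same disable-count/irq-save pattern
 * as the switch and wakeup handlers above.
 */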
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		__trace_special(tr, data, arg1, arg2, arg3);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

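/*
 * Reset the trace: stamp a new start time and clear the ring buffer of
 * every online CPU so the next trace starts from a known-empty state.
 */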
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

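/*
 * start_sched_trace()/stop_sched_trace() reset the buffers and flip
 * tracer_enabled, which the probes above check before recording anything.
 */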
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
}

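/*
 * Tracer callbacks: init/reset run when this tracer is selected or
 * deselected, ctrl_update when tracing is toggled on or off through the
 * tracing control file in debugfs (tr->ctrl reflects that state).
 */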
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}

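/*
 * The tracer definition registered with the ftrace core; the optional
 * selftest exercises the tracer at boot when CONFIG_FTRACE_SELFTEST is set.
 */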
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

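/*
 * Register the tracer at device_initcall() time so it appears in the list
 * of available tracers once the tracing core is up.
 */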
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
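/*
 * Typical usage from userspace, as a sketch (assuming debugfs is mounted at
 * /sys/kernel/debug and this kernel exposes the usual tracing files):
 *
 *   echo sched_switch > /sys/kernel/debug/tracing/current_tracer
 *   echo 1 > /sys/kernel/debug/tracing/tracing_enabled
 *   cat /sys/kernel/debug/tracing/trace
 */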