ftrace: trace faster
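
This change threads the runqueue pointer (__rq) through the context-switch
and wakeup tracing hooks, adds a wakeup_func() hook and an ftrace_special()
entry point, gates per-task cmdline recording behind the
trace_record_cmdline_enabled counter so it only runs when a tracer asked for
it, renames now() to ftrace_now(), drops the per-function notrace
annotations, and registers a startup self-test under CONFIG_FTRACE_SELFTEST.
A standalone sketch of the recursion guard shared by these hooks follows the
diff.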
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 2715267be4696913b4b89269b981d30766a0a754..a3376478fc2cfab0170a5d09d85c454ef39227fe 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -17,8 +17,8 @@
 static struct trace_array      *ctx_trace;
 static int __read_mostly       tracer_enabled;
 
-static void notrace
-ctx_switch_func(struct task_struct *prev, struct task_struct *next)
+static void
+ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
 {
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
@@ -41,15 +41,44 @@ ctx_switch_func(struct task_struct *prev, struct task_struct *next)
        local_irq_restore(flags);
 }
 
-void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
+static void
+wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
 {
-       tracing_record_cmdline(prev);
+       struct trace_array *tr = ctx_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+
+       if (!tracer_enabled)
+               return;
+
+       tracing_record_cmdline(curr);
+
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1))
+               tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
+
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
+
+void
+ftrace_ctx_switch(void *__rq, struct task_struct *prev,
+                 struct task_struct *next)
+{
+       if (unlikely(atomic_read(&trace_record_cmdline_enabled)))
+               tracing_record_cmdline(prev);
 
        /*
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
         */
-       ctx_switch_func(prev, next);
+       ctx_switch_func(__rq, prev, next);
 
        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
@@ -57,28 +86,66 @@ void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
        wakeup_sched_switch(prev, next);
 }
 
-static notrace void sched_switch_reset(struct trace_array *tr)
+void
+ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
+                   struct task_struct *curr)
+{
+       wakeup_func(__rq, wakee, curr);
+
+       /*
+        * Chain to the wakeup tracer (this is a NOP if disabled):
+        */
+       wakeup_sched_wakeup(wakee, curr);
+}
+
+void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+       struct trace_array *tr = ctx_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+
+       if (!tracer_enabled)
+               return;
+
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1))
+               __trace_special(tr, data, arg1, arg2, arg3);
+
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
+
+static void sched_switch_reset(struct trace_array *tr)
 {
        int cpu;
 
-       tr->time_start = now(tr->cpu);
+       tr->time_start = ftrace_now(tr->cpu);
 
        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
 }
 
-static notrace void start_sched_trace(struct trace_array *tr)
+static void start_sched_trace(struct trace_array *tr)
 {
        sched_switch_reset(tr);
+       atomic_inc(&trace_record_cmdline_enabled);
        tracer_enabled = 1;
 }
 
-static notrace void stop_sched_trace(struct trace_array *tr)
+static void stop_sched_trace(struct trace_array *tr)
 {
+       atomic_dec(&trace_record_cmdline_enabled);
        tracer_enabled = 0;
 }
 
-static notrace void sched_switch_trace_init(struct trace_array *tr)
+static void sched_switch_trace_init(struct trace_array *tr)
 {
        ctx_trace = tr;
 
@@ -86,7 +153,7 @@ static notrace void sched_switch_trace_init(struct trace_array *tr)
                start_sched_trace(tr);
 }
 
-static notrace void sched_switch_trace_reset(struct trace_array *tr)
+static void sched_switch_trace_reset(struct trace_array *tr)
 {
        if (tr->ctrl)
                stop_sched_trace(tr);
@@ -107,6 +174,9 @@ static struct tracer sched_switch_trace __read_mostly =
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
+#ifdef CONFIG_FTRACE_SELFTEST
+       .selftest    = trace_selftest_startup_sched_switch,
+#endif
 };
 
 __init static int init_sched_switch_trace(void)
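
Every hook this patch touches (ctx_switch_func, wakeup_func, ftrace_special)
uses the same guard: raise a per-cpu "disabled" counter with
atomic_inc_return() under local_irq_save(), and only write a trace entry when
this is the outermost activation. Below is a minimal userspace C sketch of
that pattern, not the kernel code itself: struct cpu_trace, record_event and
hook are hypothetical stand-ins for the kernel's trace_array_cpu, the
tracing_*_trace helpers and the hook bodies, and a plain C11 atomic stands in
for the irq-disabled per-cpu context.

/*
 * Minimal userspace sketch (assumed names throughout) of the guard
 * used by ctx_switch_func(), wakeup_func() and ftrace_special() above.
 */
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the kernel's per-cpu struct trace_array_cpu. */
struct cpu_trace {
	_Atomic long disabled;
};

static struct cpu_trace cpu0;

/* Stand-in for tracing_sched_wakeup_trace()/__trace_special(). */
static void record_event(const char *what)
{
	printf("trace: %s\n", what);
}

/*
 * Only the outermost activation records an event; nested calls are
 * dropped.  C11's atomic_fetch_add() returns the old value, so the
 * "== 0" test here matches the kernel's atomic_inc_return(...) == 1.
 * (The kernel additionally wraps this in local_irq_save/restore.)
 */
static void hook(const char *what)
{
	if (atomic_fetch_add(&cpu0.disabled, 1) == 0)
		record_event(what);
	atomic_fetch_sub(&cpu0.disabled, 1);
}

int main(void)
{
	hook("sched_wakeup");	/* outermost activation: recorded */
	hook("sched_switch");	/* outermost activation: recorded */
	return 0;
}

The point of the counter is that a nested invocation (for instance, if the
tracer's own bookkeeping re-entered the scheduler path) is dropped rather
than recursing, which is what makes these hooks safe to call from the
context-switch path itself.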