ftrace: sched tracer, trace full rbtree
author	Ingo Molnar <mingo@elte.hu>
Mon, 12 May 2008 19:20:52 +0000 (21:20 +0200)
committer	Thomas Gleixner <tglx@linutronix.de>
Fri, 23 May 2008 19:04:44 +0000 (21:04 +0200)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/sched.h
kernel/sched.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_sched_switch.c

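What the patch does: whenever the context-switch or wakeup tracer fires, the new ftrace_all_fair_tasks() walks the runqueue's CFS red-black tree and records one TRACE_SPECIAL entry per queued task (pid, vruntime, sum_exec_runtime) through __trace_special(). The sketch below is only an illustration of that walk, not kernel code: struct toy_entity and the flat array are hypothetical stand-ins for the sched_entity rbtree, and printf() stands in for the trace buffer. Note that the cursor is advanced before a non-task entity is skipped, so the loop always terminates.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for a queued sched_entity. */
struct toy_entity {
	bool		is_task;		/* group entities carry no task */
	int		pid;
	unsigned long	vruntime;
	unsigned long	sum_exec_runtime;
};

/* Stand-in for __trace_special(): record three values per queued task. */
static void trace_special(unsigned long a1, unsigned long a2, unsigned long a3)
{
	printf("%lu %lu %lu\n", a1, a2, a3);
}

static void trace_all_fair_tasks(const struct toy_entity *rq, int nr)
{
	int i = 0;

	while (i < nr) {
		const struct toy_entity *se = &rq[i];

		i++;				/* advance the cursor first... */
		if (!se->is_task)		/* ...so skipping cannot loop forever */
			continue;

		trace_special(se->pid, se->vruntime, se->sum_exec_runtime);
	}
}

int main(void)
{
	const struct toy_entity rq[] = {
		{ .is_task = true,  .pid = 101, .vruntime = 1000, .sum_exec_runtime = 5000 },
		{ .is_task = false },		/* group entity: skipped */
		{ .is_task = true,  .pid = 102, .vruntime = 1500, .sum_exec_runtime = 7000 },
	};

	trace_all_fair_tasks(rq, 3);
	return 0;
}
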
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 05744f9cb0961b8df1221bc51da1703883eb0f64..652d380ae5631b363d5868c609ea7098fe6798f4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2119,20 +2119,34 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 
 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
 extern void
-ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next);
+ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next);
+extern void
+ftrace_wake_up_task(void *rq, struct task_struct *wakee,
+                   struct task_struct *curr);
+extern void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data);
+extern void
+__trace_special(void *__tr, void *__data,
+               unsigned long arg1, unsigned long arg2, unsigned long arg3);
 #else
 static inline void
-ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
+ftrace_ctx_switch(void *rq, struct task_struct *prev, struct task_struct *next)
+{
+}
+static inline void
+sched_trace_special(unsigned long p1, unsigned long p2, unsigned long p3)
+{
+}
+static inline void
+ftrace_wake_up_task(void *rq, struct task_struct *wakee,
+                   struct task_struct *curr)
+{
+}
+static inline void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
 {
 }
-#endif
-
-#ifdef CONFIG_SCHED_TRACER
-extern void
-ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr);
-#else
 static inline void
-ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
+__trace_special(void *__tr, void *__data,
+               unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
 }
 #endif
diff --git a/kernel/sched.c b/kernel/sched.c
index 53ab1174664fc53e8ea2719ef8b0d346b1af94be..b9208a0e33a041e39c23f97dd394eca145a08af1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2394,6 +2394,35 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_CONTEXT_SWITCH_TRACER
+
+void ftrace_all_fair_tasks(void *__rq, void *__tr, void *__data)
+{
+       struct sched_entity *se;
+       struct task_struct *p;
+       struct rb_node *curr;
+       struct rq *rq = __rq;
+
+       curr = first_fair(&rq->cfs);
+       if (!curr)
+               return;
+
+       while (curr) {
+               se = rb_entry(curr, struct sched_entity, run_node);
+               curr = rb_next(curr);
+
+               if (!entity_is_task(se))
+                       continue;
+
+               p = task_of(se);
+
+               __trace_special(__tr, __data,
+                             p->pid, p->se.vruntime, p->se.sum_exec_runtime);
+       }
+}
+
+#endif
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -2468,7 +2497,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
 out_activate:
 #endif /* CONFIG_SMP */
-       ftrace_wake_up_task(p, rq->curr);
+       ftrace_wake_up_task(rq, p, rq->curr);
        schedstat_inc(p, se.nr_wakeups);
        if (sync)
                schedstat_inc(p, se.nr_wakeups_sync);
@@ -2613,7 +2642,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                p->sched_class->task_new(rq, p);
                inc_nr_running(rq);
        }
-       ftrace_wake_up_task(p, rq->curr);
+       ftrace_wake_up_task(rq, p, rq->curr);
        check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_wake_up)
@@ -2786,7 +2815,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm, *oldmm;
 
        prepare_task_switch(rq, prev, next);
-       ftrace_ctx_switch(prev, next);
+       ftrace_ctx_switch(rq, prev, next);
        mm = next->mm;
        oldmm = prev->active_mm;
        /*
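
A note on the new signatures: struct rq is defined only inside kernel/sched.c, so the tracing hooks take the runqueue as an opaque void * and only sched.c casts it back (struct rq *rq = __rq; in ftrace_all_fair_tasks() above). The sketch below shows that opaque-handle pattern in isolation; struct engine and engine_dump() are hypothetical names, not kernel code.

#include <stdio.h>

/* What a public header would expose: no struct definition, just a handle. */
void engine_dump(void *handle);

/* The implementation file is the only place that knows the layout. */
struct engine {
	int rpm;
};

void engine_dump(void *handle)
{
	struct engine *e = handle;	/* like "struct rq *rq = __rq;" */

	printf("rpm=%d\n", e->rpm);
}

int main(void)
{
	struct engine e = { .rpm = 2400 };

	engine_dump(&e);		/* callers only ever see the opaque pointer */
	return 0;
}
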
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0e4b7119e263e094b55c0ffbc36d05ada915e8d7..65173b14b914502e6c03ca086bbc86b3b7d07275 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -66,7 +66,18 @@ static struct tracer         *current_trace __read_mostly;
 static int                     max_tracer_type_len;
 
 static DEFINE_MUTEX(trace_types_lock);
-static DECLARE_WAIT_QUEUE_HEAD (trace_wait);
+static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
+
+unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
+
+/*
+ * FIXME: where should this be called?
+ */
+void trace_wake_up(void)
+{
+       if (!(trace_flags & TRACE_ITER_BLOCK))
+               wake_up(&trace_wait);
+}
 
 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
 
@@ -103,18 +114,6 @@ enum trace_flag_type {
        TRACE_FLAG_SOFTIRQ              = 0x08,
 };
 
-enum trace_iterator_flags {
-       TRACE_ITER_PRINT_PARENT         = 0x01,
-       TRACE_ITER_SYM_OFFSET           = 0x02,
-       TRACE_ITER_SYM_ADDR             = 0x04,
-       TRACE_ITER_VERBOSE              = 0x08,
-       TRACE_ITER_RAW                  = 0x10,
-       TRACE_ITER_HEX                  = 0x20,
-       TRACE_ITER_BIN                  = 0x40,
-       TRACE_ITER_BLOCK                = 0x80,
-       TRACE_ITER_STACKTRACE           = 0x100,
-};
-
 #define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
 
@@ -132,8 +131,6 @@ static const char *trace_options[] = {
        NULL
 };
 
-static unsigned trace_flags = TRACE_ITER_PRINT_PARENT;
-
 static DEFINE_SPINLOCK(ftrace_max_lock);
 
 /*
@@ -660,9 +657,6 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
        entry->fn.ip            = ip;
        entry->fn.parent_ip     = parent_ip;
        spin_unlock_irqrestore(&data->lock, irq_flags);
-
-       if (!(trace_flags & TRACE_ITER_BLOCK))
-               wake_up(&trace_wait);
 }
 
 void
@@ -673,10 +667,14 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
                trace_function(tr, data, ip, parent_ip, flags);
 }
 
+#ifdef CONFIG_CONTEXT_SWITCH_TRACER
+
 void
-trace_special(struct trace_array *tr, struct trace_array_cpu *data,
-             unsigned long arg1, unsigned long arg2, unsigned long arg3)
+__trace_special(void *__tr, void *__data,
+               unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
+       struct trace_array_cpu *data = __data;
+       struct trace_array *tr = __tr;
        struct trace_entry *entry;
        unsigned long irq_flags;
 
@@ -688,11 +686,10 @@ trace_special(struct trace_array *tr, struct trace_array_cpu *data,
        entry->special.arg2     = arg2;
        entry->special.arg3     = arg3;
        spin_unlock_irqrestore(&data->lock, irq_flags);
-
-       if (!(trace_flags & TRACE_ITER_BLOCK))
-               wake_up(&trace_wait);
 }
 
+#endif
+
 void __trace_stack(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long flags,
@@ -739,9 +736,6 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->ctx.next_prio    = next->prio;
        __trace_stack(tr, data, flags, 4);
        spin_unlock_irqrestore(&data->lock, irq_flags);
-
-       if (!(trace_flags & TRACE_ITER_BLOCK))
-               wake_up(&trace_wait);
 }
 
 void
@@ -765,9 +759,6 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->ctx.next_prio    = wakee->prio;
        __trace_stack(tr, data, flags, 5);
        spin_unlock_irqrestore(&data->lock, irq_flags);
-
-       if (!(trace_flags & TRACE_ITER_BLOCK))
-               wake_up(&trace_wait);
 }
 
 #ifdef CONFIG_FTRACE
@@ -1258,7 +1249,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                                 comm);
                break;
        case TRACE_SPECIAL:
-               trace_seq_printf(s, " %lx %lx %lx\n",
+               trace_seq_printf(s, " %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
@@ -1344,7 +1335,7 @@ static int print_trace_fmt(struct trace_iterator *iter)
                        return 0;
                break;
        case TRACE_SPECIAL:
-               ret = trace_seq_printf(s, " %lx %lx %lx\n",
+               ret = trace_seq_printf(s, " %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
@@ -1409,7 +1400,7 @@ static int print_raw_fmt(struct trace_iterator *iter)
                break;
        case TRACE_SPECIAL:
        case TRACE_STACK:
-               ret = trace_seq_printf(s, " %lx %lx %lx\n",
+               ret = trace_seq_printf(s, " %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 387bdcf45e28a4c1f1393e2d1d52f26cc54c9467..75e23747567491807bcefc7d5dd9b2afc343271e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -274,4 +274,18 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 
 extern void *head_page(struct trace_array_cpu *data);
 
+extern unsigned long trace_flags;
+
+enum trace_iterator_flags {
+       TRACE_ITER_PRINT_PARENT         = 0x01,
+       TRACE_ITER_SYM_OFFSET           = 0x02,
+       TRACE_ITER_SYM_ADDR             = 0x04,
+       TRACE_ITER_VERBOSE              = 0x08,
+       TRACE_ITER_RAW                  = 0x10,
+       TRACE_ITER_HEX                  = 0x20,
+       TRACE_ITER_BIN                  = 0x40,
+       TRACE_ITER_BLOCK                = 0x80,
+       TRACE_ITER_STACKTRACE           = 0x100,
+};
+
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 8b1cf1a3aee04cf76df33d17d498852408213284..12658b3f2b2891fc5d853325dc081a311a972a5a 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -18,7 +18,7 @@ static struct trace_array     *ctx_trace;
 static int __read_mostly       tracer_enabled;
 
 static void
-ctx_switch_func(struct task_struct *prev, struct task_struct *next)
+ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
 {
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
@@ -34,14 +34,17 @@ ctx_switch_func(struct task_struct *prev, struct task_struct *next)
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
 
-       if (likely(disabled == 1))
+       if (likely(disabled == 1)) {
                tracing_sched_switch_trace(tr, data, prev, next, flags);
+               ftrace_all_fair_tasks(__rq, tr, data);
+       }
 
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
 }
 
-static void wakeup_func(struct task_struct *wakee, struct task_struct *curr)
+static void
+wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
 {
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
@@ -57,14 +60,18 @@ static void wakeup_func(struct task_struct *wakee, struct task_struct *curr)
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
 
-       if (likely(disabled == 1))
+       if (likely(disabled == 1)) {
                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
+               ftrace_all_fair_tasks(__rq, tr, data);
+       }
 
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
 }
 
-void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
+void
+ftrace_ctx_switch(void *__rq, struct task_struct *prev,
+                 struct task_struct *next)
 {
        tracing_record_cmdline(prev);
 
@@ -72,7 +79,7 @@ void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
         */
-       ctx_switch_func(prev, next);
+       ctx_switch_func(__rq, prev, next);
 
        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
@@ -81,11 +88,12 @@ void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
 }
 
 void
-ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
+ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
+                   struct task_struct *curr)
 {
        tracing_record_cmdline(curr);
 
-       wakeup_func(wakee, curr);
+       wakeup_func(__rq, wakee, curr);
 
        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
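
ctx_switch_func() and wakeup_func() above guard their bodies with atomic_inc_return(&data->disabled) == 1 so that only the outermost entry on a CPU records an event. Below is a minimal single-threaded sketch of that re-entrancy guard; the plain int counter and the names record_event()/disabled are hypothetical simplifications of the kernel's per-cpu atomic_t.

#include <stdio.h>

static int disabled;

static void record_event(const char *what, int depth)
{
	int nesting = ++disabled;	/* atomic_inc_return() in the kernel */

	if (nesting == 1)
		printf("trace: %s\n", what);	/* outermost entry only */

	if (depth)			/* simulate a nested trace entry */
		record_event("nested event (dropped)", depth - 1);

	disabled--;			/* atomic_dec() in the kernel */
}

int main(void)
{
	record_event("sched_switch", 1);
	return 0;
}
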