ftrace: avoid modifying kprobe'd records
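In kernel/trace/trace.c, this part of the change pulls in <linux/kprobes.h>, makes the function tracer bail out early for addresses it must not touch (skip_trace()), and teaches both output paths to print "[unknown/kretprobe'd]" instead of a symbol when the recorded parent IP is the kretprobe trampoline: a kretprobe swaps the probed function's return address on the stack, so the caller the tracer sees is the trampoline rather than the real one. The remaining hunks are cleanups: the dummy "none" tracer gains an init callback that resets the per-CPU buffers, the trace_record_cmdline_enabled declaration goes away, and the buffer-allocation pr_info() is folded into a single line.

The stand-alone, user-space sketch below mirrors the parent-IP dispatch added in print_lat_fmt()/print_trace_fmt(); kretprobe_trampoline_addr, lookup_symbol() and print_parent() are purely illustrative stand-ins, not kernel APIs.

/*
 * Illustrative user-space sketch of the kretprobed() dispatch below.
 * All addresses and helpers here are made up for demonstration.
 */
#include <stdio.h>

#define KRETPROBE_MSG "[unknown/kretprobe'd]"

/* pretend address of the kretprobe trampoline */
static const unsigned long kretprobe_trampoline_addr = 0xc0100000UL;

static int kretprobed(unsigned long addr)
{
	return addr == kretprobe_trampoline_addr;
}

/* stand-in for seq_print_ip_sym()'s symbol lookup */
static const char *lookup_symbol(unsigned long addr)
{
	return addr == 0xc0200010UL ? "real_caller+0x10/0x80" : "<unknown>";
}

static void print_parent(unsigned long parent_ip)
{
	if (kretprobed(parent_ip))
		printf(" <-%s\n", KRETPROBE_MSG);	/* caller unknown: placeholder */
	else
		printf(" <-%s\n", lookup_symbol(parent_ip));
}

int main(void)
{
	print_parent(0xc0200010UL);		 /* ordinary caller: symbol printed */
	print_parent(kretprobe_trampoline_addr); /* kretprobe'd: placeholder printed */
	return 0;
}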
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 583fe24903d39c35240bc66f976dd4a1b156ad02..9ade79369bfb07f342e5942d73f3e0fd891cebd6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -27,6 +27,7 @@
 #include <linux/poll.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/kprobes.h>
 #include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
@@ -42,11 +43,6 @@ static cpumask_t __read_mostly               tracing_buffer_mask;
 #define for_each_tracing_cpu(cpu)      \
        for_each_cpu_mask(cpu, tracing_buffer_mask)
 
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly = {
-       .name           = "none",
-};
-
 static int trace_alloc_page(void);
 static int trace_free_page(void);
 
@@ -134,6 +130,23 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds iter_ctrl options */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
 
+static notrace void no_trace_init(struct trace_array *tr)
+{
+       int cpu;
+
+       if (tr->ctrl)
+               for_each_online_cpu(cpu)
+                       tracing_reset(tr->data[cpu]);
+       tracer_enabled = 0;
+}
+
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly = {
+       .name           = "none",
+       .init           = no_trace_init
+};
+
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -652,9 +665,6 @@ static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
 static DEFINE_SPINLOCK(trace_cmdline_lock);
 
-/* trace in all context switches */
-atomic_t trace_record_cmdline_enabled __read_mostly;
-
 /* temporary disable recording */
 atomic_t trace_record_cmdline_disabled __read_mostly;
 
@@ -978,6 +988,9 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
        if (unlikely(!tracer_enabled))
                return;
 
+       if (skip_trace(ip))
+               return;
+
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
@@ -1202,6 +1215,20 @@ static void s_stop(struct seq_file *m, void *p)
        mutex_unlock(&trace_types_lock);
 }
 
+#define KRETPROBE_MSG "[unknown/kretprobe'd]"
+
+#ifdef CONFIG_KRETPROBES
+static inline int kretprobed(unsigned long addr)
+{
+       return addr == (unsigned long)kretprobe_trampoline;
+}
+#else
+static inline int kretprobed(unsigned long addr)
+{
+       return 0;
+}
+#endif /* CONFIG_KRETPROBES */
+
 static int
 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
 {
@@ -1437,7 +1464,10 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
        case TRACE_FN:
                seq_print_ip_sym(s, entry->fn.ip, sym_flags);
                trace_seq_puts(s, " (");
-               seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
+               if (kretprobed(entry->fn.parent_ip))
+                       trace_seq_puts(s, KRETPROBE_MSG);
+               else
+                       seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
                trace_seq_puts(s, ")\n");
                break;
        case TRACE_CTX:
@@ -1517,8 +1547,11 @@ static int print_trace_fmt(struct trace_iterator *iter)
                        ret = trace_seq_printf(s, " <-");
                        if (!ret)
                                return 0;
-                       ret = seq_print_ip_sym(s, entry->fn.parent_ip,
-                                              sym_flags);
+                       if (kretprobed(entry->fn.parent_ip))
+                               ret = trace_seq_puts(s, KRETPROBE_MSG);
+                       else
+                               ret = seq_print_ip_sym(s, entry->fn.parent_ip,
+                                                      sym_flags);
                        if (!ret)
                                return 0;
                }
@@ -3021,9 +3054,8 @@ __init static int tracer_alloc_buffers(void)
        }
        max_tr.entries = global_trace.entries;
 
-       pr_info("tracer: %d pages allocated for %ld",
-               pages, trace_nr_entries);
-       pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
+       pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n",
+               pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE);
        pr_info("   actual entries %ld\n", global_trace.entries);
 
        tracer_init_debugfs();
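
function_trace_call() now returns early whenever skip_trace(ip) says the address belongs to a record the tracer has to leave alone; per the commit subject, these are records kprobes has already modified. The helper itself lives in the ftrace core rather than in this file, so the sketch below is only a conceptual stand-in, assuming a small sorted table of hands-off addresses and a bsearch() lookup; skip_trace()'s real data structure is not shown here.

#include <stdio.h>
#include <stdlib.h>

/* made-up addresses kprobes has patched; kept sorted so bsearch() works */
static unsigned long notrace_addrs[] = {
	0xc0123450UL,
	0xc0188820UL,
	0xc01f0010UL,
};

static int cmp_addr(const void *a, const void *b)
{
	unsigned long x = *(const unsigned long *)a;
	unsigned long y = *(const unsigned long *)b;

	return (x > y) - (x < y);
}

/* non-zero means: do not record or modify this ip */
static int skip_trace(unsigned long ip)
{
	return bsearch(&ip, notrace_addrs,
		       sizeof(notrace_addrs) / sizeof(notrace_addrs[0]),
		       sizeof(notrace_addrs[0]), cmp_addr) != NULL;
}

int main(void)
{
	printf("0xc0188820 skipped: %d\n", skip_trace(0xc0188820UL)); /* in table: skipped */
	printf("0xc0400000 skipped: %d\n", skip_trace(0xc0400000UL)); /* not in table: traced */
	return 0;
}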