err.no Git - linux-2.6/blobdiff - kernel/trace/trace.c
ftrace: fix printout
[linux-2.6] / kernel / trace / trace.c
index 82ced406aacf810b9fd45818ef943b35d3b003b8..6e9dae7eb418d67bf549c1c699101a4058b0384f 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/poll.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/kprobes.h>
+#include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
 
@@ -41,16 +43,13 @@ static cpumask_t __read_mostly              tracing_buffer_mask;
 #define for_each_tracing_cpu(cpu)      \
        for_each_cpu_mask(cpu, tracing_buffer_mask)
 
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly = {
-       .name           = "none",
-};
-
 static int trace_alloc_page(void);
 static int trace_free_page(void);
 
 static int tracing_disabled = 1;
 
+static unsigned long tracing_pages_allocated;
+
 long
 ns2usecs(cycle_t nsec)
 {
@@ -131,6 +130,23 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds iter_ctrl options */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
 
+static notrace void no_trace_init(struct trace_array *tr)
+{
+       int cpu;
+
+       if(tr->ctrl)
+               for_each_online_cpu(cpu)
+                       tracing_reset(tr->data[cpu]);
+       tracer_enabled = 0;
+}
+
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly = {
+       .name           = "none",
+       .init           = no_trace_init
+};
+
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -246,24 +262,32 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        tracing_record_cmdline(current);
 }
 
+#define CHECK_COND(cond)                       \
+       if (unlikely(cond)) {                   \
+               tracing_disabled = 1;           \
+               WARN_ON(1);                     \
+               return -1;                      \
+       }
+
 /**
  * check_pages - integrity check of trace buffers
  *
  * As a safty measure we check to make sure the data pages have not
- * been corrupted. TODO: configure to disable this because it adds
- * a bit of overhead.
+ * been corrupted.
  */
-void check_pages(struct trace_array_cpu *data)
+int check_pages(struct trace_array_cpu *data)
 {
        struct page *page, *tmp;
 
-       BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
-       BUG_ON(data->trace_pages.prev->next != &data->trace_pages);
+       CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
+       CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
 
        list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
-               BUG_ON(page->lru.next->prev != &page->lru);
-               BUG_ON(page->lru.prev->next != &page->lru);
+               CHECK_COND(page->lru.next->prev != &page->lru);
+               CHECK_COND(page->lru.prev->next != &page->lru);
        }
+
+       return 0;
 }
 
 /**
@@ -277,7 +301,6 @@ void *head_page(struct trace_array_cpu *data)
 {
        struct page *page;
 
-       check_pages(data);
        if (list_empty(&data->trace_pages))
                return NULL;
 
@@ -642,9 +665,6 @@ static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
 static DEFINE_SPINLOCK(trace_cmdline_lock);
 
-/* trace in all context switches */
-atomic_t trace_record_cmdline_enabled __read_mostly;
-
 /* temporary disable recording */
 atomic_t trace_record_cmdline_disabled __read_mostly;
 
@@ -898,7 +918,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->ctx.next_pid     = next->pid;
        entry->ctx.next_prio    = next->prio;
        entry->ctx.next_state   = next->state;
-       __trace_stack(tr, data, flags, 4);
+       __trace_stack(tr, data, flags, 5);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
 }
@@ -924,13 +944,37 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->ctx.next_pid     = wakee->pid;
        entry->ctx.next_prio    = wakee->prio;
        entry->ctx.next_state   = wakee->state;
-       __trace_stack(tr, data, flags, 5);
+       __trace_stack(tr, data, flags, 6);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
 
        trace_wake_up();
 }
 
+void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+
+       if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl)
+               return;
+
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1))
+               __trace_special(tr, data, arg1, arg2, arg3);
+
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
+
 #ifdef CONFIG_FTRACE
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
@@ -1168,6 +1212,20 @@ static void s_stop(struct seq_file *m, void *p)
        mutex_unlock(&trace_types_lock);
 }
 
+#define KRETPROBE_MSG "[unknown/kretprobe'd]"
+
+#ifdef CONFIG_KRETPROBES
+static inline int kretprobed(unsigned long addr)
+{
+       return addr == (unsigned long)kretprobe_trampoline;
+}
+#else
+static inline int kretprobed(unsigned long addr)
+{
+       return 0;
+}
+#endif /* CONFIG_KRETPROBES */
+
 static int
 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
 {
@@ -1403,7 +1461,10 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
        case TRACE_FN:
                seq_print_ip_sym(s, entry->fn.ip, sym_flags);
                trace_seq_puts(s, " (");
-               seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
+               if (kretprobed(entry->fn.parent_ip))
+                       trace_seq_puts(s, KRETPROBE_MSG);
+               else
+                       seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
                trace_seq_puts(s, ")\n");
                break;
        case TRACE_CTX:
@@ -1483,8 +1544,11 @@ static int print_trace_fmt(struct trace_iterator *iter)
                        ret = trace_seq_printf(s, " <-");
                        if (!ret)
                                return 0;
-                       ret = seq_print_ip_sym(s, entry->fn.parent_ip,
-                                              sym_flags);
+                       if (kretprobed(entry->fn.parent_ip))
+                               ret = trace_seq_puts(s, KRETPROBE_MSG);
+                       else
+                               ret = seq_print_ip_sym(s, entry->fn.parent_ip,
+                                                      sym_flags);
                        if (!ret)
                                return 0;
                }
@@ -2563,7 +2627,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 {
        unsigned long val;
        char buf[64];
-       int ret;
+       int i, ret;
 
        if (cnt >= sizeof(buf))
                return -EINVAL;
@@ -2591,20 +2655,56 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        }
 
        if (val > global_trace.entries) {
+               long pages_requested;
+               unsigned long freeable_pages;
+
+               /* make sure we have enough memory before mapping */
+               pages_requested =
+                       (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
+
+               /* account for each buffer (and max_tr) */
+               pages_requested *= tracing_nr_buffers * 2;
+
+               /* Check for overflow */
+               if (pages_requested < 0) {
+                       cnt = -ENOMEM;
+                       goto out;
+               }
+
+               freeable_pages = determine_dirtyable_memory();
+
+               /* we only allow to request 1/4 of useable memory */
+               if (pages_requested >
+                   ((freeable_pages + tracing_pages_allocated) / 4)) {
+                       cnt = -ENOMEM;
+                       goto out;
+               }
+
                while (global_trace.entries < val) {
                        if (trace_alloc_page()) {
                                cnt = -ENOMEM;
                                goto out;
                        }
+                       /* double check that we don't go over the known pages */
+                       if (tracing_pages_allocated > pages_requested)
+                               break;
                }
+
        } else {
                /* include the number of entries in val (inc of page entries) */
                while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
                        trace_free_page();
        }
 
+       /* check integrity */
+       for_each_tracing_cpu(i)
+               check_pages(global_trace.data[i]);
+
        filp->f_pos += cnt;
 
+       /* If check pages failed, return ENOMEM */
+       if (tracing_disabled)
+               cnt = -ENOMEM;
  out:
        max_tr.entries = global_trace.entries;
        mutex_unlock(&trace_types_lock);
@@ -2776,6 +2876,7 @@ static int trace_alloc_page(void)
        struct page *page, *tmp;
        LIST_HEAD(pages);
        void *array;
+       unsigned pages_allocated = 0;
        int i;
 
        /* first allocate a page for each CPU */
@@ -2787,6 +2888,7 @@ static int trace_alloc_page(void)
                        goto free_pages;
                }
 
+               pages_allocated++;
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
 
@@ -2798,6 +2900,7 @@ static int trace_alloc_page(void)
                               "for trace buffer!\n");
                        goto free_pages;
                }
+               pages_allocated++;
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
 #endif
@@ -2819,6 +2922,7 @@ static int trace_alloc_page(void)
                SetPageLRU(page);
 #endif
        }
+       tracing_pages_allocated += pages_allocated;
        global_trace.entries += ENTRIES_PER_PAGE;
 
        return 0;
@@ -2853,6 +2957,8 @@ static int trace_free_page(void)
                page = list_entry(p, struct page, lru);
                ClearPageLRU(page);
                list_del(&page->lru);
+               tracing_pages_allocated--;
+               tracing_pages_allocated--;
                __free_page(page);
 
                tracing_reset(data);
@@ -2889,8 +2995,6 @@ __init static int tracer_alloc_buffers(void)
        int ret = -ENOMEM;
        int i;
 
-       global_trace.ctrl = tracer_enabled;
-
        /* TODO: make the number of buffers hot pluggable with CPUS */
        tracing_nr_buffers = num_possible_cpus();
        tracing_buffer_mask = cpu_possible_map;
@@ -2947,9 +3051,8 @@ __init static int tracer_alloc_buffers(void)
        }
        max_tr.entries = global_trace.entries;
 
-       pr_info("tracer: %d pages allocated for %ld",
-               pages, trace_nr_entries);
-       pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
+       pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n",
+               pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE);
        pr_info("   actual entries %ld\n", global_trace.entries);
 
        tracer_init_debugfs();
@@ -2960,6 +3063,7 @@ __init static int tracer_alloc_buffers(void)
        current_trace = &no_tracer;
 
        /* All seems OK, enable tracing */
+       global_trace.ctrl = tracer_enabled;
        tracing_disabled = 0;
 
        return 0;