Git - linux-2.6/blobdiff - kernel/trace/trace.c
ftrace: avoid modifying kprobe'd records
[linux-2.6] / kernel / trace / trace.c
index a102b11eacf2276784ee11271919ad25e5b5fa0f..9ade79369bfb07f342e5942d73f3e0fd891cebd6 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/poll.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/kprobes.h>
+#include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
 
@@ -41,16 +43,13 @@ static cpumask_t __read_mostly              tracing_buffer_mask;
 #define for_each_tracing_cpu(cpu)      \
        for_each_cpu_mask(cpu, tracing_buffer_mask)
 
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly = {
-       .name           = "none",
-};
-
 static int trace_alloc_page(void);
 static int trace_free_page(void);
 
 static int tracing_disabled = 1;
 
+static unsigned long tracing_pages_allocated;
+
 long
 ns2usecs(cycle_t nsec)
 {
@@ -131,6 +130,23 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds iter_ctrl options */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
 
+static notrace void no_trace_init(struct trace_array *tr)
+{
+	int cpu;
+
+	if (tr->ctrl)
+		for_each_online_cpu(cpu)
+			tracing_reset(tr->data[cpu]);
+	tracer_enabled = 0;
+}
+
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly = {
+	.name		= "none",
+	.init		= no_trace_init,
+};
+
+
 /**
  * trace_wake_up - wake up tasks waiting for trace input
  *
@@ -246,24 +262,32 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        tracing_record_cmdline(current);
 }
 
+#define CHECK_COND(cond)                       \
+       if (unlikely(cond)) {                   \
+               tracing_disabled = 1;           \
+               WARN_ON(1);                     \
+               return -1;                      \
+       }
+
 /**
  * check_pages - integrity check of trace buffers
  *
  * As a safty measure we check to make sure the data pages have not
- * been corrupted. TODO: configure to disable this because it adds
- * a bit of overhead.
+ * been corrupted.
  */
-void check_pages(struct trace_array_cpu *data)
+int check_pages(struct trace_array_cpu *data)
 {
        struct page *page, *tmp;
 
-       BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
-       BUG_ON(data->trace_pages.prev->next != &data->trace_pages);
+       CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
+       CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
 
        list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
-               BUG_ON(page->lru.next->prev != &page->lru);
-               BUG_ON(page->lru.prev->next != &page->lru);
+               CHECK_COND(page->lru.next->prev != &page->lru);
+               CHECK_COND(page->lru.prev->next != &page->lru);
        }
+
+       return 0;
 }
 
 /**
@@ -277,7 +301,6 @@ void *head_page(struct trace_array_cpu *data)
 {
        struct page *page;
 
-       check_pages(data);
        if (list_empty(&data->trace_pages))
                return NULL;
 
@@ -400,6 +423,26 @@ static void
 trace_seq_reset(struct trace_seq *s)
 {
        s->len = 0;
+       s->readpos = 0;
+}
+
+ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
+{
+	int len;
+	int ret;
+
+	if (s->len <= s->readpos)
+		return -EBUSY;
+
+	len = s->len - s->readpos;
+	if (cnt > len)
+		cnt = len;
+	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+	if (ret)
+		return -EFAULT;
+
+	/* advance only by what was actually copied, not the whole remainder */
+	s->readpos += cnt;
+	return cnt;
+}
 
 static void
@@ -609,6 +652,7 @@ void unregister_tracer(struct tracer *type)
 void tracing_reset(struct trace_array_cpu *data)
 {
        data->trace_idx = 0;
+       data->overrun = 0;
        data->trace_head = data->trace_tail = head_page(data);
        data->trace_head_idx = 0;
        data->trace_tail_idx = 0;
@@ -620,7 +664,9 @@ static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
 static DEFINE_SPINLOCK(trace_cmdline_lock);
-atomic_t trace_record_cmdline_disabled;
+
+/* temporary disable recording */
+atomic_t trace_record_cmdline_disabled __read_mostly;
 
 static void trace_init_cmdlines(void)
 {
@@ -745,6 +791,7 @@ tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
        if (data->trace_head == data->trace_tail &&
            idx_next == data->trace_tail_idx) {
                /* overrun */
+               data->overrun++;
                data->trace_tail_idx++;
                if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
                        data->trace_tail =
@@ -801,29 +848,6 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
                trace_function(tr, data, ip, parent_ip, flags);
 }
 
-void
-__trace_special(void *__tr, void *__data,
-               unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-       struct trace_array_cpu *data = __data;
-       struct trace_array *tr = __tr;
-       struct trace_entry *entry;
-       unsigned long irq_flags;
-
-       raw_local_irq_save(irq_flags);
-       __raw_spin_lock(&data->lock);
-       entry                   = tracing_get_trace_entry(tr, data);
-       tracing_generic_entry_update(entry, 0);
-       entry->type             = TRACE_SPECIAL;
-       entry->special.arg1     = arg1;
-       entry->special.arg2     = arg2;
-       entry->special.arg3     = arg3;
-       __raw_spin_unlock(&data->lock);
-       raw_local_irq_restore(irq_flags);
-
-       trace_wake_up();
-}
-
 void __trace_stack(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long flags,
@@ -849,6 +873,30 @@ void __trace_stack(struct trace_array *tr,
        save_stack_trace(&trace);
 }
 
+void
+__trace_special(void *__tr, void *__data,
+               unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+       struct trace_array_cpu *data = __data;
+       struct trace_array *tr = __tr;
+       struct trace_entry *entry;
+       unsigned long irq_flags;
+
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
+       entry                   = tracing_get_trace_entry(tr, data);
+       tracing_generic_entry_update(entry, 0);
+       entry->type             = TRACE_SPECIAL;
+       entry->special.arg1     = arg1;
+       entry->special.arg2     = arg2;
+       entry->special.arg3     = arg3;
+       __trace_stack(tr, data, irq_flags, 4);
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
+
+       trace_wake_up();
+}
+
 void
 tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
@@ -870,7 +918,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->ctx.next_pid     = next->pid;
        entry->ctx.next_prio    = next->prio;
        entry->ctx.next_state   = next->state;
-       __trace_stack(tr, data, flags, 4);
+       __trace_stack(tr, data, flags, 5);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
 }
@@ -896,13 +944,37 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->ctx.next_pid     = wakee->pid;
        entry->ctx.next_prio    = wakee->prio;
        entry->ctx.next_state   = wakee->state;
-       __trace_stack(tr, data, flags, 5);
+       __trace_stack(tr, data, flags, 6);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
 
        trace_wake_up();
 }
 
+void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+
+       if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl)
+               return;
+
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1))
+               __trace_special(tr, data, arg1, arg2, arg3);
+
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
+
 #ifdef CONFIG_FTRACE
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
@@ -916,6 +988,9 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
        if (unlikely(!tracer_enabled))
                return;
 
+       if (skip_trace(ip))
+               return;
+
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
@@ -1140,6 +1215,20 @@ static void s_stop(struct seq_file *m, void *p)
        mutex_unlock(&trace_types_lock);
 }
 
+#define KRETPROBE_MSG "[unknown/kretprobe'd]"
+
+#ifdef CONFIG_KRETPROBES
+static inline int kretprobed(unsigned long addr)
+{
+       return addr == (unsigned long)kretprobe_trampoline;
+}
+#else
+static inline int kretprobed(unsigned long addr)
+{
+       return 0;
+}
+#endif /* CONFIG_KRETPROBES */
+
 static int
 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
 {
@@ -1375,7 +1464,10 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
        case TRACE_FN:
                seq_print_ip_sym(s, entry->fn.ip, sym_flags);
                trace_seq_puts(s, " (");
-               seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
+               if (kretprobed(entry->fn.parent_ip))
+                       trace_seq_puts(s, KRETPROBE_MSG);
+               else
+                       seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
                trace_seq_puts(s, ")\n");
                break;
        case TRACE_CTX:
@@ -1455,8 +1547,11 @@ static int print_trace_fmt(struct trace_iterator *iter)
                        ret = trace_seq_printf(s, " <-");
                        if (!ret)
                                return 0;
-                       ret = seq_print_ip_sym(s, entry->fn.parent_ip,
-                                              sym_flags);
+                       if (kretprobed(entry->fn.parent_ip))
+                               ret = trace_seq_puts(s, KRETPROBE_MSG);
+                       else
+                               ret = seq_print_ip_sym(s, entry->fn.parent_ip,
+                                                      sym_flags);
                        if (!ret)
                                return 0;
                }
@@ -2300,11 +2395,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
        if (!iter)
                return -ENOMEM;
 
+       mutex_lock(&trace_types_lock);
        iter->tr = &global_trace;
        iter->trace = current_trace;
-
        filp->private_data = iter;
 
+       if (iter->trace->pipe_open)
+               iter->trace->pipe_open(iter);
+       mutex_unlock(&trace_types_lock);
+
        return 0;
 }
 
@@ -2348,40 +2447,35 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 {
        struct trace_iterator *iter = filp->private_data;
        struct trace_array_cpu *data;
-       struct trace_array *tr = iter->tr;
-       struct tracer *tracer = iter->trace;
        static cpumask_t mask;
-       static int start;
        unsigned long flags;
 #ifdef CONFIG_FTRACE
        int ftrace_save;
 #endif
-       int read = 0;
        int cpu;
-       int len;
-       int ret;
+       ssize_t sret;
 
        /* return any leftover data */
-       if (iter->seq.len > start) {
-               len = iter->seq.len - start;
-               if (cnt > len)
-                       cnt = len;
-               ret = copy_to_user(ubuf, iter->seq.buffer + start, cnt);
-               if (ret)
-                       cnt = -EFAULT;
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+       if (sret != -EBUSY)
+               return sret;
+       sret = 0;
 
-               start += len;
+       trace_seq_reset(&iter->seq);
 
-               return cnt;
+       mutex_lock(&trace_types_lock);
+       if (iter->trace->read) {
+               sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+               if (sret)
+                       goto out;
        }
 
-       trace_seq_reset(&iter->seq);
-       start = 0;
-
        while (trace_empty(iter)) {
 
-               if ((filp->f_flags & O_NONBLOCK))
-                       return -EAGAIN;
+               if ((filp->f_flags & O_NONBLOCK)) {
+                       sret = -EAGAIN;
+                       goto out;
+               }
 
                /*
                 * This is a make-shift waitqueue. The reason we don't use
@@ -2395,16 +2489,22 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
                set_current_state(TASK_INTERRUPTIBLE);
                iter->tr->waiter = current;
 
-               /* sleep for one second, and try again. */
-               schedule_timeout(HZ);
+               mutex_unlock(&trace_types_lock);
+
+               /* sleep for 100 msecs, and try again. */
+               schedule_timeout(HZ/10);
+
+               mutex_lock(&trace_types_lock);
 
                iter->tr->waiter = NULL;
 
-               if (signal_pending(current))
-                       return -EINTR;
+               if (signal_pending(current)) {
+                       sret = -EINTR;
+                       goto out;
+               }
 
                if (iter->trace != current_trace)
-                       return 0;
+                       goto out;
 
                /*
                 * We block until we read something and tracing is disabled.
@@ -2423,14 +2523,15 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
        /* stop when tracing is finished */
        if (trace_empty(iter))
-               return 0;
+               goto out;
 
        if (cnt >= PAGE_SIZE)
                cnt = PAGE_SIZE - 1;
 
-       memset(iter, 0, sizeof(*iter));
-       iter->tr = tr;
-       iter->trace = tracer;
+       /* reset all but tr, trace, and overruns */
+       memset(&iter->seq, 0,
+              sizeof(struct trace_iterator) -
+              offsetof(struct trace_iterator, seq));
        iter->pos = -1;
 
        /*
@@ -2460,9 +2561,15 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        for_each_cpu_mask(cpu, mask) {
                data = iter->tr->data[cpu];
                __raw_spin_lock(&data->lock);
+
+               if (data->overrun > iter->last_overrun[cpu])
+                       iter->overrun[cpu] +=
+                               data->overrun - iter->last_overrun[cpu];
+               iter->last_overrun[cpu] = data->overrun;
        }
 
        while (find_next_entry_inc(iter) != NULL) {
+               int ret;
                int len = iter->seq.len;
 
                ret = print_trace_line(iter);
@@ -2493,21 +2600,16 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        local_irq_restore(flags);
 
        /* Now copy what we have to the user */
-       read = iter->seq.len;
-       if (read > cnt)
-               read = cnt;
-
-       ret = copy_to_user(ubuf, iter->seq.buffer, read);
-
-       if (read < iter->seq.len)
-               start = read;
-       else
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+       if (iter->seq.readpos >= iter->seq.len)
                trace_seq_reset(&iter->seq);
+       if (sret == -EBUSY)
+               sret = 0;
 
-       if (ret)
-               read = -EFAULT;
+out:
+       mutex_unlock(&trace_types_lock);
 
-       return read;
+       return sret;
 }
 
 static ssize_t
@@ -2528,7 +2630,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 {
        unsigned long val;
        char buf[64];
-       int ret;
+       int i, ret;
 
        if (cnt >= sizeof(buf))
                return -EINVAL;
@@ -2556,20 +2658,56 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        }
 
        if (val > global_trace.entries) {
+               long pages_requested;
+               unsigned long freeable_pages;
+
+               /* make sure we have enough memory before mapping */
+               pages_requested =
+                       (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
+
+               /* account for each buffer (and max_tr) */
+               pages_requested *= tracing_nr_buffers * 2;
+
+               /* Check for overflow */
+               if (pages_requested < 0) {
+                       cnt = -ENOMEM;
+                       goto out;
+               }
+
+               freeable_pages = determine_dirtyable_memory();
+
+               /* we only allow to request 1/4 of useable memory */
+               if (pages_requested >
+                   ((freeable_pages + tracing_pages_allocated) / 4)) {
+                       cnt = -ENOMEM;
+                       goto out;
+               }
+
                while (global_trace.entries < val) {
                        if (trace_alloc_page()) {
                                cnt = -ENOMEM;
                                goto out;
                        }
+                       /* double check that we don't go over the known pages */
+                       if (tracing_pages_allocated > pages_requested)
+                               break;
                }
+
        } else {
                /* include the number of entries in val (inc of page entries) */
                while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
                        trace_free_page();
        }
 
+       /* check integrity */
+       for_each_tracing_cpu(i)
+               check_pages(global_trace.data[i]);
+
        filp->f_pos += cnt;
 
+       /* If check pages failed, return ENOMEM */
+       if (tracing_disabled)
+               cnt = -ENOMEM;
  out:
        max_tr.entries = global_trace.entries;
        mutex_unlock(&trace_types_lock);
@@ -2741,6 +2879,7 @@ static int trace_alloc_page(void)
        struct page *page, *tmp;
        LIST_HEAD(pages);
        void *array;
+       unsigned pages_allocated = 0;
        int i;
 
        /* first allocate a page for each CPU */
@@ -2752,6 +2891,7 @@ static int trace_alloc_page(void)
                        goto free_pages;
                }
 
+               pages_allocated++;
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
 
@@ -2763,6 +2903,7 @@ static int trace_alloc_page(void)
                               "for trace buffer!\n");
                        goto free_pages;
                }
+               pages_allocated++;
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
 #endif
@@ -2784,6 +2925,7 @@ static int trace_alloc_page(void)
                SetPageLRU(page);
 #endif
        }
+       tracing_pages_allocated += pages_allocated;
        global_trace.entries += ENTRIES_PER_PAGE;
 
        return 0;
@@ -2818,6 +2960,8 @@ static int trace_free_page(void)
                page = list_entry(p, struct page, lru);
                ClearPageLRU(page);
                list_del(&page->lru);
+               tracing_pages_allocated--;
+               tracing_pages_allocated--;
                __free_page(page);
 
                tracing_reset(data);
@@ -2854,8 +2998,6 @@ __init static int tracer_alloc_buffers(void)
        int ret = -ENOMEM;
        int i;
 
-       global_trace.ctrl = tracer_enabled;
-
        /* TODO: make the number of buffers hot pluggable with CPUS */
        tracing_nr_buffers = num_possible_cpus();
        tracing_buffer_mask = cpu_possible_map;
@@ -2912,9 +3054,8 @@ __init static int tracer_alloc_buffers(void)
        }
        max_tr.entries = global_trace.entries;
 
-       pr_info("tracer: %d pages allocated for %ld",
-               pages, trace_nr_entries);
-       pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
+       pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n",
+               pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE);
        pr_info("   actual entries %ld\n", global_trace.entries);
 
        tracer_init_debugfs();
@@ -2925,6 +3066,7 @@ __init static int tracer_alloc_buffers(void)
        current_trace = &no_tracer;
 
        /* All seems OK, enable tracing */
+       global_trace.ctrl = tracer_enabled;
        tracing_disabled = 0;
 
        return 0;