err.no Git - linux-2.6/blobdiff - kernel/trace/trace.c
Merge branch 'linus' into tracing/mmiotrace-mergefixups
[linux-2.6] / kernel / trace / trace.c
index ca0d6ff74c114a8f5a0a4a5b2751d9a6f0b7cee9..4dcc4e85c5d64d6615668fa04f99166d60d69280 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/poll.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/writeback.h>
 
 #include <linux/stacktrace.h>
 
@@ -51,6 +52,8 @@ static int trace_free_page(void);
 
 static int tracing_disabled = 1;
 
+static unsigned long tracing_pages_allocated; /* total pages currently held by all trace buffers (incl. max_tr) */
+
 long
 ns2usecs(cycle_t nsec)
 {
@@ -400,6 +403,26 @@ static void
 trace_seq_reset(struct trace_seq *s)
 {
        s->len = 0;
+       s->readpos = 0; /* restart user reads from the beginning of the buffer */
+}
+
+/* Copy up to @cnt unread bytes from @s to the user buffer @ubuf.
+ * Returns bytes copied, -EBUSY when nothing is unread, -EFAULT on fault. */
+ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
+{
+       int len;
+       int ret;
+
+       if (s->len <= s->readpos)
+               return -EBUSY; /* nothing unread in the seq buffer */
+
+       len = s->len - s->readpos;
+       if (cnt > len)
+               cnt = len; /* clamp to what is actually available */
+       ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+       if (ret)
+               return -EFAULT; /* copy_to_user returns the uncopied byte count */
+
+       s->readpos += cnt; /* advance only by what was copied — advancing by len would drop the uncopied tail */
+       return cnt;
 }
 
 static void
@@ -808,29 +831,48 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
                trace_function(tr, data, ip, parent_ip, flags);
 }
 
-void
-__trace_special(void *__tr, void *__data,
-               unsigned long arg1, unsigned long arg2, unsigned long arg3)
+#ifdef CONFIG_MMIOTRACE /* MMIO-trace entry points, replacing old __trace_special site */
+void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data,
+                                               struct mmiotrace_rw *rw)
 {
-       struct trace_array_cpu *data = __data;
-       struct trace_array *tr = __tr;
        struct trace_entry *entry;
        unsigned long irq_flags;
 
        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
+
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, 0);
-       entry->type             = TRACE_SPECIAL;
-       entry->special.arg1     = arg1;
-       entry->special.arg2     = arg2;
-       entry->special.arg3     = arg3;
+       entry->type             = TRACE_MMIO_RW; /* record an MMIO read/write event */
+       entry->mmiorw           = *rw; /* struct copy into the ring entry */
+
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
 
        trace_wake_up();
 }
 
+void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data,
+                                               struct mmiotrace_map *map)
+{
+       struct trace_entry *entry;
+       unsigned long irq_flags;
+
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
+
+       entry                   = tracing_get_trace_entry(tr, data);
+       tracing_generic_entry_update(entry, 0);
+       entry->type             = TRACE_MMIO_MAP; /* record an MMIO (un)mapping event */
+       entry->mmiomap          = *map; /* struct copy into the ring entry */
+
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
+
+       trace_wake_up(); /* wake readers blocked on the trace pipe */
+}
+#endif
+
 void __trace_stack(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long flags,
@@ -856,6 +898,30 @@ void __trace_stack(struct trace_array *tr,
        save_stack_trace(&trace);
 }
 
+void
+__trace_special(void *__tr, void *__data,
+               unsigned long arg1, unsigned long arg2, unsigned long arg3) /* ad-hoc event with three opaque args */
+{
+       struct trace_array_cpu *data = __data;
+       struct trace_array *tr = __tr;
+       struct trace_entry *entry;
+       unsigned long irq_flags;
+
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
+       entry                   = tracing_get_trace_entry(tr, data);
+       tracing_generic_entry_update(entry, 0);
+       entry->type             = TRACE_SPECIAL;
+       entry->special.arg1     = arg1;
+       entry->special.arg2     = arg2;
+       entry->special.arg3     = arg3;
+       __trace_stack(tr, data, irq_flags, 4); /* also record a backtrace; skips 4 frames — presumably up to the caller, verify */
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
+
+       trace_wake_up(); /* wake readers blocked on the trace pipe */
+}
+
 void
 tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
@@ -877,7 +943,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->ctx.next_pid     = next->pid;
        entry->ctx.next_prio    = next->prio;
        entry->ctx.next_state   = next->state;
-       __trace_stack(tr, data, flags, 4);
+       __trace_stack(tr, data, flags, 5); /* NOTE(review): skip depth bumped by one — presumably an extra call frame was introduced; verify */
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
 }
@@ -903,7 +969,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->ctx.next_pid     = wakee->pid;
        entry->ctx.next_prio    = wakee->prio;
        entry->ctx.next_state   = wakee->state;
-       __trace_stack(tr, data, flags, 5);
+       __trace_stack(tr, data, flags, 6); /* NOTE(review): skip depth bumped by one — presumably an extra call frame was introduced; verify */
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
 
@@ -2360,46 +2426,32 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        struct trace_iterator *iter = filp->private_data;
        struct trace_array_cpu *data;
        static cpumask_t mask;
-       static int start;
        unsigned long flags;
 #ifdef CONFIG_FTRACE
        int ftrace_save;
 #endif
-       int read = 0;
        int cpu;
-       int len;
-       int ret;
+       ssize_t sret; /* replaces the old static 'start' bookkeeping: position now lives in iter->seq.readpos */
 
        /* return any leftover data */
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt); /* flush bytes left over from a previous partial read */
+       if (sret != -EBUSY)
+               return sret; /* either copied leftovers or faulted */
+       sret = 0;
 
-       if (iter->seq.len > start) {
-               len = iter->seq.len - start;
-               if (cnt > len)
-                       cnt = len;
-               ret = copy_to_user(ubuf, iter->seq.buffer + start, cnt);
-               if (ret)
-                       cnt = -EFAULT;
-
-               start += len;
-
-               return cnt;
-       }
+       trace_seq_reset(&iter->seq); /* seq fully consumed; start a fresh batch */
 
        mutex_lock(&trace_types_lock);
        if (iter->trace->read) {
-               ret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
-               if (ret) {
-                       read = ret;
+               sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+               if (sret)
                        goto out;
-               }
        }
 
-       trace_seq_reset(&iter->seq);
-       start = 0;
-
        while (trace_empty(iter)) {
 
                if ((filp->f_flags & O_NONBLOCK)) {
-                       read = -EAGAIN;
+                       sret = -EAGAIN;
                        goto out;
                }
 
@@ -2425,7 +2477,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
                iter->tr->waiter = NULL;
 
                if (signal_pending(current)) {
-                       read = -EINTR;
+                       sret = -EINTR;
                        goto out;
                }
 
@@ -2495,6 +2547,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        }
 
        while (find_next_entry_inc(iter) != NULL) {
+               int ret; /* now local to the loop; the function-wide 'ret' is gone */
                int len = iter->seq.len;
 
                ret = print_trace_line(iter);
@@ -2525,24 +2578,16 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        local_irq_restore(flags);
 
        /* Now copy what we have to the user */
-       read = iter->seq.len;
-       if (read > cnt)
-               read = cnt;
-
-       ret = copy_to_user(ubuf, iter->seq.buffer, read);
-
-       if (read < iter->seq.len)
-               start = read;
-       else
+       sret = trace_seq_to_user(&iter->seq, ubuf, cnt); /* copy the freshly formatted lines; readpos tracks the remainder */
+       if (iter->seq.readpos >= iter->seq.len)
                trace_seq_reset(&iter->seq);
-
-       if (ret)
-               read = -EFAULT;
+       if (sret == -EBUSY)
+               sret = 0; /* nothing was produced; report a zero-byte read */
 
 out:
        mutex_unlock(&trace_types_lock);
 
-       return read;
+       return sret;
 }
 
 static ssize_t
@@ -2591,12 +2636,41 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        }
 
        if (val > global_trace.entries) {
+               long pages_requested;
+               unsigned long freeable_pages;
+
+               /* make sure we have enough memory before mapping */
+               pages_requested =
+                       (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
+
+               /* account for each buffer (and max_tr) */
+               pages_requested *= tracing_nr_buffers * 2;
+
+               /* Check for overflow — NOTE(review): this relies on signed wraparound going negative, which is undefined behavior in C; check against LONG_MAX before multiplying instead */
+               if (pages_requested < 0) {
+                       cnt = -ENOMEM;
+                       goto out;
+               }
+
+               freeable_pages = determine_dirtyable_memory();
+
+               /* allow requesting at most 1/4 of usable memory */
+               if (pages_requested >
+                   ((freeable_pages + tracing_pages_allocated) / 4)) {
+                       cnt = -ENOMEM;
+                       goto out;
+               }
+
                while (global_trace.entries < val) {
                        if (trace_alloc_page()) {
                                cnt = -ENOMEM;
                                goto out;
                        }
+                       /* double check that we don't go over the known pages */
+                       if (tracing_pages_allocated > pages_requested)
+                               break;
                }
+
        } else {
                /* include the number of entries in val (inc of page entries) */
                while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
@@ -2776,6 +2850,7 @@ static int trace_alloc_page(void)
        struct page *page, *tmp;
        LIST_HEAD(pages);
        void *array;
+       unsigned pages_allocated = 0; /* pages grabbed so far by this call only */
        int i;
 
        /* first allocate a page for each CPU */
@@ -2787,6 +2862,7 @@ static int trace_alloc_page(void)
                        goto free_pages;
                }
 
+               pages_allocated++;
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
 
@@ -2798,6 +2874,7 @@ static int trace_alloc_page(void)
                               "for trace buffer!\n");
                        goto free_pages;
                }
+               pages_allocated++; /* the max_tr twin page */
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
 #endif
@@ -2819,6 +2896,7 @@ static int trace_alloc_page(void)
                SetPageLRU(page);
 #endif
        }
+       tracing_pages_allocated += pages_allocated; /* publish to the global count only once the whole allocation succeeded */
        global_trace.entries += ENTRIES_PER_PAGE;
 
        return 0;
@@ -2853,6 +2931,8 @@ static int trace_free_page(void)
                page = list_entry(p, struct page, lru);
                ClearPageLRU(page);
                list_del(&page->lru);
+               tracing_pages_allocated--; /* one page freed from the global buffer */
+               tracing_pages_allocated--; /* ...and its max_tr twin; NOTE(review): decremented unconditionally — verify this matches the allocation path when max_tr pages are not configured */
                __free_page(page);
 
                tracing_reset(data);