#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
+#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
+#include <linux/stacktrace.h>
+
#include "trace.h"
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;
+static unsigned long __read_mostly tracing_nr_buffers;
+static cpumask_t __read_mostly tracing_buffer_mask;
+
+#define for_each_tracing_cpu(cpu) \
+ for_each_cpu_mask(cpu, tracing_buffer_mask)
+
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly = {
+ .name = "none",
+};
+
+static int trace_alloc_page(void);
+static int trace_free_page(void);
+
static int tracing_disabled = 1;
-static long notrace
+long
ns2usecs(cycle_t nsec)
{
nsec += 500;
do_div(nsec, 1000);
return nsec;
}
-static const int time_sync_freq_max = 128;
-static const cycle_t time_sync_thresh = 100000;
-
-static DEFINE_PER_CPU(cycle_t, time_offset);
-static DEFINE_PER_CPU(cycle_t, prev_cpu_time);
-static DEFINE_PER_CPU(int, time_sync_count);
-static DEFINE_PER_CPU(int, time_sync_freq);
-
-/*
- * Global lock which we take every now and then to synchronize
- * the CPUs time. This method is not warp-safe, but it's good
- * enough to synchronize slowly diverging time sources and thus
- * it's good enough for tracing:
- */
-static DEFINE_SPINLOCK(time_sync_lock);
-static cycle_t prev_global_time;
-
-static notrace cycle_t __ftrace_now_sync(cycles_t time, int cpu)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&time_sync_lock, flags);
-
- /*
- * Update the synchronization frequency:
- */
- if (per_cpu(time_sync_freq, cpu) < time_sync_freq_max)
- per_cpu(time_sync_freq, cpu) *= 2;
- per_cpu(time_sync_count, cpu) = per_cpu(time_sync_freq, cpu);
-
- if (time < prev_global_time) {
- per_cpu(time_offset, cpu) += prev_global_time - time;
- time = prev_global_time;
- } else {
- prev_global_time = time;
- }
-
- spin_unlock_irqrestore(&time_sync_lock, flags);
-
- return time;
-}
-
-notrace cycle_t ftrace_now(int cpu)
+cycle_t ftrace_now(int cpu)
{
- cycle_t prev_cpu_time, time, delta_time;
-
- prev_cpu_time = per_cpu(prev_cpu_time, cpu);
- time = sched_clock() + per_cpu(time_offset, cpu);
- delta_time = time-prev_cpu_time;
-
- if (unlikely(delta_time > time_sync_thresh ||
- --per_cpu(time_sync_count, cpu) <= 0))
- time = __ftrace_now_sync(time, cpu);
-
- return time;
+ return cpu_clock(cpu);
}
static struct trace_array global_trace;
static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
-static int tracer_enabled;
-static unsigned long trace_nr_entries = 16384UL;
+static int tracer_enabled = 1;
+static unsigned long trace_nr_entries = 65536UL;
static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
static int max_tracer_type_len;
static DEFINE_MUTEX(trace_types_lock);
+static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
+
+unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
+
+void trace_wake_up(void)
+{
+ /*
+ * The runqueue_is_locked() can fail, but this is the best we
+ * have for now:
+ */
+ if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+ wake_up(&trace_wait);
+}
#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
static int __init set_nr_entries(char *str)
{
+ unsigned long nr_entries;
+ int ret;
+
if (!str)
return 0;
- trace_nr_entries = simple_strtoul(str, &str, 0);
+ ret = strict_strtoul(str, 0, &nr_entries);
+ /* nr_entries can not be zero */
+ if (ret < 0 || nr_entries == 0)
+ return 0;
+ trace_nr_entries = nr_entries;
return 1;
}
__setup("trace_entries=", set_nr_entries);
return nsecs / 1000;
}
-enum trace_type {
- __TRACE_FIRST_TYPE = 0,
-
- TRACE_FN,
- TRACE_CTX,
- TRACE_SPECIAL,
-
- __TRACE_LAST_TYPE
-};
-
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
TRACE_FLAG_NEED_RESCHED = 0x02,
TRACE_FLAG_HARDIRQ = 0x04,
TRACE_FLAG_SOFTIRQ = 0x08,
};
-enum trace_iterator_flags {
- TRACE_ITER_PRINT_PARENT = 0x01,
- TRACE_ITER_SYM_OFFSET = 0x02,
- TRACE_ITER_SYM_ADDR = 0x04,
- TRACE_ITER_VERBOSE = 0x08,
- TRACE_ITER_RAW = 0x10,
- TRACE_ITER_BIN = 0x20,
-};
-
#define TRACE_ITER_SYM_MASK \
(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
"sym-addr",
"verbose",
"raw",
+ "hex",
"bin",
+ "block",
+ "stacktrace",
+ "sched-tree",
NULL
};
-static unsigned trace_flags;
-
-static DEFINE_SPINLOCK(ftrace_max_lock);
+static raw_spinlock_t ftrace_max_lock =
+ (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
/*
* Copy the new maximum trace into the separate maximum-trace
* structure. (this way the maximum trace is permanently saved,
* for later retrieval via /debugfs/tracing/latency_trace)
*/
-static notrace void
+static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct trace_array_cpu *data = tr->data[cpu];
return page_address(page);
}
-static notrace int
+int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
int len = (PAGE_SIZE - 1) - s->len;
va_end(ap);
/* If we can't write it all, don't bother writing anything */
- if (ret > len)
+ if (ret >= len)
return 0;
s->len += ret;
return len;
}
-static notrace int
+static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
int len = strlen(str);
return len;
}
-static notrace int
+static int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
if (s->len >= (PAGE_SIZE - 1))
return 1;
}
-static notrace int
+static int
trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
if (len > ((PAGE_SIZE - 1) - s->len))
return len;
}
-static notrace void
+#define HEX_CHARS 17
+static const char hex2asc[] = "0123456789abcdef";
+
+static int
+trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
+{
+ unsigned char hex[HEX_CHARS];
+ unsigned char *data = mem;
+ unsigned char byte;
+ int i, j;
+
+ BUG_ON(len >= HEX_CHARS);
+
+#ifdef __BIG_ENDIAN
+ for (i = 0, j = 0; i < len; i++) {
+#else
+ for (i = len-1, j = 0; i >= 0; i--) {
+#endif
+ byte = data[i];
+
+ hex[j++] = hex2asc[byte & 0x0f];
+ hex[j++] = hex2asc[byte >> 4];
+ }
+ hex[j++] = ' ';
+
+ return trace_seq_putmem(s, hex, j);
+}
+
+static void
trace_seq_reset(struct trace_seq *s)
{
s->len = 0;
}
-static notrace void
+static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
trace_seq_reset(s);
}
-notrace static void
+static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
struct list_head flip_pages;
check_pages(tr2);
}
-notrace void
+void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct trace_array_cpu *data;
int i;
WARN_ON_ONCE(!irqs_disabled());
- spin_lock(&ftrace_max_lock);
+ __raw_spin_lock(&ftrace_max_lock);
/* clear out all the previous traces */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
flip_trace(max_tr.data[i], data);
tracing_reset(data);
}
__update_max_tr(tr, tsk, cpu);
- spin_unlock(&ftrace_max_lock);
+ __raw_spin_unlock(&ftrace_max_lock);
}
/**
* @tsk - task with the latency
* @cpu - the cpu of the buffer to copy.
*/
-notrace void
+void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct trace_array_cpu *data = tr->data[cpu];
int i;
WARN_ON_ONCE(!irqs_disabled());
- spin_lock(&ftrace_max_lock);
- for_each_possible_cpu(i)
+ __raw_spin_lock(&ftrace_max_lock);
+ for_each_tracing_cpu(i)
tracing_reset(max_tr.data[i]);
flip_trace(max_tr.data[cpu], data);
tracing_reset(data);
__update_max_tr(tr, tsk, cpu);
- spin_unlock(&ftrace_max_lock);
+ __raw_spin_unlock(&ftrace_max_lock);
}
int register_tracer(struct tracer *type)
* internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
if (!head_page(data))
continue;
goto out;
}
/* Only reset on passing, to avoid touching corrupted buffers */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
if (!head_page(data))
continue;
mutex_unlock(&trace_types_lock);
}
-notrace void tracing_reset(struct trace_array_cpu *data)
+void tracing_reset(struct trace_array_cpu *data)
{
data->trace_idx = 0;
data->trace_head = data->trace_tail = head_page(data);
data->trace_tail_idx = 0;
}
-#ifdef CONFIG_FTRACE
-static notrace void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
-{
- struct trace_array *tr = &global_trace;
- struct trace_array_cpu *data;
- unsigned long flags;
- long disabled;
- int cpu;
-
- if (unlikely(!tracer_enabled))
- return;
-
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
-
- if (likely(disabled == 1))
- ftrace(tr, data, ip, parent_ip, flags);
-
- atomic_dec(&data->disabled);
- local_irq_restore(flags);
-}
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
- .func = function_trace_call,
-};
-#endif
-
-notrace void tracing_start_function_trace(void)
-{
- register_ftrace_function(&trace_ops);
-}
-
-notrace void tracing_stop_function_trace(void)
-{
- unregister_ftrace_function(&trace_ops);
-}
-
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
cmdline_idx = 0;
}
-notrace void trace_stop_cmdline_recording(void);
+void trace_stop_cmdline_recording(void);
-static notrace void trace_save_cmdline(struct task_struct *tsk)
+static void trace_save_cmdline(struct task_struct *tsk)
{
unsigned map;
unsigned idx;
spin_unlock(&trace_cmdline_lock);
}
-static notrace char *trace_find_cmdline(int pid)
+static char *trace_find_cmdline(int pid)
{
char *cmdline = "<...>";
unsigned map;
return cmdline;
}
-notrace void tracing_record_cmdline(struct task_struct *tsk)
+void tracing_record_cmdline(struct task_struct *tsk)
{
if (atomic_read(&trace_record_cmdline_disabled))
return;
trace_save_cmdline(tsk);
}
-static inline notrace struct list_head *
+static inline struct list_head *
trace_next_list(struct trace_array_cpu *data, struct list_head *next)
{
/*
return next;
}
-static inline notrace void *
+static inline void *
trace_next_page(struct trace_array_cpu *data, void *addr)
{
struct list_head *next;
return page_address(page);
}
-static inline notrace struct trace_entry *
+static inline struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
{
unsigned long idx, idx_next;
return entry;
}
-static inline notrace void
+static inline void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
{
struct task_struct *tsk = current;
pc = preempt_count();
entry->preempt_count = pc & 0xff;
- entry->pid = tsk->pid;
+ entry->pid = (tsk) ? tsk->pid : 0;
entry->t = ftrace_now(raw_smp_processor_id());
entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
-notrace void
-ftrace(struct trace_array *tr, struct trace_array_cpu *data,
- unsigned long ip, unsigned long parent_ip, unsigned long flags)
+void
+trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+ unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
struct trace_entry *entry;
+ unsigned long irq_flags;
- spin_lock(&data->lock);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_FN;
entry->fn.ip = ip;
entry->fn.parent_ip = parent_ip;
- spin_unlock(&data->lock);
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
}
-notrace void
-trace_special(struct trace_array *tr, struct trace_array_cpu *data,
- unsigned long arg1, unsigned long arg2, unsigned long arg3)
+void
+ftrace(struct trace_array *tr, struct trace_array_cpu *data,
+ unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
+ if (likely(!atomic_read(&data->disabled)))
+ trace_function(tr, data, ip, parent_ip, flags);
+}
+
+void
+__trace_special(void *__tr, void *__data,
+ unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+ struct trace_array_cpu *data = __data;
+ struct trace_array *tr = __tr;
struct trace_entry *entry;
+ unsigned long irq_flags;
- spin_lock(&data->lock);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, 0);
entry->type = TRACE_SPECIAL;
entry->special.arg1 = arg1;
entry->special.arg2 = arg2;
entry->special.arg3 = arg3;
- spin_unlock(&data->lock);
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
+
+ trace_wake_up();
}
-notrace void
+void __trace_stack(struct trace_array *tr,
+ struct trace_array_cpu *data,
+ unsigned long flags,
+ int skip)
+{
+ struct trace_entry *entry;
+ struct stack_trace trace;
+
+ if (!(trace_flags & TRACE_ITER_STACKTRACE))
+ return;
+
+ entry = tracing_get_trace_entry(tr, data);
+ tracing_generic_entry_update(entry, flags);
+ entry->type = TRACE_STACK;
+
+ memset(&entry->stack, 0, sizeof(entry->stack));
+
+ trace.nr_entries = 0;
+ trace.max_entries = FTRACE_STACK_ENTRIES;
+ trace.skip = skip;
+ trace.entries = entry->stack.caller;
+
+ save_stack_trace(&trace);
+}
+
+void
tracing_sched_switch_trace(struct trace_array *tr,
struct trace_array_cpu *data,
- struct task_struct *prev, struct task_struct *next,
+ struct task_struct *prev,
+ struct task_struct *next,
unsigned long flags)
{
struct trace_entry *entry;
+ unsigned long irq_flags;
- spin_lock(&data->lock);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_CTX;
entry->ctx.prev_state = prev->state;
entry->ctx.next_pid = next->pid;
entry->ctx.next_prio = next->prio;
- spin_unlock(&data->lock);
+ entry->ctx.next_state = next->state;
+ __trace_stack(tr, data, flags, 4);
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
+}
+
+void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+ struct trace_array_cpu *data,
+ struct task_struct *wakee,
+ struct task_struct *curr,
+ unsigned long flags)
+{
+ struct trace_entry *entry;
+ unsigned long irq_flags;
+
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
+ entry = tracing_get_trace_entry(tr, data);
+ tracing_generic_entry_update(entry, flags);
+ entry->type = TRACE_WAKE;
+ entry->ctx.prev_pid = curr->pid;
+ entry->ctx.prev_prio = curr->prio;
+ entry->ctx.prev_state = curr->state;
+ entry->ctx.next_pid = wakee->pid;
+ entry->ctx.next_prio = wakee->prio;
+ entry->ctx.next_state = wakee->state;
+ __trace_stack(tr, data, flags, 5);
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
+
+ trace_wake_up();
+}
+
+#ifdef CONFIG_FTRACE
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+
+ if (unlikely(!tracer_enabled))
+ return;
+
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1))
+ trace_function(tr, data, ip, parent_ip, flags);
+
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
}
+static struct ftrace_ops trace_ops __read_mostly =
+{
+ .func = function_trace_call,
+};
+
+void tracing_start_function_trace(void)
+{
+ register_ftrace_function(&trace_ops);
+}
+
+void tracing_stop_function_trace(void)
+{
+ unregister_ftrace_function(&trace_ops);
+}
+#endif
+
enum trace_file_type {
TRACE_FILE_LAT_FMT = 1,
};
array = page_address(page);
- /* Still possible to catch up to the tail */
- if (iter->next_idx[cpu] && array == data->trace_tail &&
- iter->next_page_idx[cpu] == data->trace_tail_idx)
- return NULL;
-
WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
return &array[iter->next_page_idx[cpu]];
}
-static struct notrace trace_entry *
+static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
struct trace_array *tr = iter->tr;
int next_cpu = -1;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
if (!head_page(tr->data[cpu]))
continue;
ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
return next;
}
-static notrace void trace_iterator_increment(struct trace_iterator *iter)
+static void trace_iterator_increment(struct trace_iterator *iter)
{
iter->idx++;
iter->next_idx[iter->cpu]++;
}
}
-static notrace void trace_consume(struct trace_iterator *iter)
+static void trace_consume(struct trace_iterator *iter)
{
struct trace_array_cpu *data = iter->tr->data[iter->cpu];
data->trace_idx = 0;
}
-static notrace void *find_next_entry_inc(struct trace_iterator *iter)
+static void *find_next_entry_inc(struct trace_iterator *iter)
{
struct trace_entry *next;
int next_cpu = -1;
return next ? iter : NULL;
}
-static notrace void *s_next(struct seq_file *m, void *v, loff_t *pos)
+static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
struct trace_iterator *iter = m->private;
void *last_ent = iter->ent;
mutex_lock(&trace_types_lock);
- if (!current_trace || current_trace != iter->trace)
+ if (!current_trace || current_trace != iter->trace) {
+ mutex_unlock(&trace_types_lock);
return NULL;
+ }
atomic_inc(&trace_record_cmdline_disabled);
iter->prev_ent = NULL;
iter->prev_cpu = -1;
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
iter->next_idx[i] = 0;
iter->next_page[i] = NULL;
}
# define IP_FMT "%016lx"
#endif
-static notrace int
+static int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
int ret;
return ret;
}
-static notrace void print_lat_help_header(struct seq_file *m)
+static void print_lat_help_header(struct seq_file *m)
{
seq_puts(m, "# _------=> CPU# \n");
seq_puts(m, "# / _-----=> irqs-off \n");
seq_puts(m, "# \\ / ||||| \\ | / \n");
}
-static notrace void print_func_help_header(struct seq_file *m)
+static void print_func_help_header(struct seq_file *m)
{
seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
seq_puts(m, "# | | | | |\n");
}
-static notrace void
+static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
if (type)
name = type->name;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
if (head_page(tr->data[cpu])) {
total += tr->data[cpu]->trace_idx;
if (tr->data[cpu]->trace_idx > tr->entries)
seq_puts(m, "\n");
}
-static notrace void
+static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
int hardirq, softirq;
hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
- if (hardirq && softirq)
+ if (hardirq && softirq) {
trace_seq_putc(s, 'H');
- else {
- if (hardirq)
+ } else {
+ if (hardirq) {
trace_seq_putc(s, 'h');
- else {
+ } else {
if (softirq)
trace_seq_putc(s, 's');
else
unsigned long preempt_mark_thresh = 100;
-static notrace void
+static void
lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
unsigned long rel_usecs)
{
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
-static notrace int
+static int
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
struct trace_seq *s = &iter->seq;
unsigned long abs_usecs;
unsigned long rel_usecs;
char *comm;
- int S;
+ int S, T;
+ int i;
+ unsigned state;
if (!next_entry)
next_entry = entry;
trace_seq_puts(s, ")\n");
break;
case TRACE_CTX:
- S = entry->ctx.prev_state < sizeof(state_to_char) ?
- state_to_char[entry->ctx.prev_state] : 'X';
+ case TRACE_WAKE:
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
+
+ state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0;
+ S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
comm = trace_find_cmdline(entry->ctx.next_pid);
- trace_seq_printf(s, " %d:%d:%c --> %d:%d %s\n",
+ trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
- S,
+ S, entry->type == TRACE_CTX ? "==>" : " +",
entry->ctx.next_pid,
entry->ctx.next_prio,
- comm);
+ T, comm);
break;
case TRACE_SPECIAL:
- trace_seq_printf(s, " %lx %lx %lx\n",
+ trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
break;
+ case TRACE_STACK:
+ for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+ if (i)
+ trace_seq_puts(s, " <= ");
+ seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
+ }
+ trace_seq_puts(s, "\n");
+ break;
default:
trace_seq_printf(s, "Unknown type %d\n", entry->type);
}
return 1;
}
-static notrace int print_trace_fmt(struct trace_iterator *iter)
+static int print_trace_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
unsigned long long t;
unsigned long secs;
char *comm;
- int S;
int ret;
+ int S, T;
+ int i;
entry = iter->ent;
return 0;
break;
case TRACE_CTX:
+ case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
- ret = trace_seq_printf(s, " %d:%d:%c ==> %d:%d\n",
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
+ ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
S,
+ entry->type == TRACE_CTX ? "==>" : " +",
entry->ctx.next_pid,
- entry->ctx.next_prio);
+ entry->ctx.next_prio,
+ T);
if (!ret)
return 0;
break;
case TRACE_SPECIAL:
- ret = trace_seq_printf(s, " %lx %lx %lx\n",
+ ret = trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
if (!ret)
return 0;
break;
+ case TRACE_STACK:
+ for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+ if (i) {
+ ret = trace_seq_puts(s, " <= ");
+ if (!ret)
+ return 0;
+ }
+ ret = seq_print_ip_sym(s, entry->stack.caller[i],
+ sym_flags);
+ if (!ret)
+ return 0;
+ }
+ ret = trace_seq_puts(s, "\n");
+ if (!ret)
+ return 0;
+ break;
}
return 1;
}
-static notrace int print_raw_fmt(struct trace_iterator *iter)
+static int print_raw_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
int ret;
- int S;
+ int S, T;
entry = iter->ent;
return 0;
break;
case TRACE_CTX:
+ case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
- ret = trace_seq_printf(s, "%d %d %c %d %d\n",
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
+ if (entry->type == TRACE_WAKE)
+ S = '+';
+ ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
S,
entry->ctx.next_pid,
- entry->ctx.next_prio);
+ entry->ctx.next_prio,
+ T);
if (!ret)
return 0;
break;
case TRACE_SPECIAL:
- ret = trace_seq_printf(s, " %lx %lx %lx\n",
+ case TRACE_STACK:
+ ret = trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
return 0; \
} while (0)
-static notrace int print_bin_fmt(struct trace_iterator *iter)
+#define SEQ_PUT_HEX_FIELD_RET(s, x) \
+do { \
+ if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
+ return 0; \
+} while (0)
+
+static int print_hex_fmt(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ unsigned char newline = '\n';
+ struct trace_entry *entry;
+ int S, T;
+
+ entry = iter->ent;
+
+ SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
+ SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
+ SEQ_PUT_HEX_FIELD_RET(s, entry->t);
+
+ switch (entry->type) {
+ case TRACE_FN:
+ SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip);
+ SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
+ break;
+ case TRACE_CTX:
+ case TRACE_WAKE:
+ S = entry->ctx.prev_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.prev_state] : 'X';
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
+ if (entry->type == TRACE_WAKE)
+ S = '+';
+ SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
+ SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio);
+ SEQ_PUT_HEX_FIELD_RET(s, S);
+ SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
+ SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
+ SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
+ SEQ_PUT_HEX_FIELD_RET(s, T);
+ break;
+ case TRACE_SPECIAL:
+ case TRACE_STACK:
+ SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
+ SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
+ SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);
+ break;
+ }
+ SEQ_PUT_FIELD_RET(s, newline);
+
+ return 1;
+}
+
+static int print_bin_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
+ SEQ_PUT_FIELD_RET(s, entry->ctx.next_state);
break;
case TRACE_SPECIAL:
+ case TRACE_STACK:
SEQ_PUT_FIELD_RET(s, entry->special.arg1);
SEQ_PUT_FIELD_RET(s, entry->special.arg2);
SEQ_PUT_FIELD_RET(s, entry->special.arg3);
struct trace_array_cpu *data;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
data = iter->tr->data[cpu];
if (head_page(data) && data->trace_idx &&
static int print_trace_line(struct trace_iterator *iter)
{
+ if (iter->trace && iter->trace->print_line)
+ return iter->trace->print_line(iter);
+
if (trace_flags & TRACE_ITER_BIN)
return print_bin_fmt(iter);
+ if (trace_flags & TRACE_ITER_HEX)
+ return print_hex_fmt(iter);
+
if (trace_flags & TRACE_ITER_RAW)
return print_raw_fmt(iter);
.show = s_show,
};
-static struct trace_iterator notrace *
+static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
struct trace_iterator *iter;
}
-static notrace void *
+static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct tracer *t = m->private;
};
static struct file_operations show_traces_fops = {
- .open = show_traces_open,
- .read = seq_read,
- .release = seq_release,
+ .open = show_traces_open,
+ .read = seq_read,
+ .release = seq_release,
+};
+
+/*
+ * Only trace on a CPU if the bitmask is set:
+ */
+static cpumask_t tracing_cpumask = CPU_MASK_ALL;
+
+/*
+ * When tracing/tracing_cpu_mask is modified then this holds
+ * the new bitmask we are about to install:
+ */
+static cpumask_t tracing_cpumask_new;
+
+/*
+ * The tracer itself will not take this lock, but still we want
+ * to provide a consistent cpumask to user-space:
+ */
+static DEFINE_MUTEX(tracing_cpumask_update_lock);
+
+/*
+ * Temporary storage for the character representation of the
+ * CPU bitmask (and one more byte for the newline):
+ */
+static char mask_str[NR_CPUS + 1];
+
+static ssize_t
+tracing_cpumask_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int len;
+
+ mutex_lock(&tracing_cpumask_update_lock);
+
+ len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+ if (count - len < 2) {
+ count = -EINVAL;
+ goto out_err;
+ }
+ len += sprintf(mask_str + len, "\n");
+ count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+
+out_err:
+ mutex_unlock(&tracing_cpumask_update_lock);
+
+ return count;
+}
+
+static ssize_t
+tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int err, cpu;
+
+ mutex_lock(&tracing_cpumask_update_lock);
+ err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
+ if (err)
+ goto err_unlock;
+
+ raw_local_irq_disable();
+ __raw_spin_lock(&ftrace_max_lock);
+ for_each_tracing_cpu(cpu) {
+ /*
+ * Increase/decrease the disabled counter if we are
+ * about to flip a bit in the cpumask:
+ */
+ if (cpu_isset(cpu, tracing_cpumask) &&
+ !cpu_isset(cpu, tracing_cpumask_new)) {
+ atomic_inc(&global_trace.data[cpu]->disabled);
+ }
+ if (!cpu_isset(cpu, tracing_cpumask) &&
+ cpu_isset(cpu, tracing_cpumask_new)) {
+ atomic_dec(&global_trace.data[cpu]->disabled);
+ }
+ }
+ __raw_spin_unlock(&ftrace_max_lock);
+ raw_local_irq_enable();
+
+ tracing_cpumask = tracing_cpumask_new;
+
+ mutex_unlock(&tracing_cpumask_update_lock);
+
+ return count;
+
+err_unlock:
+ mutex_unlock(&tracing_cpumask_update_lock);
+
+ return err;
+}
+
+static struct file_operations tracing_cpumask_fops = {
+ .open = tracing_open_generic,
+ .read = tracing_cpumask_read,
+ .write = tracing_cpumask_write,
};
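/*
 * Usage sketch for the new tracing_cpumask file (illustrative; assumes
 * debugfs is reachable under /debugfs as in the comments above):
 *
 *   # restrict tracing to CPUs 0 and 1
 *   echo 3 > /debugfs/tracing/tracing_cpumask
 *   cat /debugfs/tracing/tracing_cpumask
 *
 * cpumask_parse_user() accepts the same comma-separated hex groups that
 * cpumask_scnprintf() prints, so a value read back from the file can be
 * written again unchanged.  CPUs cleared from the mask get their per-CPU
 * "disabled" counter raised, so tracers skip them.
 */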
static ssize_t
r += sprintf(buf + r, "\n");
WARN_ON(r >= len + 2);
- r = simple_read_from_buffer(ubuf, cnt, ppos,
- buf, r);
+ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
kfree(buf);
int neg = 0;
int i;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
break;
}
}
+ /*
+ * If no option could be set, return an error:
+ */
+ if (!trace_options[i])
+ return -EINVAL;
filp->f_pos += cnt;
}
static struct file_operations tracing_iter_fops = {
- .open = tracing_open_generic,
- .read = tracing_iter_ctrl_read,
- .write = tracing_iter_ctrl_write,
+ .open = tracing_open_generic,
+ .read = tracing_iter_ctrl_read,
+ .write = tracing_iter_ctrl_write,
};
static const char readme_msg[] =
}
static struct file_operations tracing_readme_fops = {
- .open = tracing_open_generic,
- .read = tracing_readme_read,
+ .open = tracing_open_generic,
+ .read = tracing_readme_read,
};
-
static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- long val;
char buf[64];
+ long val;
+ int ret;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
- val = simple_strtoul(buf, NULL, 10);
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
val = !!val;
char buf[64];
int r;
- r = snprintf(buf, 64, "%ld\n",
+ r = snprintf(buf, sizeof(buf), "%ld\n",
*ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
- if (r > 64)
- r = 64;
+ if (r > sizeof(buf))
+ r = sizeof(buf);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
size_t cnt, loff_t *ppos)
{
long *ptr = filp->private_data;
- long val;
char buf[64];
+ long val;
+ int ret;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
- val = simple_strtoul(buf, NULL, 10);
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
*ptr = val * 1000;
return -ENOMEM;
iter->tr = &global_trace;
+ iter->trace = current_trace;
filp->private_data = iter;
return 0;
}
+static unsigned int
+tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+{
+ struct trace_iterator *iter = filp->private_data;
+
+ if (trace_flags & TRACE_ITER_BLOCK) {
+ /*
+ * Always select as readable when in blocking mode
+ */
+ return POLLIN | POLLRDNORM;
+ } else {
+ if (!trace_empty(iter))
+ return POLLIN | POLLRDNORM;
+ poll_wait(filp, &trace_wait, poll_table);
+ if (!trace_empty(iter))
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+ }
+}
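/*
 * Consumer sketch for trace_pipe polling (illustrative user-space usage,
 * assuming the file was opened read-only from /debugfs/tracing):
 *
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *   while (poll(&pfd, 1, -1) > 0)
 *           n = read(fd, buf, sizeof(buf));
 *
 * With the "block" iterator flag set the file always reports readable and
 * the read itself blocks; otherwise poll() sleeps on trace_wait until
 * trace_wake_up() signals new entries.
 */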
+
/*
* Consumer reader.
*/
{
struct trace_iterator *iter = filp->private_data;
struct trace_array_cpu *data;
+ struct trace_array *tr = iter->tr;
+ struct tracer *tracer = iter->trace;
static cpumask_t mask;
- struct trace_entry *entry;
static int start;
unsigned long flags;
+#ifdef CONFIG_FTRACE
+ int ftrace_save;
+#endif
int read = 0;
int cpu;
int len;
start = 0;
while (trace_empty(iter)) {
+
+ if ((filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
/*
* This is a make-shift waitqueue. The reason we don't use
* an actual wait queue is because:
if (signal_pending(current))
return -EINTR;
+ if (iter->trace != current_trace)
+ return 0;
+
/*
* We block until we read something and tracing is disabled.
* We still block if tracing is disabled, but we have never
cnt = PAGE_SIZE - 1;
memset(iter, 0, sizeof(*iter));
- iter->tr = &global_trace;
+ iter->tr = tr;
+ iter->trace = tracer;
iter->pos = -1;
/*
cpus_clear(mask);
local_irq_save(flags);
- for_each_possible_cpu(cpu) {
+#ifdef CONFIG_FTRACE
+ ftrace_save = ftrace_enabled;
+ ftrace_enabled = 0;
+#endif
+ smp_wmb();
+ for_each_tracing_cpu(cpu) {
data = iter->tr->data[cpu];
if (!head_page(data) || !data->trace_idx)
continue;
atomic_inc(&data->disabled);
- spin_lock(&data->lock);
cpu_set(cpu, mask);
}
- while ((entry = find_next_entry_inc(iter)) != NULL) {
+ for_each_cpu_mask(cpu, mask) {
+ data = iter->tr->data[cpu];
+ __raw_spin_lock(&data->lock);
+ }
+
+ while (find_next_entry_inc(iter) != NULL) {
+ int len = iter->seq.len;
+
ret = print_trace_line(iter);
- if (!ret)
+ if (!ret) {
+ /* don't print partial lines */
+ iter->seq.len = len;
break;
+ }
trace_consume(iter);
for_each_cpu_mask(cpu, mask) {
data = iter->tr->data[cpu];
- spin_unlock(&data->lock);
+ __raw_spin_unlock(&data->lock);
+ }
+
+ for_each_cpu_mask(cpu, mask) {
+ data = iter->tr->data[cpu];
atomic_dec(&data->disabled);
}
+#ifdef CONFIG_FTRACE
+ ftrace_enabled = ftrace_save;
+#endif
local_irq_restore(flags);
/* Now copy what we have to the user */
return read;
}
+static ssize_t
+tracing_entries_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct trace_array *tr = filp->private_data;
+ char buf[64];
+ int r;
+
+ r = sprintf(buf, "%lu\n", tr->entries);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+tracing_entries_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ char buf[64];
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ /* must have at least 1 entry */
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&trace_types_lock);
+
+ if (current_trace != &no_tracer) {
+ cnt = -EBUSY;
+ pr_info("ftrace: set current_tracer to none"
+ " before modifying buffer size\n");
+ goto out;
+ }
+
+ if (val > global_trace.entries) {
+ while (global_trace.entries < val) {
+ if (trace_alloc_page()) {
+ cnt = -ENOMEM;
+ goto out;
+ }
+ }
+ } else {
+ /* shrink the buffer, keeping at least val entries (rounded up to whole pages) */
+ while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
+ trace_free_page();
+ }
+
+ filp->f_pos += cnt;
+
+ out:
+ max_tr.entries = global_trace.entries;
+ mutex_unlock(&trace_types_lock);
+
+ return cnt;
+}
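/*
 * Resize sketch for the new trace_entries file (illustrative; assumes the
 * debugfs tracing directory created below):
 *
 *   echo none   > /debugfs/tracing/current_tracer
 *   echo 100000 > /debugfs/tracing/trace_entries
 *   cat /debugfs/tracing/trace_entries
 *
 * Writes are refused with -EBUSY while a tracer other than "none" is
 * active, and pages are allocated or freed whole, so the resulting count
 * is rounded up to a multiple of ENTRIES_PER_PAGE.
 */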
+
static struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
static struct file_operations tracing_pipe_fops = {
.open = tracing_open_pipe,
+ .poll = tracing_poll_pipe,
.read = tracing_read_pipe,
.release = tracing_release_pipe,
};
+static struct file_operations tracing_entries_fops = {
+ .open = tracing_open_generic,
+ .read = tracing_entries_read,
+ .write = tracing_entries_write,
+};
+
#ifdef CONFIG_DYNAMIC_FTRACE
static ssize_t
if (!entry)
pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
+ entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
+ NULL, &tracing_cpumask_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
+
entry = debugfs_create_file("latency_trace", 0444, d_tracer,
&global_trace, &tracing_lt_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'tracing_threash' entry\n");
+ entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+ &global_trace, &tracing_entries_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'tracing_threash' entry\n");
+
#ifdef CONFIG_DYNAMIC_FTRACE
entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
&ftrace_update_tot_cnt,
#endif
}
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly =
-{
- .name = "none",
-};
-
static int trace_alloc_page(void)
{
struct trace_array_cpu *data;
int i;
/* first allocate a page for each CPU */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
array = (void *)__get_free_page(GFP_KERNEL);
if (array == NULL) {
printk(KERN_ERR "tracer: failed to allocate page"
}
/* Now that we successfully allocate a page per CPU, add them */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = global_trace.data[i];
- spin_lock_init(&data->lock);
- lockdep_set_class(&data->lock, &data->lock_key);
page = list_entry(pages.next, struct page, lru);
list_del_init(&page->lru);
list_add_tail(&page->lru, &data->trace_pages);
#ifdef CONFIG_TRACER_MAX_TRACE
data = max_tr.data[i];
- spin_lock_init(&data->lock);
- lockdep_set_class(&data->lock, &data->lock_key);
page = list_entry(pages.next, struct page, lru);
list_del_init(&page->lru);
list_add_tail(&page->lru, &data->trace_pages);
return -ENOMEM;
}
+static int trace_free_page(void)
+{
+ struct trace_array_cpu *data;
+ struct page *page;
+ struct list_head *p;
+ int i;
+ int ret = 0;
+
+ /* free one page from each buffer */
+ for_each_tracing_cpu(i) {
+ data = global_trace.data[i];
+ p = data->trace_pages.next;
+ if (p == &data->trace_pages) {
+ /* should never happen */
+ WARN_ON(1);
+ tracing_disabled = 1;
+ ret = -1;
+ break;
+ }
+ page = list_entry(p, struct page, lru);
+ ClearPageLRU(page);
+ list_del(&page->lru);
+ __free_page(page);
+
+ tracing_reset(data);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ data = max_tr.data[i];
+ p = data->trace_pages.next;
+ if (p == &data->trace_pages) {
+ /* should never happen */
+ WARN_ON(1);
+ tracing_disabled = 1;
+ ret = -1;
+ break;
+ }
+ page = list_entry(p, struct page, lru);
+ ClearPageLRU(page);
+ list_del(&page->lru);
+ __free_page(page);
+
+ tracing_reset(data);
+#endif
+ }
+ global_trace.entries -= ENTRIES_PER_PAGE;
+
+ return ret;
+}
+
__init static int tracer_alloc_buffers(void)
{
struct trace_array_cpu *data;
int ret = -ENOMEM;
int i;
+ global_trace.ctrl = tracer_enabled;
+
+ /* TODO: make the number of buffers hot pluggable with CPUS */
+ tracing_nr_buffers = num_possible_cpus();
+ tracing_buffer_mask = cpu_possible_map;
+
/* Allocate the first page for all buffers */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
max_tr.data[i] = &per_cpu(max_data, i);
/* use the LRU flag to differentiate the two buffers */
ClearPageLRU(page);
+ data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
array = (void *)__get_free_page(GFP_KERNEL);