unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;
+static unsigned long __read_mostly tracing_nr_buffers;
+static cpumask_t __read_mostly tracing_buffer_mask;
+
+#define for_each_tracing_cpu(cpu) \
+ for_each_cpu_mask(cpu, tracing_buffer_mask)
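/*
 * Note: for_each_tracing_cpu() iterates only over CPUs that have
 * trace buffers set up (tracing_buffer_mask). For now the mask is
 * simply initialized to cpu_possible_map, but keeping it separate
 * leaves room for making the buffers CPU-hotplug aware later.
 */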
+
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly = {
+ .name = "none",
+};
+
+static int trace_alloc_page(void);
+static int trace_free_page(void);
+
static int tracing_disabled = 1;
-static long
+long
ns2usecs(cycle_t nsec)
{
nsec += 500;
return cpu_clock(cpu);
}
+/*
+ * The global_trace is the descriptor that holds the tracing
+ * buffers for the live tracing. For each CPU, it contains
+ * a linked list of pages that will store trace entries. The
+ * pages are linked together through the lru item of their
+ * page descriptors, forming one list per CPU buffer.
+ *
+ * For each active CPU there is a data field that holds the
+ * pages for the buffer for that CPU. Each CPU has the same number
+ * of pages allocated for its buffer.
+ */
static struct trace_array global_trace;
static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
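/*
 * A minimal sketch of walking one CPU's page list (process() is a
 * hypothetical callback, not part of this code); each buffer page is
 * reached through the lru member of its struct page:
 *
 *	struct page *page;
 *
 *	list_for_each_entry(page, &data->trace_pages, lru)
 *		process(page_address(page));
 */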
+/*
+ * The max_tr is used to snapshot the global_trace when a maximum
+ * latency is reached. Some tracers will use this to store a maximum
+ * trace while they continue examining live traces.
+ *
+ * The buffers for the max_tr are set up the same as the global_trace.
+ * When a snapshot is taken, the linked list of the max_tr is swapped
+ * with the linked list of the global_trace and the buffers are reset
+ * for the global_trace so that tracing can continue.
+ */
static struct trace_array max_tr;
static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+/* tracer_enabled is used to toggle activation of a tracer */
static int tracer_enabled = 1;
+
+/*
+ * trace_nr_entries is the number of entries that is allocated
+ * for a buffer. Note, the number of entries is always rounded
+ * up to a multiple of ENTRIES_PER_PAGE.
+ */
static unsigned long trace_nr_entries = 65536UL;
+/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;
+
+/* current_trace points to the tracer that is currently active */
static struct tracer *current_trace __read_mostly;
+
+/*
+ * max_tracer_type_len is used to simplify allocating buffers
+ * when reading tracer names from userspace. It tracks the length
+ * of the longest tracer name registered.
+ */
static int max_tracer_type_len;
+/*
+ * trace_types_lock is used to protect the trace_types list.
+ * This lock is also used to serialize user access: it is taken
+ * on entry from userspace and held while the user-requested
+ * activity runs inside the kernel.
+ */
static DEFINE_MUTEX(trace_types_lock);
+
+/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
+/* trace_flags holds iter_ctrl options */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Simply wakes up any task that is blocked on the trace_wait
+ * queue. This is used with trace_poll by tasks polling the trace.
+ */
void trace_wake_up(void)
{
/*
static int __init set_nr_entries(char *str)
{
+ unsigned long nr_entries;
+ int ret;
+
if (!str)
return 0;
- trace_nr_entries = simple_strtoul(str, &str, 0);
+ ret = strict_strtoul(str, 0, &nr_entries);
+ /* nr_entries cannot be zero */
+ if (ret < 0 || nr_entries == 0)
+ return 0;
+ trace_nr_entries = nr_entries;
return 1;
}
__setup("trace_entries=", set_nr_entries);
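/*
 * Usage sketch: the default of 65536 entries can be overridden on the
 * kernel command line, e.g. "trace_entries=131072"; the final count is
 * rounded to whole pages of entries when the buffers are allocated.
 */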
return nsecs / 1000;
}
-enum trace_type {
- __TRACE_FIRST_TYPE = 0,
-
- TRACE_FN,
- TRACE_CTX,
- TRACE_WAKE,
- TRACE_STACK,
- TRACE_SPECIAL,
-
- __TRACE_LAST_TYPE
-};
-
+/*
+ * trace_flag_type is an enumeration that holds different
+ * states when a trace occurs. These are:
+ * IRQS_OFF - interrupts were disabled
+ * NEED_RESCHED - reschedule is requested
+ * HARDIRQ - inside an interrupt handler
+ * SOFTIRQ - inside a softirq handler
+ */
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
TRACE_FLAG_NEED_RESCHED = 0x02,
TRACE_FLAG_SOFTIRQ = 0x08,
};
+/*
+ * TRACE_ITER_SYM_MASK masks the options in trace_flags that
+ * control the output of kernel symbols.
+ */
#define TRACE_ITER_SYM_MASK \
(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
-/* These must match the bit postions above */
+/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
"print-parent",
"sym-offset",
NULL
};
-static DEFINE_SPINLOCK(ftrace_max_lock);
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t so that lockdep does not
+ * instrument it, avoiding that overhead when lock debugging is
+ * enabled.
+ */
+static raw_spinlock_t ftrace_max_lock =
+ (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
/*
* Copy the new maximum trace into the separate maximum-trace
tracing_record_cmdline(current);
}
+/**
+ * check_pages - integrity check of a per-CPU trace buffer
+ *
+ * As a safety measure we check to make sure the data pages have not
+ * been corrupted. TODO: make this configurable so it can be
+ * disabled, since it adds a bit of overhead.
+ */
void check_pages(struct trace_array_cpu *data)
{
struct page *page, *tmp;
}
}
+/**
+ * head_page - page address of the first page in a per_cpu buffer.
+ *
+ * head_page returns the page address of the first page in
+ * a per_cpu buffer. This also performs various consistency
+ * checks to make sure the buffer has not been corrupted.
+ */
void *head_page(struct trace_array_cpu *data)
{
struct page *page;
return page_address(page);
}
-static int
+/**
+ * trace_seq_printf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formatting of a trace,
+ * trace_seq_printf is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ */
+int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
int len = (PAGE_SIZE - 1) - s->len;
va_end(ap);
/* If we can't write it all, don't bother writing anything */
- if (ret > len)
+ if (ret >= len)
return 0;
s->len += ret;
return len;
}
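/*
 * A rough usage sketch (print_example is hypothetical): an output
 * handler builds a line in the sequence buffer and reports overflow
 * by returning 0, the same convention the print_*_fmt routines below
 * follow:
 *
 *	static int print_example(struct trace_iterator *iter)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		if (!trace_seq_printf(s, "%16s-%-5d example\n",
 *				      trace_find_cmdline(iter->ent->pid),
 *				      iter->ent->pid))
 *			return 0;
 *		return 1;
 *	}
 */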
+/**
+ * trace_seq_puts - trace sequence printing of simple string
+ * @s: trace sequence descriptor
+ * @str: simple string to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy to user routines. This function records a simple string
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ */
static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
}
#define HEX_CHARS 17
+static const char hex2asc[] = "0123456789abcdef";
static int
trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
unsigned char hex[HEX_CHARS];
- unsigned char *data;
+ unsigned char *data = mem;
unsigned char byte;
int i, j;
BUG_ON(len >= HEX_CHARS);
- data = mem;
-
#ifdef __BIG_ENDIAN
for (i = 0, j = 0; i < len; i++) {
#else
#endif
byte = data[i];
- hex[j] = byte & 0x0f;
- if (hex[j] >= 10)
- hex[j] += 'a' - 10;
- else
- hex[j] += '0';
- j++;
-
- hex[j] = byte >> 4;
- if (hex[j] >= 10)
- hex[j] += 'a' - 10;
- else
- hex[j] += '0';
- j++;
+ hex[j++] = hex2asc[byte & 0x0f];
+ hex[j++] = hex2asc[byte >> 4];
}
- hex[j] = ' ';
- j++;
+ hex[j++] = ' ';
return trace_seq_putmem(s, hex, j);
}
trace_seq_reset(s);
}
+/*
+ * Flip the trace buffers between two trace descriptors.
+ * This is usually done between the global_trace and the max_tr
+ * buffers to record a snapshot of the current trace.
+ *
+ * The ftrace_max_lock must be held.
+ */
static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
check_pages(tr2);
}
+/**
+ * update_max_tr - snapshot all trace buffers from global_trace to max_tr
+ * @tr: tracer
+ * @tsk: the task with the latency
+ * @cpu: The cpu that initiated the trace.
+ *
+ * Flip the buffers between the @tr and the max_tr and record information
+ * about which task was the cause of this latency.
+ */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
int i;
WARN_ON_ONCE(!irqs_disabled());
- spin_lock(&ftrace_max_lock);
+ __raw_spin_lock(&ftrace_max_lock);
/* clear out all the previous traces */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
flip_trace(max_tr.data[i], data);
tracing_reset(data);
}
__update_max_tr(tr, tsk, cpu);
- spin_unlock(&ftrace_max_lock);
+ __raw_spin_unlock(&ftrace_max_lock);
}
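/*
 * A condensed sketch of the intended caller (delta is a hypothetical
 * latency measurement, interrupts already disabled as the WARN_ON_ONCE
 * above expects): when a tracer sees a new worst-case latency it
 * records the new maximum and snapshots the live buffers into max_tr:
 *
 *	if (delta > tracing_max_latency) {
 *		tracing_max_latency = delta;
 *		update_max_tr(tr, current, cpu);
 *	}
 */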
/**
* @tr - tracer
* @tsk - task with the latency
* @cpu - the cpu of the buffer to copy.
+ *
+ * Flip the trace of a single CPU buffer between the @tr and the max_tr.
*/
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
int i;
WARN_ON_ONCE(!irqs_disabled());
- spin_lock(&ftrace_max_lock);
- for_each_possible_cpu(i)
+ __raw_spin_lock(&ftrace_max_lock);
+ for_each_tracing_cpu(i)
tracing_reset(max_tr.data[i]);
flip_trace(max_tr.data[cpu], data);
tracing_reset(data);
__update_max_tr(tr, tsk, cpu);
- spin_unlock(&ftrace_max_lock);
+ __raw_spin_unlock(&ftrace_max_lock);
}
+/**
+ * register_tracer - register a tracer with the ftrace system.
+ * @type - the plugin for the tracer
+ *
+ * Register a new plugin tracer.
+ */
int register_tracer(struct tracer *type)
{
struct tracer *t;
* internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
if (!head_page(data))
continue;
goto out;
}
/* Only reset on passing, to avoid touching corrupted buffers */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
if (!head_page(data))
continue;
void tracing_reset(struct trace_array_cpu *data)
{
data->trace_idx = 0;
+ data->overrun = 0;
data->trace_head = data->trace_tail = head_page(data);
data->trace_head_idx = 0;
data->trace_tail_idx = 0;
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
-atomic_t trace_record_cmdline_disabled;
+
+/* trace in all context switches */
+atomic_t trace_record_cmdline_enabled __read_mostly;
+
+/* temporarily disable recording */
+atomic_t trace_record_cmdline_disabled __read_mostly;
static void trace_init_cmdlines(void)
{
if (data->trace_head == data->trace_tail &&
idx_next == data->trace_tail_idx) {
/* overrun */
+ data->overrun++;
data->trace_tail_idx++;
if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
data->trace_tail =
pc = preempt_count();
entry->preempt_count = pc & 0xff;
- entry->pid = tsk->pid;
+ entry->pid = (tsk) ? tsk->pid : 0;
entry->t = ftrace_now(raw_smp_processor_id());
entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
struct trace_entry *entry;
unsigned long irq_flags;
- spin_lock_irqsave(&data->lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_FN;
entry->fn.ip = ip;
entry->fn.parent_ip = parent_ip;
- spin_unlock_irqrestore(&data->lock, irq_flags);
-
- trace_wake_up();
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
}
void
struct trace_entry *entry;
unsigned long irq_flags;
- spin_lock_irqsave(&data->lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, 0);
entry->type = TRACE_SPECIAL;
entry->special.arg1 = arg1;
entry->special.arg2 = arg2;
entry->special.arg3 = arg3;
- spin_unlock_irqrestore(&data->lock, irq_flags);
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
trace_wake_up();
}
struct trace_entry *entry;
unsigned long irq_flags;
- spin_lock_irqsave(&data->lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_CTX;
entry->ctx.prev_state = prev->state;
entry->ctx.next_pid = next->pid;
entry->ctx.next_prio = next->prio;
+ entry->ctx.next_state = next->state;
__trace_stack(tr, data, flags, 4);
- spin_unlock_irqrestore(&data->lock, irq_flags);
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
}
void
struct trace_entry *entry;
unsigned long irq_flags;
- spin_lock_irqsave(&data->lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_WAKE;
entry->ctx.prev_state = curr->state;
entry->ctx.next_pid = wakee->pid;
entry->ctx.next_prio = wakee->prio;
+ entry->ctx.next_state = wakee->state;
__trace_stack(tr, data, flags, 5);
- spin_unlock_irqrestore(&data->lock, irq_flags);
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
trace_wake_up();
}
int next_cpu = -1;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
if (!head_page(tr->data[cpu]))
continue;
ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
mutex_lock(&trace_types_lock);
- if (!current_trace || current_trace != iter->trace)
+ if (!current_trace || current_trace != iter->trace) {
+ mutex_unlock(&trace_types_lock);
return NULL;
+ }
atomic_inc(&trace_record_cmdline_disabled);
iter->prev_ent = NULL;
iter->prev_cpu = -1;
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
iter->next_idx[i] = 0;
iter->next_page[i] = NULL;
}
if (type)
name = type->name;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
if (head_page(tr->data[cpu])) {
total += tr->data[cpu]->trace_idx;
if (tr->data[cpu]->trace_idx > tr->entries)
hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
- if (hardirq && softirq)
+ if (hardirq && softirq) {
trace_seq_putc(s, 'H');
- else {
- if (hardirq)
+ } else {
+ if (hardirq) {
trace_seq_putc(s, 'h');
- else {
+ } else {
if (softirq)
trace_seq_putc(s, 's');
else
unsigned long abs_usecs;
unsigned long rel_usecs;
char *comm;
- int S;
+ int S, T;
int i;
+ unsigned state;
if (!next_entry)
next_entry = entry;
abs_usecs % 1000, rel_usecs/1000,
rel_usecs % 1000);
} else {
- if (entry->type != TRACE_STACK) {
- lat_print_generic(s, entry, cpu);
- lat_print_timestamp(s, abs_usecs, rel_usecs);
- }
+ lat_print_generic(s, entry, cpu);
+ lat_print_timestamp(s, abs_usecs, rel_usecs);
}
switch (entry->type) {
case TRACE_FN:
break;
case TRACE_CTX:
case TRACE_WAKE:
- S = entry->ctx.prev_state < sizeof(state_to_char) ?
- state_to_char[entry->ctx.prev_state] : 'X';
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
+
+ state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0;
+ S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
comm = trace_find_cmdline(entry->ctx.next_pid);
- trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d %s\n",
+ trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
S, entry->type == TRACE_CTX ? "==>" : " +",
entry->ctx.next_pid,
entry->ctx.next_prio,
- comm);
+ T, comm);
break;
case TRACE_SPECIAL:
- trace_seq_printf(s, " %ld %ld %ld\n",
+ trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
unsigned long secs;
char *comm;
int ret;
- int S;
+ int S, T;
int i;
entry = iter->ent;
usec_rem = do_div(t, 1000000ULL);
secs = (unsigned long)t;
- if (entry->type != TRACE_STACK) {
- ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
- if (!ret)
- return 0;
- ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
- if (!ret)
- return 0;
- ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
- if (!ret)
- return 0;
- }
+ ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+ if (!ret)
+ return 0;
+ ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
+ if (!ret)
+ return 0;
+ ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
+ if (!ret)
+ return 0;
switch (entry->type) {
case TRACE_FN:
case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
- ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d\n",
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
+ ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
S,
entry->type == TRACE_CTX ? "==>" : " +",
entry->ctx.next_pid,
- entry->ctx.next_prio);
+ entry->ctx.next_prio,
+ T);
if (!ret)
return 0;
break;
case TRACE_SPECIAL:
- ret = trace_seq_printf(s, " %ld %ld %ld\n",
+ ret = trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
int ret;
- int S;
+ int S, T;
entry = iter->ent;
case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
if (entry->type == TRACE_WAKE)
S = '+';
- ret = trace_seq_printf(s, "%d %d %c %d %d\n",
+ ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
S,
entry->ctx.next_pid,
- entry->ctx.next_prio);
+ entry->ctx.next_prio,
+ T);
if (!ret)
return 0;
break;
case TRACE_SPECIAL:
case TRACE_STACK:
- ret = trace_seq_printf(s, " %ld %ld %ld\n",
+ ret = trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
struct trace_seq *s = &iter->seq;
unsigned char newline = '\n';
struct trace_entry *entry;
- int S;
+ int S, T;
entry = iter->ent;
case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
if (entry->type == TRACE_WAKE)
S = '+';
SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
+ SEQ_PUT_HEX_FIELD_RET(s, T);
break;
case TRACE_SPECIAL:
case TRACE_STACK:
SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
+ SEQ_PUT_FIELD_RET(s, entry->ctx.next_state);
break;
case TRACE_SPECIAL:
case TRACE_STACK:
struct trace_array_cpu *data;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
data = iter->tr->data[cpu];
if (head_page(data) && data->trace_idx &&
static int print_trace_line(struct trace_iterator *iter)
{
+ if (iter->trace && iter->trace->print_line)
+ return iter->trace->print_line(iter);
+
if (trace_flags & TRACE_ITER_BIN)
return print_bin_fmt(iter);
};
static struct file_operations show_traces_fops = {
- .open = show_traces_open,
- .read = seq_read,
- .release = seq_release,
+ .open = show_traces_open,
+ .read = seq_read,
+ .release = seq_release,
+};
+
+/*
+ * Only trace on a CPU if the bitmask is set:
+ */
+static cpumask_t tracing_cpumask = CPU_MASK_ALL;
+
+/*
+ * When tracing/tracing_cpu_mask is modified then this holds
+ * the new bitmask we are about to install:
+ */
+static cpumask_t tracing_cpumask_new;
+
+/*
+ * The tracer itself will not take this lock, but still we want
+ * to provide a consistent cpumask to user-space:
+ */
+static DEFINE_MUTEX(tracing_cpumask_update_lock);
+
+/*
+ * Temporary storage for the character representation of the
+ * CPU bitmask (and one more byte for the newline):
+ */
+static char mask_str[NR_CPUS + 1];
+
+static ssize_t
+tracing_cpumask_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int len;
+
+ mutex_lock(&tracing_cpumask_update_lock);
+
+ len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+ if (count - len < 2) {
+ count = -EINVAL;
+ goto out_err;
+ }
+ len += sprintf(mask_str + len, "\n");
+ count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+
+out_err:
+ mutex_unlock(&tracing_cpumask_update_lock);
+
+ return count;
+}
+
+static ssize_t
+tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int err, cpu;
+
+ mutex_lock(&tracing_cpumask_update_lock);
+ err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
+ if (err)
+ goto err_unlock;
+
+ raw_local_irq_disable();
+ __raw_spin_lock(&ftrace_max_lock);
+ for_each_tracing_cpu(cpu) {
+ /*
+ * Increase/decrease the disabled counter if we are
+ * about to flip a bit in the cpumask:
+ */
+ if (cpu_isset(cpu, tracing_cpumask) &&
+ !cpu_isset(cpu, tracing_cpumask_new)) {
+ atomic_inc(&global_trace.data[cpu]->disabled);
+ }
+ if (!cpu_isset(cpu, tracing_cpumask) &&
+ cpu_isset(cpu, tracing_cpumask_new)) {
+ atomic_dec(&global_trace.data[cpu]->disabled);
+ }
+ }
+ __raw_spin_unlock(&ftrace_max_lock);
+ raw_local_irq_enable();
+
+ tracing_cpumask = tracing_cpumask_new;
+
+ mutex_unlock(&tracing_cpumask_update_lock);
+
+ return count;
+
+err_unlock:
+ mutex_unlock(&tracing_cpumask_update_lock);
+
+ return err;
+}
+
+static struct file_operations tracing_cpumask_fops = {
+ .open = tracing_open_generic,
+ .read = tracing_cpumask_read,
+ .write = tracing_cpumask_write,
};
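/*
 * User-space sketch: the mask is read and written in the standard
 * bitmap hex format used by cpumask_scnprintf()/cpumask_parse_user(),
 * so, with the debugfs tracing directory as the current directory,
 *
 *	# echo 3 > tracing_cpumask
 *
 * restricts tracing to CPUs 0 and 1, while reading the file returns
 * the active mask followed by a newline.
 */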
static ssize_t
r += sprintf(buf + r, "\n");
WARN_ON(r >= len + 2);
- r = simple_read_from_buffer(ubuf, cnt, ppos,
- buf, r);
+ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
kfree(buf);
int neg = 0;
int i;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
break;
}
}
+ /*
+ * If no option could be set, return an error:
+ */
+ if (!trace_options[i])
+ return -EINVAL;
filp->f_pos += cnt;
}
static struct file_operations tracing_iter_fops = {
- .open = tracing_open_generic,
- .read = tracing_iter_ctrl_read,
- .write = tracing_iter_ctrl_write,
+ .open = tracing_open_generic,
+ .read = tracing_iter_ctrl_read,
+ .write = tracing_iter_ctrl_write,
};
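/*
 * Usage sketch for the iter_ctrl file (option names come from the
 * trace_options[] table above): writing an option name enables it and
 * a "no" prefix clears it (tracked by the neg flag in the write
 * handler), e.g.
 *
 *	# echo sym-offset > iter_ctrl
 *	# echo noprint-parent > iter_ctrl
 */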
static const char readme_msg[] =
}
static struct file_operations tracing_readme_fops = {
- .open = tracing_open_generic,
- .read = tracing_readme_read,
+ .open = tracing_open_generic,
+ .read = tracing_readme_read,
};
static ssize_t
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- long val;
char buf[64];
+ long val;
+ int ret;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
- val = simple_strtoul(buf, NULL, 10);
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
val = !!val;
char buf[64];
int r;
- r = snprintf(buf, 64, "%ld\n",
+ r = snprintf(buf, sizeof(buf), "%ld\n",
*ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
- if (r > 64)
- r = 64;
+ if (r > sizeof(buf))
+ r = sizeof(buf);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
size_t cnt, loff_t *ppos)
{
long *ptr = filp->private_data;
- long val;
char buf[64];
+ long val;
+ int ret;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
- val = simple_strtoul(buf, NULL, 10);
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
*ptr = val * 1000;
if (!iter)
return -ENOMEM;
+ mutex_lock(&trace_types_lock);
iter->tr = &global_trace;
-
+ iter->trace = current_trace;
filp->private_data = iter;
+ if (iter->trace->pipe_open)
+ iter->trace->pipe_open(iter);
+ mutex_unlock(&trace_types_lock);
+
return 0;
}
* Always select as readable when in blocking mode
*/
return POLLIN | POLLRDNORM;
- }
- else {
+ } else {
if (!trace_empty(iter))
return POLLIN | POLLRDNORM;
poll_wait(filp, &trace_wait, poll_table);
return cnt;
}
+ mutex_lock(&trace_types_lock);
+ if (iter->trace->read) {
+ ret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+ if (ret) {
+ read = ret;
+ goto out;
+ }
+ }
+
trace_seq_reset(&iter->seq);
start = 0;
while (trace_empty(iter)) {
- if (!(trace_flags & TRACE_ITER_BLOCK))
- return -EWOULDBLOCK;
+
+ if ((filp->f_flags & O_NONBLOCK)) {
+ read = -EAGAIN;
+ goto out;
+ }
+
/*
* This is a make-shift waitqueue. The reason we don't use
* an actual wait queue is because:
set_current_state(TASK_INTERRUPTIBLE);
iter->tr->waiter = current;
- /* sleep for one second, and try again. */
- schedule_timeout(HZ);
+ mutex_unlock(&trace_types_lock);
+
+ /* sleep for 100 msecs, and try again. */
+ schedule_timeout(HZ/10);
+
+ mutex_lock(&trace_types_lock);
iter->tr->waiter = NULL;
- if (signal_pending(current))
- return -EINTR;
+ if (signal_pending(current)) {
+ read = -EINTR;
+ goto out;
+ }
+
+ if (iter->trace != current_trace)
+ goto out;
/*
* We block until we read something and tracing is disabled.
/* stop when tracing is finished */
if (trace_empty(iter))
- return 0;
+ goto out;
if (cnt >= PAGE_SIZE)
cnt = PAGE_SIZE - 1;
- memset(iter, 0, sizeof(*iter));
- iter->tr = &global_trace;
+ /* reset all but tr, trace, and overruns */
+ memset(&iter->seq, 0,
+ sizeof(struct trace_iterator) -
+ offsetof(struct trace_iterator, seq));
iter->pos = -1;
/*
ftrace_enabled = 0;
#endif
smp_wmb();
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
data = iter->tr->data[cpu];
if (!head_page(data) || !data->trace_idx)
for_each_cpu_mask(cpu, mask) {
data = iter->tr->data[cpu];
- spin_lock(&data->lock);
+ __raw_spin_lock(&data->lock);
+
+ if (data->overrun > iter->last_overrun[cpu])
+ iter->overrun[cpu] +=
+ data->overrun - iter->last_overrun[cpu];
+ iter->last_overrun[cpu] = data->overrun;
}
while (find_next_entry_inc(iter) != NULL) {
for_each_cpu_mask(cpu, mask) {
data = iter->tr->data[cpu];
- spin_unlock(&data->lock);
+ __raw_spin_unlock(&data->lock);
}
for_each_cpu_mask(cpu, mask) {
if (ret)
read = -EFAULT;
+out:
+ mutex_unlock(&trace_types_lock);
+
return read;
}
+static ssize_t
+tracing_entries_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct trace_array *tr = filp->private_data;
+ char buf[64];
+ int r;
+
+ r = sprintf(buf, "%lu\n", tr->entries);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+tracing_entries_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ char buf[64];
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ /* must have at least 1 entry */
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&trace_types_lock);
+
+ if (current_trace != &no_tracer) {
+ cnt = -EBUSY;
+ pr_info("ftrace: set current_tracer to none"
+ " before modifying buffer size\n");
+ goto out;
+ }
+
+ if (val > global_trace.entries) {
+ while (global_trace.entries < val) {
+ if (trace_alloc_page()) {
+ cnt = -ENOMEM;
+ goto out;
+ }
+ }
+ } else {
+ /* shrink, but keep at least val entries (rounded up to a whole page) */
+ while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
+ trace_free_page();
+ }
+
+ filp->f_pos += cnt;
+
+ out:
+ max_tr.entries = global_trace.entries;
+ mutex_unlock(&trace_types_lock);
+
+ return cnt;
+}
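/*
 * Usage sketch: the buffer size can only be changed while no tracer is
 * active, so a resize from user space looks roughly like
 *
 *	# echo none > current_tracer
 *	# echo 100000 > trace_entries
 *
 * The value is rounded up to whole pages of entries, and the live and
 * max buffers are kept the same size.
 */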
+
static struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
.release = tracing_release_pipe,
};
+static struct file_operations tracing_entries_fops = {
+ .open = tracing_open_generic,
+ .read = tracing_entries_read,
+ .write = tracing_entries_write,
+};
+
#ifdef CONFIG_DYNAMIC_FTRACE
static ssize_t
if (!entry)
pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
+ entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
+ NULL, &tracing_cpumask_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
+
entry = debugfs_create_file("latency_trace", 0444, d_tracer,
&global_trace, &tracing_lt_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'tracing_threash' entry\n");
+ entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+ &global_trace, &tracing_entries_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'tracing_threash' entry\n");
+
#ifdef CONFIG_DYNAMIC_FTRACE
entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
&ftrace_update_tot_cnt,
#endif
}
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly =
-{
- .name = "none",
-};
-
static int trace_alloc_page(void)
{
struct trace_array_cpu *data;
int i;
/* first allocate a page for each CPU */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
array = (void *)__get_free_page(GFP_KERNEL);
if (array == NULL) {
printk(KERN_ERR "tracer: failed to allocate page"
}
/* Now that we successfully allocate a page per CPU, add them */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = global_trace.data[i];
- spin_lock_init(&data->lock);
- lockdep_set_class(&data->lock, &data->lock_key);
page = list_entry(pages.next, struct page, lru);
list_del_init(&page->lru);
list_add_tail(&page->lru, &data->trace_pages);
#ifdef CONFIG_TRACER_MAX_TRACE
data = max_tr.data[i];
- spin_lock_init(&data->lock);
- lockdep_set_class(&data->lock, &data->lock_key);
page = list_entry(pages.next, struct page, lru);
list_del_init(&page->lru);
list_add_tail(&page->lru, &data->trace_pages);
return -ENOMEM;
}
+static int trace_free_page(void)
+{
+ struct trace_array_cpu *data;
+ struct page *page;
+ struct list_head *p;
+ int i;
+ int ret = 0;
+
+ /* free one page from each buffer */
+ for_each_tracing_cpu(i) {
+ data = global_trace.data[i];
+ p = data->trace_pages.next;
+ if (p == &data->trace_pages) {
+ /* should never happen */
+ WARN_ON(1);
+ tracing_disabled = 1;
+ ret = -1;
+ break;
+ }
+ page = list_entry(p, struct page, lru);
+ ClearPageLRU(page);
+ list_del(&page->lru);
+ __free_page(page);
+
+ tracing_reset(data);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ data = max_tr.data[i];
+ p = data->trace_pages.next;
+ if (p == &data->trace_pages) {
+ /* should never happen */
+ WARN_ON(1);
+ tracing_disabled = 1;
+ ret = -1;
+ break;
+ }
+ page = list_entry(p, struct page, lru);
+ ClearPageLRU(page);
+ list_del(&page->lru);
+ __free_page(page);
+
+ tracing_reset(data);
+#endif
+ }
+ global_trace.entries -= ENTRIES_PER_PAGE;
+
+ return ret;
+}
+
__init static int tracer_alloc_buffers(void)
{
struct trace_array_cpu *data;
global_trace.ctrl = tracer_enabled;
+ /* TODO: make the number of buffers hot-pluggable with CPUs */
+ tracing_nr_buffers = num_possible_cpus();
+ tracing_buffer_mask = cpu_possible_map;
+
/* Allocate the first page for all buffers */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
max_tr.data[i] = &per_cpu(max_data, i);
/* use the LRU flag to differentiate the two buffers */
ClearPageLRU(page);
+ data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
array = (void *)__get_free_page(GFP_KERNEL);