unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;
+static unsigned long __read_mostly tracing_nr_buffers;
+static cpumask_t __read_mostly tracing_buffer_mask;
+
+#define for_each_tracing_cpu(cpu) \
+ for_each_cpu_mask(cpu, tracing_buffer_mask)
+
/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly =
-{
+static struct tracer no_tracer __read_mostly = {
.name = "none",
};
return cpu_clock(cpu);
}
+/*
+ * The global_trace is the descriptor that holds the tracing
+ * buffers for the live tracing. For each CPU, it contains
+ * a linked list of pages that will store trace entries. The
+ * page descriptors of those pages hold the list: the lru member
+ * of each page descriptor links together the pages of that
+ * CPU's buffer.
+ *
+ * For each active CPU there is a data field that holds the
+ * pages for the buffer for that CPU. Each CPU has the same number
+ * of pages allocated for its buffer.
+ */
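Editorial aside, not part of the patch: given the trace_pages list head and the lru linkage described above, the per-CPU page list could be walked roughly as in the sketch below; the helper name is hypothetical.

static int count_buffer_pages(struct trace_array_cpu *data)
{
	struct page *page;
	int nr = 0;

	/* each buffer page is linked onto trace_pages via its lru member */
	list_for_each_entry(page, &data->trace_pages, lru)
		nr++;
	return nr;
}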
static struct trace_array global_trace;
static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
+/*
+ * The max_tr is used to snapshot the global_trace when a maximum
+ * latency is reached. Some tracers will use this to store a maximum
+ * trace while the tracer continues examining live traces.
+ *
+ * The buffers for the max_tr are set up the same as the global_trace.
+ * When a snapshot is taken, the linked list of the max_tr is swapped
+ * with the linked list of the global_trace and the buffers are reset for
+ * the global_trace so the tracing can continue.
+ */
static struct trace_array max_tr;
static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+/* tracer_enabled is used to toggle activation of a tracer */
static int tracer_enabled = 1;
+
+/*
+ * trace_nr_entries is the number of entries allocated
+ * for each buffer. Note that the number of entries is always
+ * rounded to a multiple of ENTRIES_PER_PAGE.
+ */
static unsigned long trace_nr_entries = 65536UL;
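A hedged worked example of the rounding (editorial, with assumed numbers): if ENTRIES_PER_PAGE happened to be 128 and the buffer is rounded up to whole pages, a request of 65537 entries would end up as 513 pages, i.e. 65664 entries:

	/* illustration only; 128 entries per page is an assumption */
	unsigned long requested = 65537UL;
	unsigned long pages = DIV_ROUND_UP(requested, 128);	/* 513 */
	unsigned long entries = pages * 128;			/* 65664 */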
+/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;
+
+/* current_trace points to the tracer that is currently active */
static struct tracer *current_trace __read_mostly;
+
+/*
+ * max_tracer_type_len is used to simplify the allocation of
+ * buffers used to read tracer names from userspace. We keep track of
+ * the longest tracer name registered.
+ */
static int max_tracer_type_len;
+/*
+ * trace_types_lock is used to protect the trace_types list.
+ * This lock is also used to keep user access serialized.
+ * Accesses from userspace hold this lock while the requested
+ * activity happens inside the kernel.
+ */
static DEFINE_MUTEX(trace_types_lock);
+
+/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
+/* trace_flags holds iter_ctrl options */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Simply wakes up any task that is blocked on the trace_wait
+ * queue. This is used with trace_poll for tasks polling the trace.
+ */
void trace_wake_up(void)
{
/*
static int __init set_nr_entries(char *str)
{
+ unsigned long nr_entries;
+ int ret;
+
if (!str)
return 0;
- trace_nr_entries = simple_strtoul(str, &str, 0);
+ ret = strict_strtoul(str, 0, &nr_entries);
+ /* nr_entries cannot be zero */
+ if (ret < 0 || nr_entries == 0)
+ return 0;
+ trace_nr_entries = nr_entries;
return 1;
}
__setup("trace_entries=", set_nr_entries);
return nsecs / 1000;
}
+/*
+ * trace_flag_type is an enumeration that holds different
+ * states when a trace occurs. These are:
+ * IRQS_OFF - interrupts were disabled
+ * NEED_RESCHED - reschedule is requested
+ * HARDIRQ - inside an interrupt handler
+ * SOFTIRQ - inside a softirq handler
+ */
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
TRACE_FLAG_NEED_RESCHED = 0x02,
TRACE_FLAG_SOFTIRQ = 0x08,
};
+/*
+ * TRACE_ITER_SYM_MASK masks the options in trace_flags that
+ * control the output of kernel symbols.
+ */
#define TRACE_ITER_SYM_MASK \
(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
-/* These must match the bit postions above */
+/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
"print-parent",
"sym-offset",
NULL
};
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per-CPU spinlocks. But the action of swapping them
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ */
static raw_spinlock_t ftrace_max_lock =
(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
tracing_record_cmdline(current);
}
+/**
+ * check_pages - integrity check of trace buffers
+ *
+ * As a safety measure we check to make sure the data pages have not
+ * been corrupted. TODO: make this check configurable so it can be
+ * disabled, since it adds a bit of overhead.
+ */
void check_pages(struct trace_array_cpu *data)
{
struct page *page, *tmp;
}
}
+/**
+ * head_page - page address of the first page in the per_cpu buffer.
+ *
+ * head_page returns the page address of the first page in
+ * a per_cpu buffer. This also performs various consistency
+ * checks to make sure the buffer has not been corrupted.
+ */
void *head_page(struct trace_array_cpu *data)
{
struct page *page;
return page_address(page);
}
+/**
+ * trace_seq_printf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy-to-user routines. To simplify formatting of a trace,
+ * trace_seq_printf() is used to store strings into a special
+ * buffer (@s). Then the output may either be used by
+ * the sequencer or pulled into another buffer.
+ */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
return len;
}
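A hedged usage sketch (editorial, not from the patch): an output routine would typically accumulate a line in the sequence buffer as below; the entry field used is an assumption about struct trace_entry, and the function name is hypothetical.

static void example_output(struct trace_seq *s, struct trace_entry *entry)
{
	/* accumulate formatted text; a sequencer or reader drains it later */
	trace_seq_printf(s, "%5d: example event\n", entry->pid);
}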
+/**
+ * trace_seq_puts - trace sequence printing of a simple string
+ * @s: trace sequence descriptor
+ * @str: simple string to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy-to-user routines. This function records a simple string
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ */
static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
trace_seq_reset(s);
}
+/*
+ * Flip the trace buffers between two trace descriptors.
+ * This is usually done between the global_trace and the
+ * max_tr buffers to record a snapshot of the current trace.
+ *
+ * The ftrace_max_lock must be held.
+ */
static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
check_pages(tr2);
}
+/**
+ * update_max_tr - snapshot all trace buffers from global_trace to max_tr
+ * @tr: tracer
+ * @tsk: the task with the latency
+ * @cpu: The cpu that initiated the trace.
+ *
+ * Flip the buffers between the @tr and the max_tr and record information
+ * about which task was the cause of this latency.
+ */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
WARN_ON_ONCE(!irqs_disabled());
__raw_spin_lock(&ftrace_max_lock);
/* clear out all the previous traces */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
flip_trace(max_tr.data[i], data);
tracing_reset(data);
* @tr - tracer
* @tsk - task with the latency
* @cpu - the cpu of the buffer to copy.
+ *
+ * Flip the trace of a single CPU buffer between the @tr and the max_tr.
*/
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
WARN_ON_ONCE(!irqs_disabled());
__raw_spin_lock(&ftrace_max_lock);
- for_each_possible_cpu(i)
+ for_each_tracing_cpu(i)
tracing_reset(max_tr.data[i]);
flip_trace(max_tr.data[cpu], data);
__raw_spin_unlock(&ftrace_max_lock);
}
+/**
+ * register_tracer - register a tracer with the ftrace system.
+ * @type: the plugin for the tracer
+ *
+ * Register a new plugin tracer.
+ */
int register_tracer(struct tracer *type)
{
struct tracer *t;
* internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
if (!head_page(data))
continue;
goto out;
}
/* Only reset on passing, to avoid touching corrupted buffers */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
if (!head_page(data))
continue;
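For orientation, an editorial sketch (not part of the patch) of how a plugin tracer might register itself; the .init callback and the initcall placement are assumptions about the surrounding API, and all names here are hypothetical.

static void example_trace_init(struct trace_array *tr)
{
	/* per-tracer setup would go here */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_trace_init,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
device_initcall(init_example_tracer);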
void tracing_reset(struct trace_array_cpu *data)
{
data->trace_idx = 0;
+ data->overrun = 0;
data->trace_head = data->trace_tail = head_page(data);
data->trace_head_idx = 0;
data->trace_tail_idx = 0;
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
-atomic_t trace_record_cmdline_disabled;
+
+/* trace in all context switches */
+atomic_t trace_record_cmdline_enabled __read_mostly;
+
+/* temporarily disable recording */
+atomic_t trace_record_cmdline_disabled __read_mostly;
static void trace_init_cmdlines(void)
{
if (data->trace_head == data->trace_tail &&
idx_next == data->trace_tail_idx) {
/* overrun */
+ data->overrun++;
data->trace_tail_idx++;
if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
data->trace_tail =
int next_cpu = -1;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
if (!head_page(tr->data[cpu]))
continue;
ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
iter->prev_ent = NULL;
iter->prev_cpu = -1;
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
iter->next_idx[i] = 0;
iter->next_page[i] = NULL;
}
if (type)
name = type->name;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
if (head_page(tr->data[cpu])) {
total += tr->data[cpu]->trace_idx;
if (tr->data[cpu]->trace_idx > tr->entries)
hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
- if (hardirq && softirq)
+ if (hardirq && softirq) {
trace_seq_putc(s, 'H');
- else {
- if (hardirq)
+ } else {
+ if (hardirq) {
trace_seq_putc(s, 'h');
- else {
+ } else {
if (softirq)
trace_seq_putc(s, 's');
else
struct trace_array_cpu *data;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
data = iter->tr->data[cpu];
if (head_page(data) && data->trace_idx &&
raw_local_irq_disable();
__raw_spin_lock(&ftrace_max_lock);
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
/*
* Increase/decrease the disabled counter if we are
* about to flip a bit in the cpumask:
int neg = 0;
int i;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- long val;
char buf[64];
+ long val;
+ int ret;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
- val = simple_strtoul(buf, NULL, 10);
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
val = !!val;
char buf[64];
int r;
- r = snprintf(buf, 64, "%ld\n",
+ r = snprintf(buf, sizeof(buf), "%ld\n",
*ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
- if (r > 64)
- r = 64;
+ if (r > sizeof(buf))
+ r = sizeof(buf);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
size_t cnt, loff_t *ppos)
{
long *ptr = filp->private_data;
- long val;
char buf[64];
+ long val;
+ int ret;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
- val = simple_strtoul(buf, NULL, 10);
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
*ptr = val * 1000;
if (!iter)
return -ENOMEM;
+ mutex_lock(&trace_types_lock);
iter->tr = &global_trace;
iter->trace = current_trace;
-
filp->private_data = iter;
+ if (iter->trace->pipe_open)
+ iter->trace->pipe_open(iter);
+ mutex_unlock(&trace_types_lock);
+
return 0;
}
* Always select as readable when in blocking mode
*/
return POLLIN | POLLRDNORM;
- }
- else {
+ } else {
if (!trace_empty(iter))
return POLLIN | POLLRDNORM;
poll_wait(filp, &trace_wait, poll_table);
{
struct trace_iterator *iter = filp->private_data;
struct trace_array_cpu *data;
- struct trace_array *tr = iter->tr;
- struct tracer *tracer = iter->trace;
static cpumask_t mask;
static int start;
unsigned long flags;
return cnt;
}
+ mutex_lock(&trace_types_lock);
+ if (iter->trace->read) {
+ ret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+ if (ret) {
+ read = ret;
+ goto out;
+ }
+ }
+
trace_seq_reset(&iter->seq);
start = 0;
while (trace_empty(iter)) {
- if ((filp->f_flags & O_NONBLOCK))
- return -EAGAIN;
+ if ((filp->f_flags & O_NONBLOCK)) {
+ read = -EAGAIN;
+ goto out;
+ }
/*
* This is a make-shift waitqueue. The reason we don't use
set_current_state(TASK_INTERRUPTIBLE);
iter->tr->waiter = current;
- /* sleep for one second, and try again. */
- schedule_timeout(HZ);
+ mutex_unlock(&trace_types_lock);
+
+ /* sleep for 100 msecs, and try again. */
+ schedule_timeout(HZ/10);
+
+ mutex_lock(&trace_types_lock);
iter->tr->waiter = NULL;
- if (signal_pending(current))
- return -EINTR;
+ if (signal_pending(current)) {
+ read = -EINTR;
+ goto out;
+ }
if (iter->trace != current_trace)
- return 0;
+ goto out;
/*
* We block until we read something and tracing is disabled.
/* stop when tracing is finished */
if (trace_empty(iter))
- return 0;
+ goto out;
if (cnt >= PAGE_SIZE)
cnt = PAGE_SIZE - 1;
- memset(iter, 0, sizeof(*iter));
- iter->tr = tr;
- iter->trace = tracer;
+ /* reset all but tr, trace, and overruns */
+ memset(&iter->seq, 0,
+ sizeof(struct trace_iterator) -
+ offsetof(struct trace_iterator, seq));
iter->pos = -1;
/*
ftrace_enabled = 0;
#endif
smp_wmb();
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
data = iter->tr->data[cpu];
if (!head_page(data) || !data->trace_idx)
for_each_cpu_mask(cpu, mask) {
data = iter->tr->data[cpu];
__raw_spin_lock(&data->lock);
+
+ if (data->overrun > iter->last_overrun[cpu])
+ iter->overrun[cpu] +=
+ data->overrun - iter->last_overrun[cpu];
+ iter->last_overrun[cpu] = data->overrun;
}
while (find_next_entry_inc(iter) != NULL) {
if (ret)
read = -EFAULT;
+out:
+ mutex_unlock(&trace_types_lock);
+
return read;
}
{
unsigned long val;
char buf[64];
+ int ret;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
- val = simple_strtoul(buf, NULL, 10);
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
/* must have at least 1 entry */
if (!val)
int i;
/* first allocate a page for each CPU */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
array = (void *)__get_free_page(GFP_KERNEL);
if (array == NULL) {
printk(KERN_ERR "tracer: failed to allocate page"
}
/* Now that we successfully allocate a page per CPU, add them */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = global_trace.data[i];
page = list_entry(pages.next, struct page, lru);
list_del_init(&page->lru);
int ret = 0;
/* free one page from each buffer */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = global_trace.data[i];
p = data->trace_pages.next;
if (p == &data->trace_pages) {
global_trace.ctrl = tracer_enabled;
+ /* TODO: make the number of buffers hot-pluggable with CPUs */
+ tracing_nr_buffers = num_possible_cpus();
+ tracing_buffer_mask = cpu_possible_map;
+
/* Allocate the first page for all buffers */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
max_tr.data[i] = &per_cpu(max_data, i);