#include <linux/gfp.h>
#include <linux/fs.h>
+#include <linux/stacktrace.h>
+
#include "trace.h"
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
TRACE_FN,
TRACE_CTX,
TRACE_WAKE,
+ TRACE_STACK,
TRACE_SPECIAL,
__TRACE_LAST_TYPE
TRACE_ITER_HEX = 0x20,
TRACE_ITER_BIN = 0x40,
TRACE_ITER_BLOCK = 0x80,
+ TRACE_ITER_STACKTRACE = 0x100,
};
#define TRACE_ITER_SYM_MASK \
"hex",
"bin",
"block",
+ "stacktrace",
NULL
};
-static unsigned trace_flags;
+static unsigned trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_STACKTRACE;
static DEFINE_SPINLOCK(ftrace_max_lock);
spin_unlock_irqrestore(&data->lock, irq_flags);
if (!(trace_flags & TRACE_ITER_BLOCK))
- wake_up (&trace_wait);
+ wake_up(&trace_wait);
}
void
spin_unlock_irqrestore(&data->lock, irq_flags);
if (!(trace_flags & TRACE_ITER_BLOCK))
- wake_up (&trace_wait);
+ wake_up(&trace_wait);
+}
+
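+/*
+ * Save a TRACE_STACK entry: capture the current kernel stack into the
+ * trace buffer.  'skip' is handed to save_stack_trace() so the tracer's
+ * own innermost frames are left out of the recorded call chain.
+ */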
+void __trace_stack(struct trace_array *tr,
+ struct trace_array_cpu *data,
+ unsigned long flags,
+ int skip)
+{
+ struct trace_entry *entry;
+ struct stack_trace trace;
+
+ if (!(trace_flags & TRACE_ITER_STACKTRACE))
+ return;
+
+ entry = tracing_get_trace_entry(tr, data);
+ tracing_generic_entry_update(entry, flags);
+ entry->type = TRACE_STACK;
+
+ memset(&entry->stack, 0, sizeof(entry->stack));
+
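+ /*
+ * Fill the entry's caller[] array in place; slots that
+ * save_stack_trace() does not use stay zero from the memset above.
+ */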
+ trace.nr_entries = 0;
+ trace.max_entries = FTRACE_STACK_ENTRIES;
+ trace.skip = skip;
+ trace.entries = entry->stack.caller;
+
+ save_stack_trace(&trace);
}
void
tracing_sched_switch_trace(struct trace_array *tr,
struct trace_array_cpu *data,
- struct task_struct *prev, struct task_struct *next,
+ struct task_struct *prev,
+ struct task_struct *next,
unsigned long flags)
{
struct trace_entry *entry;
entry->ctx.prev_state = prev->state;
entry->ctx.next_pid = next->pid;
entry->ctx.next_prio = next->prio;
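+ /* record the call chain that led to this context switch, minus the tracer's own frames */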
+ __trace_stack(tr, data, flags, 4);
spin_unlock_irqrestore(&data->lock, irq_flags);
if (!(trace_flags & TRACE_ITER_BLOCK))
- wake_up (&trace_wait);
+ wake_up(&trace_wait);
}
void
tracing_sched_wakeup_trace(struct trace_array *tr,
struct trace_array_cpu *data,
- struct task_struct *wakee, struct task_struct *curr,
+ struct task_struct *wakee,
+ struct task_struct *curr,
unsigned long flags)
{
struct trace_entry *entry;
entry->ctx.prev_state = curr->state;
entry->ctx.next_pid = wakee->pid;
entry->ctx.next_prio = wakee->prio;
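+ /* same for wakeups; the call path here is one frame deeper, hence skip = 5 */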
+ __trace_stack(tr, data, flags, 5);
spin_unlock_irqrestore(&data->lock, irq_flags);
if (!(trace_flags & TRACE_ITER_BLOCK))
- wake_up (&trace_wait);
+ wake_up(&trace_wait);
}
unsigned long rel_usecs;
char *comm;
int S;
+ int i;
if (!next_entry)
next_entry = entry;
abs_usecs % 1000, rel_usecs/1000,
rel_usecs % 1000);
} else {
- lat_print_generic(s, entry, cpu);
- lat_print_timestamp(s, abs_usecs, rel_usecs);
+ if (entry->type != TRACE_STACK) {
+ lat_print_generic(s, entry, cpu);
+ lat_print_timestamp(s, abs_usecs, rel_usecs);
+ }
}
switch (entry->type) {
case TRACE_FN:
entry->special.arg2,
entry->special.arg3);
break;
+ case TRACE_STACK:
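+ /* dump the saved call chain, most recent caller first: "ip <= ip <= ..." */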
+ for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+ if (i)
+ trace_seq_puts(s, " <= ");
+ seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
+ }
+ trace_seq_puts(s, "\n");
+ break;
default:
trace_seq_printf(s, "Unknown type %d\n", entry->type);
}
unsigned long long t;
unsigned long secs;
char *comm;
- int S;
int ret;
+ int S;
+ int i;
entry = iter->ent;
usec_rem = do_div(t, 1000000ULL);
secs = (unsigned long)t;
- ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
- if (!ret)
- return 0;
- ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
- if (!ret)
- return 0;
- ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
- if (!ret)
- return 0;
+ if (entry->type != TRACE_STACK) {
+ ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+ if (!ret)
+ return 0;
+ ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
+ if (!ret)
+ return 0;
+ ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
+ if (!ret)
+ return 0;
+ }
switch (entry->type) {
case TRACE_FN:
if (!ret)
return 0;
break;
+ case TRACE_STACK:
+ for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+ if (i) {
+ ret = trace_seq_puts(s, " <= ");
+ if (!ret)
+ return 0;
+ }
+ ret = seq_print_ip_sym(s, entry->stack.caller[i],
+ sym_flags);
+ if (!ret)
+ return 0;
+ }
+ ret = trace_seq_puts(s, "\n");
+ if (!ret)
+ return 0;
+ break;
}
return 1;
}
return 0;
break;
case TRACE_SPECIAL:
+ case TRACE_STACK:
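+ /* raw mode prints a stack entry with the same three-word format as a special entry */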
ret = trace_seq_printf(s, " %lx %lx %lx\n",
entry->special.arg1,
entry->special.arg2,
SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
break;
case TRACE_SPECIAL:
+ case TRACE_STACK:
SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);
SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
break;
case TRACE_SPECIAL:
+ case TRACE_STACK:
SEQ_PUT_FIELD_RET(s, entry->special.arg1);
SEQ_PUT_FIELD_RET(s, entry->special.arg2);
SEQ_PUT_FIELD_RET(s, entry->special.arg3);