void tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
+	data->overrun = 0;
	data->trace_head = data->trace_tail = head_page(data);
	data->trace_head_idx = 0;
	data->trace_tail_idx = 0;

	if (data->trace_head == data->trace_tail &&
	    idx_next == data->trace_tail_idx) {
		/* overrun */
+		data->overrun++;
		data->trace_tail_idx++;
		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
			data->trace_tail =
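The hunk above is the producer side of the new accounting: the branch is
taken exactly when the next write index has caught the read tail, so the
write is about to destroy the oldest unread entry, and data->overrun
counts one lost entry per such wrap. A minimal sketch of the pattern,
with hypothetical buffer_full()/drop_oldest() helpers standing in for
the kernel's head/tail index arithmetic:

	/* Writer side: record every unread entry we overwrite. */
	if (buffer_full(data)) {
		data->overrun++;	/* one consumer-visible entry lost */
		drop_oldest(data);	/* advance the tail past it */
	}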

{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array_cpu *data;
-	struct trace_array *tr = iter->tr;
-	struct tracer *tracer = iter->trace;
	static cpumask_t mask;
	static int start;
	unsigned long flags;

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

-	memset(iter, 0, sizeof(*iter));
-	iter->tr = tr;
-	iter->trace = tracer;
+	/* reset all but tr, trace, and overruns */
+	memset(&iter->seq, 0,
+	       sizeof(struct trace_iterator) -
+	       offsetof(struct trace_iterator, seq));
	iter->pos = -1;

	/*
	 * We need to stop all tracing on all CPUS to read the
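The memset above is the reason struct trace_iterator is reordered in the
header below: it zeroes everything from seq through the end of the struct
in a single call, so any field that has to survive a read (tr, trace, and
the two overrun arrays) must be declared before seq, and any field the
memset clobbers but needs a non-zero initial value (pos) has to be set
afterwards. A self-contained illustration of the offsetof() idiom, using
a made-up struct:

	#include <stddef.h>
	#include <string.h>

	struct iter_like {
		void	*keep;		/* preserved across resets */
		long	counters[4];	/* preserved across resets */
		/* everything from here down is per-read scratch state */
		char	scratch[64];
		long	pos;
	};

	static void reset_scratch(struct iter_like *it)
	{
		/* zero from 'scratch' through the end of the struct */
		memset(&it->scratch, 0,
		       sizeof(*it) - offsetof(struct iter_like, scratch));
		it->pos = -1;	/* must follow the memset, or it is wiped */
	}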

	for_each_cpu_mask(cpu, mask) {
		data = iter->tr->data[cpu];
		__raw_spin_lock(&data->lock);
+
+		if (data->overrun > iter->last_overrun[cpu])
+			iter->overrun[cpu] +=
+				data->overrun - iter->last_overrun[cpu];
+		iter->last_overrun[cpu] = data->overrun;
	}

	while (find_next_entry_inc(iter) != NULL) {
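The locked section above uses a snapshot-and-delta pattern: data->overrun
is the producer's running total, iter->last_overrun[cpu] is its value at
the previous read, and only the difference is folded into the
reader-visible iter->overrun[cpu], so repeated reads never double-count
old losses. The same pattern in isolation (hypothetical names):

	/* Reader side: accumulate only losses since the last poll. */
	static void poll_overrun(long producer_total, long *last_seen,
				 long *reported)
	{
		if (producer_total > *last_seen)
			*reported += producer_total - *last_seen;
		*last_seen = producer_total;
	}

Calling poll_overrun() twice with an unchanged producer_total leaves
*reported untouched, which is exactly the behavior the iterator needs
across successive trace_pipe reads.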

	void			*trace_head; /* producer */
	void			*trace_tail; /* consumer */
	unsigned long		trace_idx;
+	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;

 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
-	struct trace_seq	seq;
	struct trace_array	*tr;
	struct tracer		*trace;
+	long			last_overrun[NR_CPUS];
+	long			overrun[NR_CPUS];
+
+	/* The below is zeroed out in pipe_read */
+	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;