1 /* Include in trace.c */
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
/*
 * Sanity-check one trace entry. Returns nonzero when the entry is one
 * the selftest recognizes as valid.
 * NOTE(review): the body (orig lines 7-18) is elided in this excerpt --
 * presumably a switch on entry->type; confirm against trace.c.
 */
6 static inline int trace_valid_entry(struct trace_entry *entry)
/*
 * Verify one CPU's trace buffer: walk every page of the ring and check
 * that each recorded entry is valid, that the page chain length matches
 * tr->entries, and that no extra pages follow the last entry.
 * Prints a KERN_CONT diagnostic and (in elided lines) fails on any
 * inconsistency. Several interior lines are elided in this excerpt.
 */
20 trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
22 struct trace_entry *entries;
/* An initialized per-cpu buffer must always own at least one page. */
27 BUG_ON(list_empty(&data->trace_pages));
28 page = list_entry(data->trace_pages.next, struct page, lru);
29 entries = page_address(page);
/* head_page() should point at the first page of the list. */
31 if (head_page(data) != entries)
35 * The starting trace buffer always has valid elements,
36 * if any element exists.
38 entries = head_page(data);
/* Walk every slot the array claims to hold. */
40 for (i = 0; i < tr->entries; i++) {
/* Only slots below trace_idx hold live entries worth validating. */
42 if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
43 printk(KERN_CONT ".. invalid entry %d ",
/* Crossed a page boundary: advance to the next page in the ring. */
49 if (idx >= ENTRIES_PER_PAGE) {
50 page = virt_to_page(entries);
51 if (page->lru.next == &data->trace_pages) {
/* Wrapped before consuming tr->entries slots: size mismatch. */
52 if (i != tr->entries - 1) {
53 printk(KERN_CONT ".. entries buffer mismatch");
57 page = list_entry(page->lru.next, struct page, lru);
58 entries = page_address(page);
/* After the walk, the current page must be the last in the ring. */
64 page = virt_to_page(entries);
65 if (page->lru.next != &data->trace_pages) {
66 printk(KERN_CONT ".. too many entries");
75 printk(KERN_CONT ".. corrupted trace buffer .. ");
80 * Test the trace buffer to see if all the elements
/*
 * are valid, across every possible CPU. On success, *count (when
 * non-NULL per callers) receives the total number of entries seen.
 * Some interior lines are elided in this excerpt.
 */
83 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
85 unsigned long flags, cnt = 0;
88 /* Don't allow flipping of max traces now */
/* IRQs off + max_lock held so the max-latency swap cannot race us. */
89 raw_local_irq_save(flags);
90 __raw_spin_lock(&ftrace_max_lock);
91 for_each_possible_cpu(cpu) {
/* Skip CPUs that never recorded anything. */
92 if (!head_page(tr->data[cpu]))
95 cnt += tr->data[cpu]->trace_idx;
97 ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
101 __raw_spin_unlock(&ftrace_max_lock);
102 raw_local_irq_restore(flags);
112 #ifdef CONFIG_DYNAMIC_FTRACE
/* Stringify the test function's name for ftrace_set_filter(). */
115 #define STR(x) __STR(x)
117 /* Test dynamic code modification and ftrace filters */
/*
 * Verifies that filtering on a single function works: with the filter
 * set to an un-called function the buffer must stay empty; after calling
 * the function once, exactly one entry must appear. Restores the saved
 * ftrace/tracer enable state and clears the filter before returning.
 * Several interior lines are elided in this excerpt.
 */
118 int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
119 struct trace_array *tr,
124 int save_ftrace_enabled = ftrace_enabled;
125 int save_tracer_enabled = tracer_enabled;
128 /* The ftrace test PASSED */
129 printk(KERN_CONT "PASSED\n");
130 pr_info("Testing dynamic ftrace: ");
132 /* enable tracing, and record the filter function */
136 /* passed in by parameter to fool gcc from optimizing */
139 /* update the records */
140 ret = ftrace_force_update();
142 printk(KERN_CONT ".. ftraced failed .. ");
147 * Some archs *cough*PowerPC*cough* add characters to the
148 * start of the function names. We simply put a '*' to
151 func_name = "*" STR(DYN_FTRACE_TEST_NAME);
153 /* filter only on our function */
154 ftrace_set_filter(func_name, strlen(func_name), 1);
159 /* Sleep for a 1/10 of a second */
162 /* we should have nothing in the buffer */
163 ret = trace_test_buffer(tr, &count);
169 printk(KERN_CONT ".. filter did not filter .. ");
173 /* call our function again */
179 /* stop the tracing. */
181 trace->ctrl_update(tr);
184 /* check the trace buffer */
185 ret = trace_test_buffer(tr, &count);
188 /* we should only have one item */
189 if (!ret && count != 1) {
190 printk(KERN_CONT ".. filter failed count=%ld ..", count);
195 ftrace_enabled = save_ftrace_enabled;
196 tracer_enabled = save_tracer_enabled;
198 /* Enable tracing on all functions again */
199 ftrace_set_filter(NULL, 0, 1);
/* Without dynamic ftrace this sub-test trivially passes. */
204 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
205 #endif /* CONFIG_DYNAMIC_FTRACE */
207 * Simple verification test of ftrace function tracer.
208 * Enable ftrace, sleep 1/10 second, and then read the trace
209 * buffer to see if all is in order.
/* Several interior lines are elided in this excerpt. */
212 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
216 int save_ftrace_enabled = ftrace_enabled;
217 int save_tracer_enabled = tracer_enabled;
219 /* make sure msleep has been recorded */
222 /* force the recorded functions to be traced */
223 ret = ftrace_force_update();
225 printk(KERN_CONT ".. ftraced failed .. ");
229 /* start the tracing */
235 /* Sleep for a 1/10 of a second */
237 /* stop the tracing. */
239 trace->ctrl_update(tr);
242 /* check the trace buffer */
243 ret = trace_test_buffer(tr, &count);
/* A 100ms trace window must have captured something. */
246 if (!ret && !count) {
247 printk(KERN_CONT ".. no entries found ..");
/* Chain into the dynamic-ftrace filter sub-test (no-op if disabled). */
252 ret = trace_selftest_startup_dynamic_tracing(trace, tr,
253 DYN_FTRACE_TEST_NAME);
256 ftrace_enabled = save_ftrace_enabled;
257 tracer_enabled = save_tracer_enabled;
259 /* kill ftrace totally if we failed */
265 #endif /* CONFIG_FTRACE */
267 #ifdef CONFIG_IRQSOFF_TRACER
/*
 * Selftest for the irqsoff latency tracer: disable interrupts briefly,
 * then verify both the live buffer and the max-latency snapshot hold
 * valid entries. Saves and restores tracing_max_latency around the run.
 * Several interior lines are elided in this excerpt.
 */
269 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
271 unsigned long save_max = tracing_max_latency;
275 /* start the tracing */
278 /* reset the max latency */
279 tracing_max_latency = 0;
280 /* disable interrupts for a bit */
284 /* stop the tracing. */
286 trace->ctrl_update(tr);
287 /* check both trace buffers */
288 ret = trace_test_buffer(tr, NULL);
290 ret = trace_test_buffer(&max_tr, &count);
/* The max-latency snapshot must not be empty. */
293 if (!ret && !count) {
294 printk(KERN_CONT ".. no entries found ..");
298 tracing_max_latency = save_max;
302 #endif /* CONFIG_IRQSOFF_TRACER */
304 #ifdef CONFIG_PREEMPT_TRACER
/*
 * Selftest for the preemptoff latency tracer: mirror of the irqsoff
 * test but disabling preemption instead of interrupts. Saves/restores
 * tracing_max_latency. Several interior lines are elided here.
 */
306 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
308 unsigned long save_max = tracing_max_latency;
312 /* start the tracing */
315 /* reset the max latency */
316 tracing_max_latency = 0;
317 /* disable preemption for a bit */
321 /* stop the tracing. */
323 trace->ctrl_update(tr);
324 /* check both trace buffers */
325 ret = trace_test_buffer(tr, NULL);
327 ret = trace_test_buffer(&max_tr, &count);
/* The max-latency snapshot must not be empty. */
330 if (!ret && !count) {
331 printk(KERN_CONT ".. no entries found ..");
335 tracing_max_latency = save_max;
339 #endif /* CONFIG_PREEMPT_TRACER */
341 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Selftest for the combined preempt+irqs-off tracer. Runs the critical
 * section twice -- once releasing preemption before irqs, once in the
 * reverse order -- and checks both buffers after each pass.
 * Several interior lines are elided in this excerpt.
 */
343 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
345 unsigned long save_max = tracing_max_latency;
349 /* start the tracing */
353 /* reset the max latency */
354 tracing_max_latency = 0;
356 /* disable preemption and interrupts for a bit */
361 /* reverse the order of preempt vs irqs */
364 /* stop the tracing. */
366 trace->ctrl_update(tr);
367 /* check both trace buffers */
368 ret = trace_test_buffer(tr, NULL);
372 ret = trace_test_buffer(&max_tr, &count);
/* First pass: snapshot must contain entries. */
376 if (!ret && !count) {
377 printk(KERN_CONT ".. no entries found ..");
382 /* do the test by disabling interrupts first this time */
383 tracing_max_latency = 0;
385 trace->ctrl_update(tr);
390 /* reverse the order of preempt vs irqs */
393 /* stop the tracing. */
395 trace->ctrl_update(tr);
396 /* check both trace buffers */
397 ret = trace_test_buffer(tr, NULL);
401 ret = trace_test_buffer(&max_tr, &count);
/* Second pass: snapshot must contain entries as well. */
403 if (!ret && !count) {
404 printk(KERN_CONT ".. no entries found ..");
411 tracing_max_latency = save_max;
415 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
417 #ifdef CONFIG_SCHED_TRACER
/*
 * Helper kthread for the wakeup-latency selftest: promotes itself to an
 * RT priority, signals the completion passed in via @data so the test
 * knows the priority change took effect, then sleeps until the test
 * wakes it. Several interior lines are elided in this excerpt.
 */
418 static int trace_wakeup_test_thread(void *data)
420 /* Make this a RT thread, doesn't need to be too high */
421 struct sched_param param = { .sched_priority = 5 };
422 struct completion *x = data;
/*
 * FIX: the source was mojibake-corrupted here -- "&param" had been
 * mangled into the pilcrow sequence "¶m" (an eaten "&para;" HTML
 * entity), which does not compile. Pass the address of param.
 */
424 sched_setscheduler(current, SCHED_FIFO, &param);
426 /* Make it know we have a new prio */
429 /* now go to sleep and let the test wake us up */
430 set_current_state(TASK_INTERRUPTIBLE);
433 /* we are awake, now wait to disappear */
434 while (!kthread_should_stop()) {
436 * This is an RT task, do short sleeps to let
/*
 * Selftest for the wakeup-latency tracer: spawn an RT kthread, wait for
 * it to reach RT priority and sleep, then wake it while tracing and
 * verify both buffers recorded the wakeup. Saves/restores
 * tracing_max_latency and stops the thread before returning.
 * Several interior lines are elided in this excerpt.
 */
446 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
448 unsigned long save_max = tracing_max_latency;
449 struct task_struct *p;
450 struct completion isrt;
454 init_completion(&isrt);
456 /* create a high prio thread */
457 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
459 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
463 /* make sure the thread is running at an RT prio */
464 wait_for_completion(&isrt);
466 /* start the tracing */
469 /* reset the max latency */
470 tracing_max_latency = 0;
472 /* sleep to let the RT thread sleep too */
476 * Yes this is slightly racy. It is possible that for some
477 * strange reason that the RT thread we created, did not
478 * call schedule for 100ms after doing the completion,
479 * and we do a wakeup on a task that already is awake.
480 * But that is extremely unlikely, and the worst thing that
481 * happens in such a case, is that we disable tracing.
482 * Honestly, if this race does happen something is horribly
483 * wrong with the system.
488 /* stop the tracing. */
490 trace->ctrl_update(tr);
491 /* check both trace buffers */
492 ret = trace_test_buffer(tr, NULL);
494 ret = trace_test_buffer(&max_tr, &count);
499 tracing_max_latency = save_max;
501 /* kill the thread */
/* The wakeup must have produced at least one snapshot entry. */
504 if (!ret && !count) {
505 printk(KERN_CONT ".. no entries found ..");
511 #endif /* CONFIG_SCHED_TRACER */
513 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Selftest for the context-switch tracer: trace for ~100ms and verify
 * the buffer is valid and non-empty. Interior lines are elided here.
 */
515 trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
520 /* start the tracing */
523 /* Sleep for a 1/10 of a second */
525 /* stop the tracing. */
527 trace->ctrl_update(tr);
528 /* check the trace buffer */
529 ret = trace_test_buffer(tr, &count);
/* 100ms of scheduling must have recorded at least one switch. */
532 if (!ret && !count) {
533 printk(KERN_CONT ".. no entries found ..");
539 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
541 #ifdef CONFIG_SYSPROF_TRACER
/*
 * Selftest for the sysprof tracer: same shape as the sched_switch test
 * -- trace for ~100ms, then verify the buffer is valid and non-empty.
 * Interior lines are elided in this excerpt.
 */
543 trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
548 /* start the tracing */
551 /* Sleep for a 1/10 of a second */
553 /* stop the tracing. */
555 trace->ctrl_update(tr);
556 /* check the trace buffer */
557 ret = trace_test_buffer(tr, &count);
/* The sampling window must have produced entries. */
560 if (!ret && !count) {
561 printk(KERN_CONT ".. no entries found ..");
567 #endif /* CONFIG_SYSPROF_TRACER */