1 /* Include in trace.c */
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
/*
 * trace_valid_entry - classify a raw trace_entry as valid or not.
 * Used below by trace_test_buffer_cpu() as a boolean predicate.
 * NOTE(review): the body is elided from this excerpt (embedded line
 * numbers jump from 6 to 20); only the signature is visible here.
 */
6 static inline int trace_valid_entry(struct trace_entry *entry)
/*
 * trace_test_buffer_cpu - sanity-check one CPU's trace buffer.
 *
 * Walks the per-CPU page list (data->trace_pages), verifying that every
 * recorded entry passes trace_valid_entry() and that the number of pages
 * chained on the list is consistent with tr->entries.  On any mismatch a
 * KERN_CONT diagnostic is printed and control reaches the corruption
 * report at the bottom (failure/return paths are elided in this excerpt).
 *
 * NOTE(review): the embedded original line numbers jump, so several
 * statements (local declarations, returns, loop close) are not visible
 * here — do not assume the visible lines are contiguous.
 */
20 trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
22 struct trace_entry *entries;
/* the page list must be non-empty before dereferencing its head */
27 BUG_ON(list_empty(&data->trace_pages));
28 page = list_entry(data->trace_pages.next, struct page, lru);
29 entries = page_address(page);
/* head_page() should point at the first page's entries when in sync */
31 if (head_page(data) != entries)
35 * The starting trace buffer always has valid elements,
36 * if any element exists.
38 entries = head_page(data);
/* scan every slot the ring can hold; only the first trace_idx are live */
40 for (i = 0; i < tr->entries; i++) {
42 if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
43 printk(KERN_CONT ".. invalid entry %d ",
/* crossed a page boundary: advance to the next page in the list */
49 if (idx >= ENTRIES_PER_PAGE) {
50 page = virt_to_page(entries);
51 if (page->lru.next == &data->trace_pages) {
/* wrapped back to the list head before seeing all entries: short chain */
52 if (i != tr->entries - 1) {
53 printk(KERN_CONT ".. entries buffer mismatch");
57 page = list_entry(page->lru.next, struct page, lru);
58 entries = page_address(page);
/* after the full scan the final page must link back to the list head */
64 page = virt_to_page(entries);
65 if (page->lru.next != &data->trace_pages) {
66 printk(KERN_CONT ".. too many entries");
/* shared failure label target — reached from the checks above */
75 printk(KERN_CONT ".. corrupted trace buffer .. ");
/*
 * trace_test_buffer - validate every CPU's buffer in a trace_array.
 *
 * Iterates all possible CPUs, skipping those with no head page, summing
 * each CPU's trace_idx into @count (when non-NULL — see callers passing
 * NULL for the main buffer) and delegating per-CPU validation to
 * trace_test_buffer_cpu().  Return value / early-exit handling is elided
 * in this excerpt; presumably non-zero means a corrupt buffer — confirm
 * against the full source.
 */
80 * Test the trace buffer to see if all the elements
83 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
85 unsigned long cnt = 0;
89 for_each_possible_cpu(cpu) {
/* a CPU that never traced has no head page; nothing to check */
90 if (!head_page(tr->data[cpu]))
93 cnt += tr->data[cpu]->trace_idx;
95 ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
/*
 * Dynamic-ftrace selftest support: DYN_FTRACE_TEST_NAME names a tiny
 * traceable function used as the filter target below; STR() stringifies
 * it for ftrace_set_filter().  The function body exists only so mcount
 * gets called for it (body elided in this excerpt).
 */
108 #ifdef CONFIG_DYNAMIC_FTRACE
110 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
112 #define STR(x) __STR(x)
113 static int DYN_FTRACE_TEST_NAME(void)
115 /* used to call mcount */
/*
 * trace_selftest_startup_dynamic_tracing - verify dynamic code patching
 * and ftrace filtering.
 *
 * Procedure visible in this excerpt: save the global enable flags,
 * force-update the mcount records, install a filter matching only
 * DYN_FTRACE_TEST_NAME, sleep, assert the buffer stayed empty (filter
 * excluded everything else), then call the filtered function once and
 * assert exactly one entry was recorded.  Flags are restored and the
 * filter cleared (ftrace_set_filter(NULL, 0, 1)) before returning.
 *
 * NOTE(review): error-return statements, the func() invocations and the
 * ctrl_update(start) calls are elided here — the visible lines are not
 * contiguous in the original.
 */
119 /* Test dynamic code modification and ftrace filters */
120 int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
121 struct trace_array *tr,
/* save global state so the test can restore it on every exit path */
126 int save_ftrace_enabled = ftrace_enabled;
127 int save_tracer_enabled = tracer_enabled;
/* this runs after the plain function-tracer test; finish its banner */
129 /* The ftrace test PASSED */
130 printk(KERN_CONT "PASSED\n");
131 pr_info("Testing dynamic ftrace: ");
133 /* enable tracing, and record the filter function */
137 /* passed in by parameter to fool gcc from optimizing */
140 /* update the records */
141 ret = ftrace_force_update();
143 printk(KERN_CONT ".. ftraced failed .. ");
147 /* filter only on our function */
148 ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
149 sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);
154 /* Sleep for a 1/10 of a second */
/* with the filter installed, the sleep must record no entries at all */
157 /* we should have nothing in the buffer */
158 ret = trace_test_buffer(tr, &count);
164 printk(KERN_CONT ".. filter did not filter .. ");
168 /* call our function again */
174 /* stop the tracing. */
176 trace->ctrl_update(tr);
179 /* check the trace buffer */
180 ret = trace_test_buffer(tr, &count);
/* exactly the one filtered call should have been traced */
183 /* we should only have one item */
184 if (!ret && count != 1) {
185 printk(KERN_CONT ".. filter failed count=%ld ..", count);
/* common exit: restore saved flags and drop the filter */
190 ftrace_enabled = save_ftrace_enabled;
191 tracer_enabled = save_tracer_enabled;
193 /* Enable tracing on all functions again */
194 ftrace_set_filter(NULL, 0, 1);
/* !CONFIG_DYNAMIC_FTRACE stub: the test trivially succeeds */
199 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
200 #endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * trace_selftest_startup_function - basic function-tracer smoke test.
 *
 * Saves the enable flags, force-updates the mcount records, starts the
 * tracer, sleeps ~100ms, stops it via trace->ctrl_update(tr), and
 * verifies the buffer is intact and non-empty.  On success it chains
 * into the dynamic-tracing test above, then restores the saved flags.
 * The "kill ftrace totally" failure path is elided in this excerpt.
 */
202 * Simple verification test of ftrace function tracer.
203 * Enable ftrace, sleep 1/10 second, and then read the trace
204 * buffer to see if all is in order.
207 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* save globals so every exit path can restore them */
211 int save_ftrace_enabled = ftrace_enabled;
212 int save_tracer_enabled = tracer_enabled;
214 /* make sure msleep has been recorded */
217 /* force the recorded functions to be traced */
218 ret = ftrace_force_update();
220 printk(KERN_CONT ".. ftraced failed .. ");
224 /* start the tracing */
230 /* Sleep for a 1/10 of a second */
232 /* stop the tracing. */
234 trace->ctrl_update(tr);
237 /* check the trace buffer */
238 ret = trace_test_buffer(tr, &count);
/* a 100ms window with full tracing must record something */
241 if (!ret && !count) {
242 printk(KERN_CONT ".. no entries found ..");
/* chain into the dynamic-patching test using our stub function */
247 ret = trace_selftest_startup_dynamic_tracing(trace, tr,
248 DYN_FTRACE_TEST_NAME);
251 ftrace_enabled = save_ftrace_enabled;
252 tracer_enabled = save_tracer_enabled;
254 /* kill ftrace totally if we failed */
260 #endif /* CONFIG_FTRACE */
/*
 * trace_selftest_startup_irqsoff - verify the irqs-off latency tracer.
 *
 * Saves and zeroes tracing_max_latency, starts the tracer, disables
 * interrupts briefly (the local_irq_disable/enable pair is elided in
 * this excerpt), stops tracing, then checks both the live buffer and
 * the max-latency snapshot buffer (max_tr) for intact, non-empty
 * contents.  Restores the saved max latency before returning.
 */
262 #ifdef CONFIG_IRQSOFF_TRACER
264 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
/* preserve the global so a recorded test latency doesn't leak out */
266 unsigned long save_max = tracing_max_latency;
270 /* start the tracing */
273 /* reset the max latency */
274 tracing_max_latency = 0;
275 /* disable interrupts for a bit */
279 /* stop the tracing. */
281 trace->ctrl_update(tr);
282 /* check both trace buffers */
283 ret = trace_test_buffer(tr, NULL);
285 ret = trace_test_buffer(&max_tr, &count);
/* the irq-off window must have landed entries in the snapshot */
288 if (!ret && !count) {
289 printk(KERN_CONT ".. no entries found ..");
293 tracing_max_latency = save_max;
297 #endif /* CONFIG_IRQSOFF_TRACER */
/*
 * trace_selftest_startup_preemptoff - verify the preempt-off latency
 * tracer.  Mirrors the irqsoff test above, but the critical section
 * disables preemption instead of interrupts (the preempt_disable/enable
 * pair is elided in this excerpt).  Checks both the live buffer and the
 * max-latency snapshot, then restores tracing_max_latency.
 */
299 #ifdef CONFIG_PREEMPT_TRACER
301 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
/* preserve the global so a recorded test latency doesn't leak out */
303 unsigned long save_max = tracing_max_latency;
307 /* start the tracing */
310 /* reset the max latency */
311 tracing_max_latency = 0;
312 /* disable preemption for a bit */
316 /* stop the tracing. */
318 trace->ctrl_update(tr);
319 /* check both trace buffers */
320 ret = trace_test_buffer(tr, NULL);
322 ret = trace_test_buffer(&max_tr, &count);
/* the preempt-off window must have landed entries in the snapshot */
325 if (!ret && !count) {
326 printk(KERN_CONT ".. no entries found ..");
330 tracing_max_latency = save_max;
334 #endif /* CONFIG_PREEMPT_TRACER */
/*
 * trace_selftest_startup_preemptirqsoff - verify the combined
 * preempt+irqs-off latency tracer.
 *
 * Runs the critical-section test twice: once disabling preemption before
 * interrupts, then again with the order reversed, checking the live and
 * max-latency buffers after each pass.  The actual disable/enable pairs
 * and error-exit statements are elided in this excerpt.
 */
336 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
338 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
/* preserve the global so recorded test latencies don't leak out */
340 unsigned long save_max = tracing_max_latency;
344 /* start the tracing */
348 /* reset the max latency */
349 tracing_max_latency = 0;
/* --- pass 1: preemption disabled first, then interrupts --- */
351 /* disable preemption and interrupts for a bit */
356 /* reverse the order of preempt vs irqs */
359 /* stop the tracing. */
361 trace->ctrl_update(tr);
362 /* check both trace buffers */
363 ret = trace_test_buffer(tr, NULL);
367 ret = trace_test_buffer(&max_tr, &count);
371 if (!ret && !count) {
372 printk(KERN_CONT ".. no entries found ..");
/* --- pass 2: interrupts disabled first, then preemption --- */
377 /* do the test by disabling interrupts first this time */
378 tracing_max_latency = 0;
380 trace->ctrl_update(tr);
385 /* reverse the order of preempt vs irqs */
388 /* stop the tracing. */
390 trace->ctrl_update(tr);
391 /* check both trace buffers */
392 ret = trace_test_buffer(tr, NULL);
396 ret = trace_test_buffer(&max_tr, &count);
398 if (!ret && !count) {
399 printk(KERN_CONT ".. no entries found ..");
406 tracing_max_latency = save_max;
410 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
/*
 * trace_wakeup_test_thread - kthread body for the wakeup-latency test.
 *
 * Boosts itself to an RT priority via rt_mutex_setprio(), signals the
 * creator through the completion passed in @data (the complete() call is
 * elided here), then sleeps TASK_INTERRUPTIBLE so the test can wake it
 * and measure wakeup latency.  Afterwards it loops in short sleeps until
 * kthread_should_stop() (loop body elided in this excerpt).
 */
412 #ifdef CONFIG_SCHED_TRACER
413 static int trace_wakeup_test_thread(void *data)
415 struct completion *x = data;
417 /* Make this a RT thread, doesn't need to be too high */
419 rt_mutex_setprio(current, MAX_RT_PRIO - 5);
421 /* Make it know we have a new prio */
424 /* now go to sleep and let the test wake us up */
425 set_current_state(TASK_INTERRUPTIBLE);
428 /* we are awake, now wait to disappear */
429 while (!kthread_should_stop()) {
431 * This is an RT task, do short sleeps to let
/*
 * trace_selftest_startup_wakeup - verify the RT-task wakeup tracer.
 *
 * Spawns trace_wakeup_test_thread at RT priority, waits on a completion
 * until the thread has boosted itself, starts the tracer, lets the RT
 * thread go to sleep, then wakes it (the wake_up_process call is elided
 * here) so the tracer records the wakeup latency.  Both the live buffer
 * and the max-latency snapshot are checked; the thread is stopped via
 * kthread_stop (elided) and tracing_max_latency is restored.
 */
441 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
/* preserve the global so the recorded test latency doesn't leak out */
443 unsigned long save_max = tracing_max_latency;
444 struct task_struct *p;
/* completion signalled by the thread once it is running at RT prio */
445 struct completion isrt;
449 init_completion(&isrt);
451 /* create a high prio thread */
452 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
454 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
458 /* make sure the thread is running at an RT prio */
459 wait_for_completion(&isrt);
461 /* start the tracing */
464 /* reset the max latency */
465 tracing_max_latency = 0;
467 /* sleep to let the RT thread sleep too */
471 * Yes this is slightly racy. It is possible that for some
472 * strange reason that the RT thread we created, did not
473 * call schedule for 100ms after doing the completion,
474 * and we do a wakeup on a task that already is awake.
475 * But that is extremely unlikely, and the worst thing that
476 * happens in such a case, is that we disable tracing.
477 * Honestly, if this race does happen something is horrible
478 * wrong with the system.
483 /* stop the tracing. */
485 trace->ctrl_update(tr);
486 /* check both trace buffers */
487 ret = trace_test_buffer(tr, NULL);
489 ret = trace_test_buffer(&max_tr, &count);
494 tracing_max_latency = save_max;
496 /* kill the thread */
/* the wakeup must have produced entries in the snapshot buffer */
499 if (!ret && !count) {
500 printk(KERN_CONT ".. no entries found ..");
506 #endif /* CONFIG_SCHED_TRACER */
/*
 * trace_selftest_startup_sched_switch - smoke-test the context-switch
 * tracer: start it, sleep ~100ms so scheduling activity is recorded,
 * stop it via trace->ctrl_update(tr), and verify the buffer is intact
 * and non-empty.  Start/sleep/return statements are elided in this
 * excerpt.
 */
508 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
510 trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
515 /* start the tracing */
518 /* Sleep for a 1/10 of a second */
520 /* stop the tracing. */
522 trace->ctrl_update(tr);
523 /* check the trace buffer */
524 ret = trace_test_buffer(tr, &count);
/* a 100ms sleep implies at least one context switch was traced */
527 if (!ret && !count) {
528 printk(KERN_CONT ".. no entries found ..");
534 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */