/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/asm.h>	/* _ASM_EXTABLE(), used in ftrace_modify_code() */

#define CALL_BACK	5	/* size in bytes of the mcount call site */

#define JMPFWD		0x03eb	/* "eb 03": jmp over the 3 leftover bytes */

static unsigned short ftrace_jmp = JMPFWD;

struct ftrace_record {
	struct dyn_ftrace	rec;
	int			failed;
} __attribute__((packed));

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct ftrace_record	records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct ftrace_record))
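
/*
 * Rough numbers for the macro above (sizes are illustrative, not taken
 * from any particular build): with 4096-byte pages, a 12-byte packed
 * page header and, say, a 20-byte ftrace_record, one page holds
 * (4096 - 12) / 20 = 204 records.
 */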

/* estimate from running different kernels */
#define NR_TO_INIT		10000

#define MCOUNT_ADDR ((long)(&mcount))

union ftrace_code_union {
	char code[5];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};
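
/*
 * Byte-level view of the two states of a patched call site (the target
 * bytes shown are illustrative):
 *
 *	enabled:   e8 xx xx xx xx	call mcount (xx = rel32 offset)
 *	disabled:  eb 03 xx xx xx	jmp +3 (skips the stale offset bytes)
 *
 * JMPFWD (0x03eb) is just "eb 03" read as a little-endian short; only
 * these first two bytes ever get rewritten.
 */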

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
{
	struct ftrace_record *rec;
	unsigned short save;

	ip -= CALL_BACK;
	save = *(short *)ip;

	/* If this was already converted, skip it */
	if (save == JMPFWD)
		return NULL;

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	rec = &ftrace_pages->records[ftrace_pages->index++];

	return &rec->rec;
}
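
/*
 * Note on the interface above: the ip handed in is the address mcount
 * returns to, so backing up CALL_BACK (5) bytes lands on the call
 * instruction itself, whose first two bytes are compared against JMPFWD.
 */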

static int notrace
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned short old = *(unsigned short *)old_code;
	unsigned short new = *(unsigned short *)new_code;
	unsigned short replaced;
	int faulted = 0;

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, we need to protect against faulting
	 * as well as code changing.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine.
	 */
	asm volatile (
		"1: lock\n"
		"   cmpxchg %w3, (%2)\n"
		"2:\n"
		".section .fixup, \"ax\"\n"
		"3:	movl $1, %0\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(1b, 3b)
		: "=r"(faulted), "=a"(replaced)
		: "r"(ip), "r"(new), "0"(faulted), "a"(old)
		: "memory");
	sync_core();

	/* the cmpxchg failed: the code at ip was not what we expected */
	if (replaced != old && !faulted)
		faulted = 2;

	return faulted;
}
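
/*
 * Return-value contract implied above (the callers in this file only
 * test for non-zero): 0 means the two bytes were swapped, 1 means the
 * text faulted (e.g. module or __init code that has gone away), 2 means
 * the bytes at ip did not match old_code, i.e. the code changed under us.
 */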

static int notrace ftrace_calc_offset(long ip)
{
	return (int)(MCOUNT_ADDR - ip);
}
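
/*
 * Worked example with invented addresses: a call instruction at
 * 0xc0100000 returns to ip = 0xc0100005; with mcount at 0xc0100100 the
 * encoded rel32 is 0xc0100100 - 0xc0100005 = 0xfb. Passing the return
 * address works out because CALL's offset is relative to the
 * instruction that follows it.
 */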

notrace void ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	union ftrace_code_union save;
	struct ftrace_record *r =
		container_of(rec, struct ftrace_record, rec);

	ip = rec->ip;

	save.e8		= 0xe8;		/* opcode of a 5-byte relative call */
	save.offset	= ftrace_calc_offset(ip);

	/* move the IP back to the start of the call */
	ip -= CALL_BACK;

	r->failed = ftrace_modify_code(ip, save.code, (char *)&ftrace_jmp);
}

static void notrace ftrace_replace_code(int saved)
{
	unsigned char *new = NULL, *old = NULL;
	struct ftrace_record *rec;
	struct ftrace_page *pg;
	unsigned long ip;
	int i;

	if (saved)
		old = (char *)&ftrace_jmp;
	else
		new = (char *)&ftrace_jmp;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			union ftrace_code_union calc;
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->failed)
				continue;

			ip = rec->rec.ip;

			calc.e8		= 0xe8;
			calc.offset	= ftrace_calc_offset(ip);

			if (saved)
				new = calc.code;
			else
				old = calc.code;

			/* move the IP back to the start of the call */
			ip -= CALL_BACK;

			rec->failed = ftrace_modify_code(ip, old, new);
		}
	}
}
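
/*
 * Direction of the patching above: saved == 1 turns jmp-disabled sites
 * back into calls (old = the jmp, new = a freshly computed call),
 * saved == 0 turns calls into the short jmp. Either way only the first
 * two bytes differ, which is why the 16-bit cmpxchg in
 * ftrace_modify_code() is sufficient.
 */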

notrace void ftrace_startup_code(void)
{
	ftrace_replace_code(1);
}

notrace void ftrace_shutdown_code(void)
{
	ftrace_replace_code(0);
}

notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
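
/*
 * Presumably called from a sleepable context: ftrace_alloc_shutdown_node()
 * runs from the trace hook and cannot allocate, so a spare page is kept
 * chained in ahead of time.
 */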

notrace int ftrace_shutdown_arch_init(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *  final linking to find all calls to ftrace.
	 *  Then we can:
	 *   a) know how many pages to allocate.
	 *   b) set up the table then.
	 *
	 * The dynamic code is still necessary for modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
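
	/*
	 * Back-of-the-envelope (record size illustrative): at ~200 entries
	 * per 4 KB page this is about 10000 / 200 = 50 extra pages, i.e.
	 * roughly 200 KB preallocated for the estimated call sites.
	 */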

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}