/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */
#define DEBUG 1

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/version.h>
#include <linux/kallsyms.h>
#include <asm/pgtable.h>
#include <linux/mmiotrace.h>
#include <asm/e820.h> /* for ISA_START_ADDRESS */
#include <asm/atomic.h>
#include <linux/percpu.h>

#include "pf_in.h"

#define NAME "mmiotrace: "

struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	unsigned long phys;
	unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

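/*
 * The pre/post handlers run in fault context with preemption disabled
 * (get_cpu_var/put_cpu_var), so one record per cpu is enough to carry
 * state from pre() to the matching post().
 */
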
#if 0 /* XXX: no way to gather this info anymore */
/* Access to this is not per-cpu. */
static DEFINE_PER_CPU(atomic_t, dropped);
#endif

static struct dentry *marker_file;

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);		/* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_record is allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */

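/*
 * A sketch of the pattern these rules imply for event-recording paths
 * (this is the shape used by ioremap_trace_core() below, not a new API):
 *
 *	spin_lock_irq(&trace_lock);
 *	if (!is_enabled())
 *		goto not_enabled;
 *	mmio_trace_mapping(&map);	(recording is now allowed)
 * not_enabled:
 *	spin_unlock_irq(&trace_lock);
 */
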
/* module parameters */
static unsigned long	filter_offset;
static int		nommiotrace;
static int		trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");

static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}

#if 0 /* XXX: needs rewrite */
/*
 * Write callback for the debugfs entry:
 * Read a marker and write it to the mmio trace log
 */
static ssize_t write_marker(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *event = NULL;
	struct mm_io_header *headp;
	ssize_t len = (count > 65535) ? 65535 : count;

	event = kzalloc(sizeof(*headp) + len, GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	headp = (struct mm_io_header *)event;
	headp->type = MMIO_MAGIC | (MMIO_MARKER << MMIO_OPCODE_SHIFT);
	headp->data_len = len;

	if (copy_from_user(event + sizeof(*headp), buffer, len)) {
		kfree(event);
		return -EFAULT;
	}

	spin_lock_irq(&trace_lock);
#if 0 /* XXX: convert this to use tracing */
	if (is_enabled())
		relay_write(chan, event, sizeof(*headp) + len);
	else
#endif
		len = -EINVAL;
	spin_unlock_irq(&trace_lock);
	kfree(event);
	return len;
}
#endif

static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err(NAME "Error in %s: no pte for page 0x%08lx\n",
							__func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pr_emerg(NAME "4MB pages are not currently supported: "
							"0x%08lx\n", address);
		BUG();
	}
	pr_info(NAME "pte for 0x%lx: 0x%lx 0x%lx\n", address, pte_val(*pte),
		pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);

	pr_emerg(NAME "unexpected fault for address: 0x%08lx, "
					"last fault for address: 0x%08lx\n",
					addr, my_reason->addr);
	print_pte(addr);
	print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip);
	print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
			regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
			regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx   rcx: %016lx   rdx: %016lx\n",
			regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
			regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}

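/*
 * kmmio pre-handler: runs with the traced page armed, just before the
 * faulting instruction is single-stepped. Decodes the instruction and
 * fills in everything about the access that is known before it executes.
 */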
static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->user_data;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
								*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

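/*
 * kmmio post-handler: runs after the instruction has executed. For a
 * register read the destination register only now holds the MMIO value,
 * so it must be sampled here; then the completed event is logged.
 */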
static void post(struct kmmio_probe *p, unsigned long condition,
						struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg(NAME "unexpected post handler");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

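/*
 * Record a new mapping: log a MMIO_PROBE event, remember the mapping on
 * trace_list and arm a kmmio probe covering the whole remapped area.
 */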
static void ioremap_trace_core(unsigned long offset, unsigned long size,
						void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err(NAME "kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.user_data = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}

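/*
 * Hook called from the ioremap implementation for every new mapping.
 * The unlocked is_enabled() test is only an optimization; the core
 * function rechecks under trace_lock.
 */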
void mmiotrace_ioremap(unsigned long offset, unsigned long size,
						void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug(NAME "ioremap_*(0x%lx, 0x%lx) = %p\n", offset, size, addr);
	if (filter_offset && offset != filter_offset)
		return;
	ioremap_trace_core(offset, size, addr);
}

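/*
 * Usage sketch (hypothetical driver code, not part of this file): the
 * hooks above and below make tracing transparent to drivers mapping
 * MMIO the usual way:
 *
 *	void __iomem *regs = ioremap(phys, 0x1000);
 *	writel(1, regs + 0x10);		(logged as MMIO_WRITE)
 *	val = readl(regs + 0x14);	(logged as MMIO_READ)
 *	iounmap(regs);			(logged as MMIO_UNPROBE)
 */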
static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug(NAME "Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}

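/*
 * Hook called from iounmap(). May sleep (synchronize_rcu() in the core
 * function), so it must not be used from atomic context.
 */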
void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}

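/*
 * Drop every mapping that was never iounmapped. Called only from
 * disable_mmiotrace(), after tracing has been switched off.
 */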
static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice(NAME "purging non-iounmapped "
					"trace @0x%08lx, size 0x%lx.\n",
					trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}

#if 0 /* XXX: out of order */
static struct file_operations fops_marker = {
	.owner =	THIS_MODULE,
	.write =	write_marker
};
#endif

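/*
 * Enable tracing. mmiotrace_enabled is flipped while holding both
 * mmiotrace_mutex and trace_lock, per the locking rules above.
 */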
void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

#if 0 /* XXX: tracing does not support text entries */
	marker_file = debugfs_create_file("marker", 0660, dir, NULL,
							&fops_marker);
	if (!marker_file)
		pr_err(NAME "marker file creation failed.\n");
#endif

	if (nommiotrace)
		pr_info(NAME "MMIO tracing disabled.\n");
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info(NAME "enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}

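/*
 * Disable tracing and tear down all remaining probes. Once the flag is
 * cleared under trace_lock, no new events can be recorded, so the trace
 * list can be purged without further locking.
 */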
void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	if (marker_file) {
		debugfs_remove(marker_file);
		marker_file = NULL;
	}

	pr_info(NAME "disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}