1 /* Support for MMIO probes.
2 * Benefits from much code borrowed from kprobes
3 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
4 * 2007 Alexander Eichner
5 * 2008 Pekka Paalanen <pq@iki.fi>
8 #include <linux/version.h>
9 #include <linux/spinlock.h>
10 #include <linux/hash.h>
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
16 #include <linux/uaccess.h>
17 #include <linux/ptrace.h>
18 #include <linux/preempt.h>
20 #include <asm/cacheflush.h>
21 #include <asm/errno.h>
22 #include <asm/tlbflush.h>
23 #include <asm/pgtable.h>
/* Sizing for the probe hash table: 2^6 = 64 buckets. */
27 #define KMMIO_HASH_BITS 6
28 #define KMMIO_TABLE_SIZE (1 << KMMIO_HASH_BITS)
/* Sizing for the fault-page hash table: 2^4 = 16 buckets. */
29 #define KMMIO_PAGE_HASH_BITS 4
30 #define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)
/*
 * Per-CPU state for one in-flight kmmio trap: the armed page that
 * faulted, the probe covering the access (may be NULL), and the
 * caller's TF/IF flag bits, restored in post_kmmio_handler().
 */
32 struct kmmio_context {
33 struct kmmio_fault_page *fpage;
34 struct kmmio_probe *probe;
35 unsigned long saved_flags;
/* Forward declarations for the fault hook and die-notifier defined below. */
39 static int kmmio_page_fault(struct pt_regs *regs, unsigned long error_code,
40 unsigned long address);
41 static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
44 static DEFINE_SPINLOCK(kmmio_lock);
46 /* These are protected by kmmio_lock */
47 unsigned int kmmio_count;
48 static unsigned int handler_registered;
49 static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
50 static LIST_HEAD(kmmio_probes);
/* Per-CPU trap context, indexed by smp_processor_id() in the handlers. */
52 static struct kmmio_context kmmio_ctx[NR_CPUS];
/* Page-fault hook installed while at least one probe is registered. */
54 static struct pf_handler kmmio_pf_hook = {
55 .handler = kmmio_page_fault
/* Die-chain notifier used to catch the single-step (debug) exception. */
58 static struct notifier_block nb_die = {
59 .notifier_call = kmmio_die_notifier
/*
 * NOTE(review): the enclosing init function's signature is not visible in
 * this chunk. Body initializes every fault-page hash bucket and registers
 * the die notifier used for single-step handling.
 */
65 for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
66 INIT_LIST_HEAD(&kmmio_page_table[i]);
68 register_die_notifier(&nb_die);
/*
 * Tear down global kmmio state. All probes must already have been
 * removed with unregister_kmmio_probe().
 */
72 void cleanup_kmmio(void)
75 * Assume the following have been already cleaned by calling
76 * unregister_kmmio_probe() appropriately:
77 * kmmio_page_table, kmmio_probes
/* Only unhook the page-fault handler if we actually installed it. */
79 if (handler_registered) {
80 unregister_page_fault_handler(&kmmio_pf_hook);
83 unregister_die_notifier(&nb_die);
87 * This is basically a dynamic stabbing problem.
88 * Could use the existing prio tree code or
89 * Possible better implementations:
90 * The Interval Skip List: A Data Structure for Finding All Intervals That
91 * Overlap a Point (might be simple)
92 * Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup
94 /* Get the kmmio at this addr (if any). You must be holding kmmio_lock. */
95 static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
97 struct kmmio_probe *p;
/* Linear scan; see the dynamic-stabbing note above for faster options. */
98 list_for_each_entry(p, &kmmio_probes, list) {
/* Match when addr falls within [p->addr, p->addr + p->len]. */
99 if (addr >= p->addr && addr <= (p->addr + p->len))
/* Look up the fault-page entry for this page; caller holds kmmio_lock. */
105 static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
107 struct list_head *head, *tmp;
/* Hash the page address into its bucket, then scan the chain. */
110 head = &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
111 list_for_each(tmp, head) {
112 struct kmmio_fault_page *p
113 = list_entry(tmp, struct kmmio_fault_page, list);
/*
 * Arm a page: clear _PAGE_PRESENT so the next access traps into
 * kmmio_page_fault(). Handles both 4k PTEs and 2M PMD mappings.
 * page_level is an optional out-parameter; callers in this file pass NULL.
 */
121 static void arm_kmmio_fault_page(unsigned long page, int *page_level)
123 unsigned long address = page & PAGE_MASK;
125 pte_t *pte = lookup_address(address, &level);
/* No mapping for this address: nothing to arm. */
128 printk(KERN_ERR "Error in %s: no pte for page 0x%08lx\n",
/* For a 2M mapping the returned "pte" is really the pmd entry. */
133 if (level == PG_LEVEL_2M) {
134 pmd_t *pmd = (pmd_t *)pte;
135 set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_PRESENT));
138 set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
/* Flush the stale TLB entry so the not-present bit takes effect. */
144 __flush_tlb_one(page);
/*
 * Disarm a page: restore _PAGE_PRESENT so the faulting access can be
 * single-stepped. Mirror image of arm_kmmio_fault_page().
 */
147 static void disarm_kmmio_fault_page(unsigned long page, int *page_level)
149 unsigned long address = page & PAGE_MASK;
151 pte_t *pte = lookup_address(address, &level);
154 printk(KERN_ERR "Error in %s: no pte for page 0x%08lx\n",
/* For a 2M mapping operate on the pmd entry instead of a 4k pte. */
159 if (level == PG_LEVEL_2M) {
160 pmd_t *pmd = (pmd_t *)pte;
161 set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_PRESENT));
164 set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
170 __flush_tlb_one(page);
174 * Interrupts are disabled on entry as trap3 is an interrupt gate
175 * and they remain disabled throughout this function.
177 static int kmmio_handler(struct pt_regs *regs, unsigned long addr)
179 struct kmmio_context *ctx;
183 * Preemption is now disabled to prevent process switch during
184 * single stepping. We can only handle one active kmmio trace
185 * per cpu, so ensure that we finish it before something else
188 * XXX what if an interrupt occurs between returning from
189 * do_page_fault() and entering the single-step exception handler?
190 * And that interrupt triggers a kmmio trap?
193 cpu = smp_processor_id();
194 ctx = &kmmio_ctx[cpu];
196 /* interrupts disabled and CPU-local data => atomicity guaranteed. */
/* Recursion guard: a second kmmio fault while one is active is ignored. */
199 * This avoids a deadlock with kmmio_lock.
200 * If this page fault really was due to kmmio trap,
201 * all hell breaks loose.
203 printk(KERN_EMERG "mmiotrace: recursive probe hit on CPU %d, "
204 "for address %lu. Ignoring.\n",
211 * Acquire the kmmio lock to prevent changes affecting
212 * get_kmmio_fault_page() and get_kmmio_probe(), since we save their
214 * The lock is released in post_kmmio_handler().
215 * XXX: could/should get_kmmio_*() be using RCU instead of spinlock?
217 spin_lock(&kmmio_lock);
219 ctx->fpage = get_kmmio_fault_page(addr);
221 /* this page fault is not caused by kmmio */
222 goto no_kmmio_locked;
/* probe may be NULL: the page is traced but no probe covers this addr. */
225 ctx->probe = get_kmmio_probe(addr);
/* Save TF/IF so post_kmmio_handler() can restore the caller's flags. */
226 ctx->saved_flags = (regs->flags & (TF_MASK|IF_MASK));
228 if (ctx->probe && ctx->probe->pre_handler)
229 ctx->probe->pre_handler(ctx->probe, regs, addr);
/* Enable single-stepping and keep interrupts off for the stepped insn. */
231 regs->flags |= TF_MASK;
232 regs->flags &= ~IF_MASK;
234 /* We hold lock, now we set present bit in PTE and single step. */
235 disarm_kmmio_fault_page(ctx->fpage->page, NULL);
240 spin_unlock(&kmmio_lock);
243 preempt_enable_no_resched();
244 /* page fault not handled by kmmio */
249 * Interrupts are disabled on entry as trap1 is an interrupt gate
250 * and they remain disabled throughout this function.
251 * And we hold kmmio lock.
253 static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
255 int cpu = smp_processor_id();
256 struct kmmio_context *ctx = &kmmio_ctx[cpu];
261 if (ctx->probe && ctx->probe->post_handler)
262 ctx->probe->post_handler(ctx->probe, condition, regs);
/* Re-arm the page so the next access to it traps again. */
264 arm_kmmio_fault_page(ctx->fpage->page, NULL);
/* Clear single-step and restore the flags saved in kmmio_handler(). */
266 regs->flags &= ~TF_MASK;
267 regs->flags |= ctx->saved_flags;
269 /* These were acquired in kmmio_handler(). */
271 spin_unlock(&kmmio_lock);
272 preempt_enable_no_resched();
275 * if somebody else is singlestepping across a probe point, flags
276 * will have TF set, in which case, continue the remaining processing
277 * of do_debug, as if this is not a probe hit.
279 if (regs->flags & TF_MASK)
/*
 * Track and arm a fault page. Caller holds kmmio_lock with interrupts
 * disabled, hence GFP_ATOMIC for the allocation.
 */
285 static int add_kmmio_fault_page(unsigned long page)
287 struct kmmio_fault_page *f;
/* Check whether this page is already tracked before allocating. */
290 f = get_kmmio_fault_page(page);
296 f = kmalloc(sizeof(*f), GFP_ATOMIC);
/* Insert into the hash bucket, then clear the present bit. */
303 &kmmio_page_table[hash_long(f->page, KMMIO_PAGE_HASH_BITS)]);
305 arm_kmmio_fault_page(f->page, NULL);
/*
 * Disarm the fault page tracked for this address. Caller holds
 * kmmio_lock; counterpart to add_kmmio_fault_page().
 */
310 static void release_kmmio_fault_page(unsigned long page)
312 struct kmmio_fault_page *f;
315 f = get_kmmio_fault_page(page);
321 disarm_kmmio_fault_page(f->page, NULL);
/*
 * Register a probe covering [p->addr, p->addr + p->len) and arm the
 * underlying pages. Fails if the start address is already covered by
 * an existing probe.
 */
326 int register_kmmio_probe(struct kmmio_probe *p)
329 unsigned long size = 0;
331 spin_lock_irq(&kmmio_lock);
/* Refuse overlapping coverage of the start address. */
333 if (get_kmmio_probe(p->addr)) {
337 list_add(&p->list, &kmmio_probes);
338 /*printk("adding fault pages...\n");*/
/* Arm every page in the probed range. */
339 while (size < p->len) {
340 if (add_kmmio_fault_page(p->addr + size))
341 printk(KERN_ERR "mmio: Unable to set page fault.\n");
/* Install the page-fault hook lazily, on first probe registration. */
345 if (!handler_registered) {
346 register_page_fault_handler(&kmmio_pf_hook);
347 handler_registered++;
351 spin_unlock_irq(&kmmio_lock);
353 * XXX: What should I do here?
354 * Here was a call to global_flush_tlb(), but it does not exist
/*
 * Remove a probe: release every fault page in its range.
 * Counterpart to register_kmmio_probe().
 */
360 void unregister_kmmio_probe(struct kmmio_probe *p)
362 unsigned long size = 0;
364 spin_lock_irq(&kmmio_lock);
365 while (size < p->len) {
366 release_kmmio_fault_page(p->addr + size);
371 spin_unlock_irq(&kmmio_lock);
375 * According to 2.6.20, mainly x86_64 arch:
376 * This is being called from do_page_fault(), via the page fault notifier
377 * chain. The chain is called for both user space faults and kernel space
378 * faults (address >= TASK_SIZE64), except not on faults serviced by
381 * We may be in an interrupt or a critical section. Also prefetching may
382 * trigger a page fault. We may be in the middle of process switch.
383 * The page fault hook functionality has put us inside RCU read lock.
385 * Local interrupts are disabled, so preemption cannot happen.
386 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
388 static int kmmio_page_fault(struct pt_regs *regs, unsigned long error_code,
389 unsigned long address)
/* Only consult the kmmio machinery while at least one probe is active. */
391 if (is_kmmio_active())
392 if (kmmio_handler(regs, address) == 1)
/*
 * Die-chain callback: route single-step (DIE_DEBUG) traps into
 * post_kmmio_handler() to finish an in-flight kmmio trace.
 */
397 static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
400 struct die_args *arg = args;
402 if (val == DIE_DEBUG)
403 if (post_kmmio_handler(arg->err, arg->regs) == 1)