--- /dev/null
+/*
+ * arch/mips/kernel/stacktrace.c
+ *
+ * Stack trace management functions
+ *
+ * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <asm/stacktrace.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer:
+ */
+static void save_raw_context_stack(struct stack_trace *trace,
+ unsigned int skip, unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ if (!skip)
+ trace->entries[trace->nr_entries++] = addr;
+ else
+ skip--;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ }
+}
+
+static struct pt_regs *save_context_stack(struct stack_trace *trace,
+ unsigned int skip, struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+ extern void ret_from_irq(void);
+
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ save_raw_context_stack(trace, skip, sp);
+ return NULL;
+ }
+ do {
+ if (!skip)
+ trace->entries[trace->nr_entries++] = pc;
+ else
+ skip--;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ /*
+ * If we reached the bottom of interrupt context,
+ * return saved pt_regs.
+ */
+ if (pc == (unsigned long)ret_from_irq) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(task);
+ if (!stack_page ||
+ sp < stack_page ||
+ sp > stack_page + THREAD_SIZE - 32)
+ break;
+ return (struct pt_regs *)sp;
+ }
+ pc = unwind_stack(task, &sp, pc, ra);
+ ra = 0;
+ } while (pc);
+#else
+ save_raw_context_stack(trace, skip, sp);
+#endif
+
+ return NULL;
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ * If all_contexts is set, all contexts (hardirq, softirq and process)
+ * are saved. If not set then only the current context is saved.
+ */
+void save_stack_trace(struct stack_trace *trace,
+ struct task_struct *task, int all_contexts,
+ unsigned int skip)
+{
+ struct pt_regs dummyregs;
+ struct pt_regs *regs = &dummyregs;
+
+ WARN_ON(trace->nr_entries || !trace->max_entries);
+
+ if (task && task != current) {
+ regs->regs[29] = task->thread.reg29;
+ regs->regs[31] = 0;
+ regs->cp0_epc = task->thread.reg31;
+ } else {
+ if (!task)
+ task = current;
+ prepare_frametrace(regs);
+ }
+
+ while (1) {
+ regs = save_context_stack(trace, skip, task, regs);
+ if (!all_contexts || !regs ||
+ trace->nr_entries >= trace->max_entries)
+ break;
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ skip = 0;
+ }
+}
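
A minimal usage sketch of the interface added above (illustration only, not
part of the patch). It assumes the generic struct stack_trace layout whose
nr_entries/max_entries/entries fields are used by save_raw_context_stack()
above; the function name example_trace_current() is hypothetical:

	#include <linux/kernel.h>	/* ARRAY_SIZE */
	#include <linux/stacktrace.h>

	static void example_trace_current(void)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.nr_entries  = 0,
			.max_entries = ARRAY_SIZE(entries),
			.entries     = entries,
		};

		/*
		 * A NULL task means "current"; all_contexts == 0 saves only
		 * the current context, and skip == 0 drops no leading frames.
		 */
		save_stack_trace(&trace, NULL, 0, 0);
	}
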
#include <asm/mmu_context.h>
#include <asm/watch.h>
#include <asm/types.h>
+#include <asm/stacktrace.h>
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
}
#ifdef CONFIG_KALLSYMS
-static int raw_show_trace;
+int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
raw_show_trace = 1;
return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
-
-extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
- unsigned long pc, unsigned long ra);
+#endif
static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
{
} while (pc);
printk("\n");
}
-#else
-#define show_backtrace(task, r) show_raw_backtrace((r)->regs[29]);
-#endif
/*
* This routine abuses get_user()/put_user() to reference pointers
show_backtrace(task, regs);
}
-static __always_inline void prepare_frametrace(struct pt_regs *regs)
-{
- __asm__ __volatile__(
- ".set push\n\t"
- ".set noat\n\t"
-#ifdef CONFIG_64BIT
- "1: dla $1, 1b\n\t"
- "sd $1, %0\n\t"
- "sd $29, %1\n\t"
- "sd $31, %2\n\t"
-#else
- "1: la $1, 1b\n\t"
- "sw $1, %0\n\t"
- "sw $29, %1\n\t"
- "sw $31, %2\n\t"
-#endif
- ".set pop\n\t"
- : "=m" (regs->cp0_epc),
- "=m" (regs->regs[29]), "=m" (regs->regs[31])
- : : "memory");
-}
-
void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
{
struct pt_regs regs;
- /*
- * Remove any garbage that may be in regs (specially func
- * addresses) to avoid show_raw_backtrace() to report them
- */
- memset(&regs, 0, sizeof(regs));
 prepare_frametrace(&regs);
 show_backtrace(current, &regs);
}
--- /dev/null
+#ifndef _ASM_STACKTRACE_H
+#define _ASM_STACKTRACE_H
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_KALLSYMS
+extern int raw_show_trace;
+extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+ unsigned long pc, unsigned long ra);
+#else
+#define raw_show_trace 1
+#define unwind_stack(task, sp, pc, ra) 0
+#endif
+
+static __always_inline void prepare_frametrace(struct pt_regs *regs)
+{
+#ifndef CONFIG_KALLSYMS
+ /*
+ * Remove any garbage that may be in regs (especially function
+ * addresses) so that show_raw_backtrace() does not report them.
+ */
+ memset(regs, 0, sizeof(*regs));
+#endif
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noat\n\t"
+#ifdef CONFIG_64BIT
+ "1: dla $1, 1b\n\t"
+ "sd $1, %0\n\t"
+ "sd $29, %1\n\t"
+ "sd $31, %2\n\t"
+#else
+ "1: la $1, 1b\n\t"
+ "sw $1, %0\n\t"
+ "sw $29, %1\n\t"
+ "sw $31, %2\n\t"
+#endif
+ ".set pop\n\t"
+ : "=m" (regs->cp0_epc),
+ "=m" (regs->regs[29]), "=m" (regs->regs[31])
+ : : "memory");
+}
+
+#endif /* _ASM_STACKTRACE_H */
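
For completeness, a hedged sketch of how prepare_frametrace() is intended to
be used; it mirrors the dump_stack() pattern in the traps.c hunk above, and
the wrapper name example_show_current_backtrace() is hypothetical:

	static void example_show_current_backtrace(void)
	{
		struct pt_regs regs;

		/*
		 * Record the current epc/sp/ra so the unwinder has a valid
		 * starting frame for the running task.  With CONFIG_KALLSYMS
		 * disabled, regs is cleared first so stale stack contents are
		 * not mistaken for return addresses.
		 */
		prepare_frametrace(&regs);
		show_backtrace(current, &regs);
	}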