[SPARC64]: Add irqtrace/stacktrace/lockdep support.
author    David S. Miller <davem@davemloft.net>
          Thu, 16 Nov 2006 21:38:57 +0000 (13:38 -0800)
committer David S. Miller <davem@sunset.davemloft.net>
          Sun, 10 Dec 2006 10:39:09 +0000 (02:39 -0800)
Signed-off-by: David S. Miller <davem@davemloft.net>
13 files changed:
arch/sparc64/Kconfig
arch/sparc64/Kconfig.debug
arch/sparc64/kernel/Makefile
arch/sparc64/kernel/entry.S
arch/sparc64/kernel/head.S
arch/sparc64/kernel/rtrap.S
arch/sparc64/kernel/stacktrace.c [new file with mode: 0644]
arch/sparc64/kernel/sun4v_ivec.S
arch/sparc64/mm/ultra.S
include/asm-sparc64/irqflags.h [new file with mode: 0644]
include/asm-sparc64/rwsem.h
include/asm-sparc64/system.h
include/asm-sparc64/ttable.h

diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index d391d11f245ab0d8d06ce6a9e92c95a1b4b989f8..d41f66ac7fff71dd8768271775aad6b183de987f 100644
@@ -26,6 +26,14 @@ config MMU
        bool
        default y
 
+config STACKTRACE_SUPPORT
+       bool
+       default y
+
+config LOCKDEP_SUPPORT
+       bool
+       default y
+
 config TIME_INTERPOLATION
        bool
        default y
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index afe0a7720a26492d08e52b40862817c5ac658081..1f130f3b6c24affc675d29425e7a21d9e8cd2ef7 100644
@@ -1,5 +1,9 @@
 menu "Kernel hacking"
 
+config TRACE_IRQFLAGS_SUPPORT
+       bool
+       default y
+
 source "lib/Kconfig.debug"
 
 config DEBUG_STACK_USAGE
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index e1eabebaed398134bba3f2be668ac56ae922de30..eff0c01d3579671d2ab8ab54fa79bb9faaedbc4c 100644
@@ -14,6 +14,7 @@ obj-y         := process.o setup.o cpu.o idprom.o \
                   power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
                   visemul.o prom.o of_device.o
 
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_PCI)       += ebus.o isa.o pci_common.o pci_iommu.o \
                            pci_psycho.o pci_sabre.o pci_schizo.o \
                            pci_sun4v.o pci_sun4v_asm.o
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 6f28bec0a9bf5d4ea8e86806afeb22449099a4c8..c15a3edcb826d93706a69d1bb212fa6269fda6f3 100644
@@ -597,7 +597,12 @@ __spitfire_cee_trap_continue:
 1:     ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
 
-2:     mov             %l4, %o1
+2:
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call    trace_hardirqs_off
+        nop
+#endif
+       mov             %l4, %o1
        mov             %l5, %o2
        call            spitfire_access_error
         add            %sp, PTREGS_OFF, %o0
@@ -824,6 +829,10 @@ do_cheetah_plus_data_parity:
        wrpr            %g0, 15, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        mov             0x0, %o0
        call            cheetah_plus_parity_error
         add            %sp, PTREGS_OFF, %o1
@@ -855,6 +864,10 @@ do_cheetah_plus_insn_parity:
        wrpr            %g0, 15, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        mov             0x1, %o0
        call            cheetah_plus_parity_error
         add            %sp, PTREGS_OFF, %o1
@@ -1183,6 +1196,10 @@ c_fast_ecc:
        wrpr            %g0, 15, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        mov             %l4, %o1
        mov             %l5, %o2
        call            cheetah_fecc_handler
@@ -1211,6 +1228,10 @@ c_cee:
        wrpr            %g0, 15, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        mov             %l4, %o1
        mov             %l5, %o2
        call            cheetah_cee_handler
@@ -1239,6 +1260,10 @@ c_deferred:
        wrpr            %g0, 15, %pil
        ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        mov             %l4, %o1
        mov             %l5, %o2
        call            cheetah_deferred_handler
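
As an illustration only (not part of this commit), the pattern repeated in each
entry.S hunk above can be expressed in C: once a trap path has raised %pil to 15
and entered through etrap_irq, lockdep is told that hardirqs are off before any
C handler that might take locks runs.  The function and argument names below are
illustrative.

	#include <linux/irqflags.h>
	#include <asm/ptrace.h>

	/* C-level sketch of the contract the added assembly enforces. */
	static void example_error_trap_entry(struct pt_regs *regs)
	{
		/* equivalent of the added "call trace_hardirqs_off" */
		trace_hardirqs_off();

		/* now safe to enter C handlers such as spitfire_access_error()
		 * or cheetah_deferred_handler(), which may take spinlocks
		 */
	}
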
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index c8e9dc9d68a9758c62d8e6efc7b5e0a02d88a8e4..03ffaf895a227760fe1ba8e475dcdda56444903b 100644
@@ -489,6 +489,14 @@ tlb_fixup_done:
        call    __bzero
         sub    %o1, %o0, %o1
 
+#ifdef CONFIG_LOCKDEP
+       /* We have to call this super early, as even prom_init can grab
+        * spinlocks and thus call into the lockdep code.
+        */
+       call    lockdep_init
+        nop
+#endif
+
        mov     %l6, %o1                        ! OpenPROM stack
        call    prom_init
         mov    %l7, %o0                        ! OpenPROM cif handler
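
For context, a self-contained sketch of what lockdep_init() amounts to (assumed
names and sizes, paraphrased from the generic lockdep code of this era; not part
of this patch): it only initializes hash-table list heads, which is why running
it this early, before prom_init() can take its first spinlock, is both safe and
required.

	#include <linux/list.h>

	#define EXAMPLE_CLASSHASH_SIZE	4096	/* illustrative size */

	static struct list_head example_classhash_table[EXAMPLE_CLASSHASH_SIZE];
	static int example_lockdep_initialized;

	static void example_lockdep_init(void)
	{
		int i;

		if (example_lockdep_initialized)
			return;

		for (i = 0; i < EXAMPLE_CLASSHASH_SIZE; i++)
			INIT_LIST_HEAD(&example_classhash_table[i]);

		example_lockdep_initialized = 1;
	}
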
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 3522cd66f3bb36e0e1250b4ac7cd9dadebcfb52d..079d18a11d247fcb3ee668e06438004bb500db74 100644
@@ -165,14 +165,26 @@ rtrap:
 __handle_softirq_continue:
 rtrap_xcall:
                sethi                   %hi(0xf << 20), %l4
-               andcc                   %l1, TSTATE_PRIV, %l3
                and                     %l1, %l4, %l4
+               andn                    %l1, %l4, %l1
+               srl                     %l4, 20, %l4
+#ifdef CONFIG_TRACE_IRQFLAGS
+               brnz,pn                 %l4, rtrap_no_irq_enable
+                nop
+               call                    trace_hardirqs_on
+                nop
+               wrpr                    %l4, %pil
+rtrap_no_irq_enable:
+#endif
+               andcc                   %l1, TSTATE_PRIV, %l3
                bne,pn                  %icc, to_kernel
-                andn                   %l1, %l4, %l1
+                nop
 
                /* We must hold IRQs off and atomically test schedule+signal
                 * state, then hold them off all the way back to userspace.
-                * If we are returning to kernel, none of this matters.
+                * If we are returning to kernel, none of this matters.  Note
+                * that we are disabling interrupts via PSTATE_IE, not using
+                * %pil.
                 *
                 * If we do not do this, there is a window where we would do
                 * the tests, later the signal/resched event arrives but we do
@@ -256,7 +268,6 @@ rt_continue:        ldx                     [%sp + PTREGS_OFF + PT_V9_G1], %g1
 
                ld                      [%sp + PTREGS_OFF + PT_V9_Y], %o3
                wr                      %o3, %g0, %y
-               srl                     %l4, 20, %l4
                wrpr                    %l4, 0x0, %pil
                wrpr                    %g0, 0x1, %tl
                wrpr                    %l1, %g0, %tstate
@@ -374,8 +385,8 @@ to_kernel:
                 ldx                    [%g6 + TI_FLAGS], %l5
                andcc                   %l5, _TIF_NEED_RESCHED, %g0
                be,pt                   %xcc, kern_fpucheck
-                srl                    %l4, 20, %l5
-               cmp                     %l5, 0
+                nop
+               cmp                     %l4, 0
                bne,pn                  %xcc, kern_fpucheck
                 sethi                  %hi(PREEMPT_ACTIVE), %l6
                stw                     %l6, [%g6 + TI_PRE_COUNT]
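
Expressed as C, the TRACE_IRQFLAGS block added at rtrap_xcall does roughly the
following with the PIL value recovered from %tstate into %l4 (a sketch of the
logic only; this function does not exist in the tree).

	#include <linux/irqflags.h>

	/* Sketch: only when the saved PIL is zero, i.e. interrupts will be
	 * fully enabled on return, is lockdep notified, and %pil is written
	 * back immediately so the hardware state matches lockdep's view.
	 */
	static void example_rtrap_irqtrace(unsigned long saved_pil)
	{
		if (saved_pil != 0)
			return;		/* branch to rtrap_no_irq_enable */

		trace_hardirqs_on();
		/* wrpr saved_pil, %pil: re-enable all interrupt levels */
	}
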
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
new file mode 100644
index 0000000..c4d15f2
--- /dev/null
+++ b/arch/sparc64/kernel/stacktrace.c
@@ -0,0 +1,41 @@
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <asm/ptrace.h>
+
+void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+{
+       unsigned long ksp, fp, thread_base;
+       struct thread_info *tp;
+
+       if (!task)
+               task = current;
+       tp = task_thread_info(task);
+       if (task == current) {
+               flushw_all();
+               __asm__ __volatile__(
+                       "mov    %%fp, %0"
+                       : "=r" (ksp)
+               );
+       } else
+               ksp = tp->ksp;
+
+       fp = ksp + STACK_BIAS;
+       thread_base = (unsigned long) tp;
+       do {
+               struct reg_window *rw;
+
+               /* Bogus frame pointer? */
+               if (fp < (thread_base + sizeof(struct thread_info)) ||
+                   fp >= (thread_base + THREAD_SIZE))
+                       break;
+
+               rw = (struct reg_window *) fp;
+               if (trace->skip > 0)
+                       trace->skip--;
+               else
+                       trace->entries[trace->nr_entries++] = rw->ins[7];
+
+               fp = rw->ins[6] + STACK_BIAS;
+       } while (trace->nr_entries < trace->max_entries);
+}
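
A hypothetical caller of the new save_stack_trace() through the generic
stacktrace API (illustrative only; example_entries and the helper name are not
from this patch):

	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <linux/stacktrace.h>

	static unsigned long example_entries[16];

	/* Capture the current task's kernel backtrace into example_entries. */
	static void example_capture_backtrace(void)
	{
		struct stack_trace trace = {
			.entries	= example_entries,
			.max_entries	= ARRAY_SIZE(example_entries),
			.nr_entries	= 0,
			.skip		= 1,	/* drop this helper's own frame */
		};

		save_stack_trace(&trace, current);
		/* example_entries[0 .. trace.nr_entries - 1] now hold the
		 * saved return addresses (the rw->ins[7] values)
		 */
	}
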
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index 49703c3c57694fe45972d36f204ee57f50a3a033..405855dd886bc9f7afd0795b7ca93a496e2a2db6 100644
@@ -190,7 +190,10 @@ sun4v_res_mondo:
        mov     %g1, %g4
        ba,pt   %xcc, etrap_irq
         rd     %pc, %g7
-
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        /* Log the event.  */
        add     %sp, PTREGS_OFF, %o0
        call    sun4v_resum_error
@@ -216,7 +219,10 @@ sun4v_res_mondo_queue_full:
        wrpr    %g0, 15, %pil
        ba,pt   %xcc, etrap_irq
         rd     %pc, %g7
-
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        call    sun4v_resum_overflow
         add    %sp, PTREGS_OFF, %o0
 
@@ -295,7 +301,10 @@ sun4v_nonres_mondo:
        mov     %g1, %g4
        ba,pt   %xcc, etrap_irq
         rd     %pc, %g7
-
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        /* Log the event.  */
        add     %sp, PTREGS_OFF, %o0
        call    sun4v_nonresum_error
@@ -321,7 +330,10 @@ sun4v_nonres_mondo_queue_full:
        wrpr    %g0, 15, %pil
        ba,pt   %xcc, etrap_irq
         rd     %pc, %g7
-
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        call    sun4v_nonresum_overflow
         add    %sp, PTREGS_OFF, %o0
 
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index d70b60a3bbccb5340cf517dcf64657eba1927456..737c26923c0963470a6059f3ed80056b9ac04367 100644
@@ -477,6 +477,10 @@ xcall_sync_tick:
        sethi           %hi(109f), %g7
        b,pt            %xcc, etrap_irq
 109:    or             %g7, %lo(109b), %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        call            smp_synchronize_tick_client
         nop
        clr             %l6
@@ -508,6 +512,10 @@ xcall_report_regs:
        sethi           %hi(109f), %g7
        b,pt            %xcc, etrap_irq
 109:    or             %g7, %lo(109b), %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
        call            __show_regs
         add            %sp, PTREGS_OFF, %o0
        clr             %l6
diff --git a/include/asm-sparc64/irqflags.h b/include/asm-sparc64/irqflags.h
new file mode 100644
index 0000000..024fc54
--- /dev/null
+++ b/include/asm-sparc64/irqflags.h
@@ -0,0 +1,89 @@
+/*
+ * include/asm-sparc64/irqflags.h
+ *
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() functions from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+       unsigned long flags;
+
+       __asm__ __volatile__(
+               "rdpr   %%pil, %0"
+               : "=r" (flags)
+       );
+
+       return flags;
+}
+
+#define raw_local_save_flags(flags) \
+               do { (flags) = __raw_local_save_flags(); } while (0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+       __asm__ __volatile__(
+               "wrpr   %0, %%pil"
+               : /* no output */
+               : "r" (flags)
+               : "memory"
+       );
+}
+
+static inline void raw_local_irq_disable(void)
+{
+       __asm__ __volatile__(
+               "wrpr   15, %%pil"
+               : /* no outputs */
+               : /* no inputs */
+               : "memory"
+       );
+}
+
+static inline void raw_local_irq_enable(void)
+{
+       __asm__ __volatile__(
+               "wrpr   0, %%pil"
+               : /* no outputs */
+               : /* no inputs */
+               : "memory"
+       );
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+       return (flags > 0);
+}
+
+static inline int raw_irqs_disabled(void)
+{
+       unsigned long flags = __raw_local_save_flags();
+
+       return raw_irqs_disabled_flags(flags);
+}
+
+/*
+ * For spinlocks, etc:
+ */
+static inline unsigned long __raw_local_irq_save(void)
+{
+       unsigned long flags = __raw_local_save_flags();
+
+       raw_local_irq_disable();
+
+       return flags;
+}
+
+#define raw_local_irq_save(flags) \
+               do { (flags) = __raw_local_irq_save(); } while (0)
+
+#endif /* (__ASSEMBLY__) */
+
+#endif /* !(_ASM_IRQFLAGS_H) */
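
For reference, a hypothetical consumer of these accessors once they are wrapped
by <linux/irqflags.h> (which folds in the trace_hardirqs_*() calls under
CONFIG_TRACE_IRQFLAGS); the example names are illustrative.

	#include <linux/irqflags.h>

	static int example_count;	/* data protected from local interrupts */

	/* On sparc64 this boils down to: rdpr %pil -> wrpr 15, %pil -> ...
	 * -> wrpr flags, %pil, exactly the sequences defined above.
	 */
	static void example_local_irq_section(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		example_count++;
		local_irq_restore(flags);
	}
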
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index cef5e8270421beda324419c8210b4ffa3a7e294d..1294b7ce5d06c0987870424ddfceacc87ed42b08 100644
@@ -23,20 +23,33 @@ struct rw_semaphore {
        signed int count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
 };
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
+{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
+  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
-static __inline__ void init_rwsem(struct rw_semaphore *sem)
-{
-       sem->count = RWSEM_UNLOCKED_VALUE;
-       spin_lock_init(&sem->wait_lock);
-       INIT_LIST_HEAD(&sem->wait_list);
-}
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+                        struct lock_class_key *key);
+
+#define init_rwsem(sem)                                                \
+do {                                                           \
+       static struct lock_class_key __key;                     \
+                                                               \
+       __init_rwsem((sem), #sem, &__key);                      \
+} while (0)
 
 extern void __down_read(struct rw_semaphore *sem);
 extern int __down_read_trylock(struct rw_semaphore *sem);
@@ -46,6 +59,11 @@ extern void __up_read(struct rw_semaphore *sem);
 extern void __up_write(struct rw_semaphore *sem);
 extern void __downgrade_write(struct rw_semaphore *sem);
 
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+       __down_write(sem);
+}
+
 static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
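
With the init_rwsem() conversion above, every initialization site supplies its
own static lock_class_key, so lockdep can assign a distinct class per call
site.  A hypothetical user (names are illustrative):

	#include <linux/rwsem.h>

	static struct rw_semaphore example_sem;

	static void example_rwsem_setup(void)
	{
		/* expands to: static struct lock_class_key __key;
		 *             __init_rwsem(&example_sem, "&example_sem", &__key);
		 */
		init_rwsem(&example_sem);

		down_write(&example_sem);
		/* ... touch data guarded by example_sem ... */
		up_write(&example_sem);
	}
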
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index a8b7432c9a70825f3b7d0c6e3b02f3772c24673c..32281acb878b3dc82745266ea1c5b6fd1dd87e17 100644
@@ -7,6 +7,9 @@
 #include <asm/visasm.h>
 
 #ifndef __ASSEMBLY__
+
+#include <linux/irqflags.h>
+
 /*
  * Sparc (general) CPU types
  */
@@ -72,52 +75,6 @@ do { __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
 
 #endif
 
-#define setipl(__new_ipl) \
-       __asm__ __volatile__("wrpr      %0, %%pil"  : : "r" (__new_ipl) : "memory")
-
-#define local_irq_disable() \
-       __asm__ __volatile__("wrpr      15, %%pil" : : : "memory")
-
-#define local_irq_enable() \
-       __asm__ __volatile__("wrpr      0, %%pil" : : : "memory")
-
-#define getipl() \
-({ unsigned long retval; __asm__ __volatile__("rdpr    %%pil, %0" : "=r" (retval)); retval; })
-
-#define swap_pil(__new_pil) \
-({     unsigned long retval; \
-       __asm__ __volatile__("rdpr      %%pil, %0\n\t" \
-                            "wrpr      %1, %%pil" \
-                            : "=&r" (retval) \
-                            : "r" (__new_pil) \
-                            : "memory"); \
-       retval; \
-})
-
-#define read_pil_and_cli() \
-({     unsigned long retval; \
-       __asm__ __volatile__("rdpr      %%pil, %0\n\t" \
-                            "wrpr      15, %%pil" \
-                            : "=r" (retval) \
-                            : : "memory"); \
-       retval; \
-})
-
-#define local_save_flags(flags)                ((flags) = getipl())
-#define local_irq_save(flags)          ((flags) = read_pil_and_cli())
-#define local_irq_restore(flags)               setipl((flags))
-
-/* On sparc64 IRQ flags are the PIL register.  A value of zero
- * means all interrupt levels are enabled, any other value means
- * only IRQ levels greater than that value will be received.
- * Consequently this means that the lowest IRQ level is one.
- */
-#define irqs_disabled()                \
-({     unsigned long flags;    \
-       local_save_flags(flags);\
-       (flags > 0);            \
-})
-
 #define nop()          __asm__ __volatile__ ("nop")
 
 #define read_barrier_depends()         do { } while(0)
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index f2352606a79f5a1dd6e0f8ff305d6becc1665ce7..c2a16e1884992a0c9bd13dbbf774731eb1fe2ecf 100644
 #endif
 #define BREAKPOINT_TRAP TRAP(breakpoint_trap)
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+#define TRAP_IRQ(routine, level)                       \
+       rdpr    %pil, %g2;                              \
+       wrpr    %g0, 15, %pil;                          \
+       sethi   %hi(1f-4), %g7;                         \
+       ba,pt   %xcc, etrap_irq;                        \
+        or     %g7, %lo(1f-4), %g7;                    \
+       nop;                                            \
+       nop;                                            \
+       nop;                                            \
+       .subsection     2;                              \
+1:     call    trace_hardirqs_off;                     \
+        nop;                                           \
+       mov     level, %o0;                             \
+       call    routine;                                \
+        add    %sp, PTREGS_OFF, %o1;                   \
+       ba,a,pt %xcc, rtrap_irq;                        \
+       .previous;
+
+#define TICK_SMP_IRQ                                   \
+       rdpr    %pil, %g2;                              \
+       wrpr    %g0, 15, %pil;                          \
+       sethi   %hi(1f-4), %g7;                         \
+       ba,pt   %xcc, etrap_irq;                        \
+        or     %g7, %lo(1f-4), %g7;                    \
+       nop;                                            \
+       nop;                                            \
+       nop;                                            \
+       .subsection     2;                              \
+1:     call    trace_hardirqs_off;                     \
+        nop;                                           \
+       call    smp_percpu_timer_interrupt;             \
+        add    %sp, PTREGS_OFF, %o0;                   \
+       ba,a,pt %xcc, rtrap_irq;                        \
+       .previous;
+
+#else
+
 #define TRAP_IRQ(routine, level)                       \
        rdpr    %pil, %g2;                              \
        wrpr    %g0, 15, %pil;                          \
-       b,pt    %xcc, etrap_irq;                        \
+       ba,pt   %xcc, etrap_irq;                        \
         rd     %pc, %g7;                               \
        mov     level, %o0;                             \
        call    routine;                                \
        rdpr    %pil, %g2;                              \
        wrpr    %g0, 15, %pil;                          \
        sethi   %hi(109f), %g7;                         \
-       b,pt    %xcc, etrap_irq;                        \
+       ba,pt   %xcc, etrap_irq;                        \
 109:    or     %g7, %lo(109b), %g7;                    \
        call    smp_percpu_timer_interrupt;             \
         add    %sp, PTREGS_OFF, %o0;                   \
        ba,a,pt %xcc, rtrap_irq;
 
+#endif
+
 #define TRAP_IVEC TRAP_NOSAVE(do_ivec)
 
 #define BTRAP(lvl) TRAP_ARG(bad_trap, lvl)