err.no Git - linux-2.6/blobdiff - arch/sh/kernel/process_64.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6
[linux-2.6] / arch / sh / kernel / process_64.c
index 92d01465eb87a916653e7b995e1dab98fa176611..0283d8133075b0507d04d5ca5417a3a6c1425017 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/io.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/fpu.h>
 
 struct task_struct *last_task_used_math = NULL;
 
@@ -420,19 +422,22 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
  */
 void exit_thread(void)
 {
-       /* See arch/sparc/kernel/process.c for the precedent for doing this -- RPC.
-
-          The SH-5 FPU save/restore approach relies on last_task_used_math
-          pointing to a live task_struct.  When another task tries to use the
-          FPU for the 1st time, the FPUDIS trap handling (see
-          arch/sh64/kernel/fpu.c) will save the existing FPU state to the
-          FP regs field within last_task_used_math before re-loading the new
-          task's FPU state (or initialising it if the FPU has been used
-          before).  So if last_task_used_math is stale, and its page has already been
-          re-allocated for another use, the consequences are rather grim. Unless we
-          null it here, there is no other path through which it would get safely
-          nulled. */
-
+       /*
+        * See arch/sparc/kernel/process.c for the precedent for doing
+        * this -- RPC.
+        *
+        * The SH-5 FPU save/restore approach relies on
+        * last_task_used_math pointing to a live task_struct.  When
+        * another task tries to use the FPU for the 1st time, the FPUDIS
+        * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
+        * existing FPU state to the FP regs field within
+        * last_task_used_math before re-loading the new task's FPU state
+        * (or initialising it if the FPU has been used before).  So if
+        * last_task_used_math is stale, and its page has already been
+        * re-allocated for another use, the consequences are rather
+        * grim. Unless we null it here, there is no other path through
+        * which it would get safely nulled.
+        */
 #ifdef CONFIG_SH_FPU
        if (last_task_used_math == current) {
                last_task_used_math = NULL;
@@ -479,7 +484,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
        if (fpvalid) {
                if (current == last_task_used_math) {
                        enable_fpu();
-                       fpsave(&tsk->thread.fpu.hard);
+                       save_fpu(tsk, regs);
                        disable_fpu();
                        last_task_used_math = 0;
                        regs->sr |= SR_FD;
@@ -506,7 +511,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 #ifdef CONFIG_SH_FPU
        if(last_task_used_math == current) {
                enable_fpu();
-               fpsave(&current->thread.fpu.hard);
+               save_fpu(current, regs);
                disable_fpu();
                last_task_used_math = NULL;
                regs->sr |= SR_FD;
@@ -619,6 +624,7 @@ extern void interruptible_sleep_on(wait_queue_head_t *q);
 
 #define mid_sched      ((unsigned long) interruptible_sleep_on)
 
+#ifdef CONFIG_FRAME_POINTER
 static int in_sh64_switch_to(unsigned long pc)
 {
        extern char __sh64_switch_to_end;
@@ -627,12 +633,10 @@ static int in_sh64_switch_to(unsigned long pc)
        return (pc >= (unsigned long) sh64_switch_to) &&
               (pc < (unsigned long) &__sh64_switch_to_end);
 }
+#endif
 
 unsigned long get_wchan(struct task_struct *p)
 {
-       unsigned long schedule_fp;
-       unsigned long sh64_switch_to_fp;
-       unsigned long schedule_caller_pc;
        unsigned long pc;
 
        if (!p || p == current || p->state == TASK_RUNNING)
@@ -645,6 +649,10 @@ unsigned long get_wchan(struct task_struct *p)
 
 #ifdef CONFIG_FRAME_POINTER
        if (in_sh64_switch_to(pc)) {
+               unsigned long schedule_fp;
+               unsigned long sh64_switch_to_fp;
+               unsigned long schedule_caller_pc;
+
                sh64_switch_to_fp = (long) p->thread.sp;
                /* r14 is saved at offset 4 in the sh64_switch_to frame */
                schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
@@ -674,17 +682,14 @@ asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void
        read_lock(&tasklist_lock);
        for_each_process(p) {
                int pid = p->pid;
-               struct mm_struct *mm;
-               if (!pid) continue;
-               mm = p->mm;
-               if (mm) {
-                       unsigned long asid, context;
-                       context = mm->context;
-                       asid = (context & 0xff);
-                       len += sprintf(buf+len, "%5d : %02lx\n", pid, asid);
-               } else {
+
+               if (!pid)
+                       continue;
+               if (p->mm)
+                       len += sprintf(buf+len, "%5d : %02lx\n", pid,
+                                      asid_cache(smp_processor_id()));
+               else
                        len += sprintf(buf+len, "%5d : (none)\n", pid);
-               }
        }
        read_unlock(&tasklist_lock);
        *eof = 1;