#include <linux/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/fpu.h>
struct task_struct *last_task_used_math = NULL;
*/
void exit_thread(void)
{
- /* See arch/sparc/kernel/process.c for the precedent for doing this -- RPC.
-
- The SH-5 FPU save/restore approach relies on last_task_used_math
- pointing to a live task_struct. When another task tries to use the
- FPU for the 1st time, the FPUDIS trap handling (see
- arch/sh64/kernel/fpu.c) will save the existing FPU state to the
- FP regs field within last_task_used_math before re-loading the new
- task's FPU state (or initialising it if the FPU has been used
- before). So if last_task_used_math is stale, and its page has already been
- re-allocated for another use, the consequences are rather grim. Unless we
- null it here, there is no other path through which it would get safely
- nulled. */
-
+ /*
+ * See arch/sparc/kernel/process.c for the precedent for doing
+ * this -- RPC.
+ *
+ * The SH-5 FPU save/restore approach relies on
+ * last_task_used_math pointing to a live task_struct. When
+ * another task tries to use the FPU for the 1st time, the FPUDIS
+ * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
+ * existing FPU state to the FP regs field within
+ * last_task_used_math before re-loading the new task's FPU state
+ * (or initialising it if the FPU has been used before). So if
+ * last_task_used_math is stale, and its page has already been
+ * re-allocated for another use, the consequences are rather
+ * grim. Unless we null it here, there is no other path through
+ * which it would get safely nulled.
+ */
#ifdef CONFIG_SH_FPU
if (last_task_used_math == current) {
last_task_used_math = NULL;
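
The lazy-switch scheme that comment describes is easier to see in
miniature. The model below is illustrative only: struct task,
hw_fpregs and the helper names are invented, fpu_disabled_trap plays
the role of the FPUDIS handler, and model_exit_thread mirrors the
nulling done above.

#include <stdbool.h>
#include <string.h>

struct task {
	double fpregs[32];		/* saved FP register image */
	bool used_fpu;			/* has this task touched the FPU? */
};

static struct task *fpu_owner;		/* analogue of last_task_used_math */
static double hw_fpregs[32];		/* stand-in for the hardware FPU */

/* Analogue of the FPUDIS trap: runs when a task next touches the FPU. */
static void fpu_disabled_trap(struct task *curr)
{
	if (fpu_owner)			/* spill the previous owner's state */
		memcpy(fpu_owner->fpregs, hw_fpregs, sizeof(hw_fpregs));
	if (curr->used_fpu)		/* reload this task's saved state */
		memcpy(hw_fpregs, curr->fpregs, sizeof(hw_fpregs));
	else {				/* first use: initialise cleanly */
		memset(hw_fpregs, 0, sizeof(hw_fpregs));
		curr->used_fpu = true;
	}
	fpu_owner = curr;
}

/* The fix above, in model form: disown the FPU on exit so the trap
 * never writes through a stale pointer into a recycled page. */
static void model_exit_thread(struct task *curr)
{
	if (fpu_owner == curr)
		fpu_owner = NULL;
}
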
if (fpvalid) {
if (current == last_task_used_math) {
enable_fpu();
- fpsave(&tsk->thread.fpu.hard);
+ save_fpu(tsk, regs);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
#ifdef CONFIG_SH_FPU
if(last_task_used_math == current) {
enable_fpu();
- fpsave(&current->thread.fpu.hard);
+ save_fpu(current, regs);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
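
Both save_fpu() call sites follow the same disown sequence: re-enable
the unit so its registers are accessible, spill them into the thread
struct, disable it again, drop last_task_used_math, and set SR.FD so
the next FP instruction re-enters the FPUDIS trap. Collapsed into one
helper for clarity (a sketch only; fpu_save_and_disown is an invented
name, the five statements inside it are the ones in the hunks above):

static void fpu_save_and_disown(struct task_struct *tsk,
				struct pt_regs *regs)
{
	enable_fpu();			/* make the FP registers accessible */
	save_fpu(tsk, regs);		/* spill them into tsk->thread.fpu */
	disable_fpu();			/* power the unit back down */
	last_task_used_math = NULL;	/* nobody owns the FPU now */
	regs->sr |= SR_FD;		/* next FP insn traps to FPUDIS */
}
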
#define mid_sched ((unsigned long) interruptible_sleep_on)
+#ifdef CONFIG_FRAME_POINTER
static int in_sh64_switch_to(unsigned long pc)
{
extern char __sh64_switch_to_end;
return (pc >= (unsigned long) sh64_switch_to) &&
(pc < (unsigned long) &__sh64_switch_to_end);
}
+#endif
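
The bounds check works because __sh64_switch_to_end is a marker symbol
placed directly after the routine in its assembly source, so taking
the address of the extern char yields the first byte past the
function. The same trick in miniature (my_func and my_func_end are
invented names):

/* In the .S file, a global label directly after the routine:
 *
 *	.globl	my_func_end
 * my_func_end:
 */
extern void my_func(void);
extern char my_func_end;		/* first byte past my_func */

static int pc_in_my_func(unsigned long pc)
{
	return pc >= (unsigned long) my_func &&
	       pc < (unsigned long) &my_func_end;
}
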
unsigned long get_wchan(struct task_struct *p)
{
- unsigned long schedule_fp;
- unsigned long sh64_switch_to_fp;
- unsigned long schedule_caller_pc;
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
#ifdef CONFIG_FRAME_POINTER
if (in_sh64_switch_to(pc)) {
+ unsigned long schedule_fp;
+ unsigned long sh64_switch_to_fp;
+ unsigned long schedule_caller_pc;
+
sh64_switch_to_fp = (long) p->thread.sp;
/* r14 is saved at offset 4 in the sh64_switch_to frame */
schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
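
From there get_wchan() walks one level of the sleeping task's call
chain by hand: p->thread.sp is the frame pointer that sh64_switch_to
saved, and the caller's r14 (schedule's frame pointer) sits at offset
4 inside that frame. A toy model of that kind of chained walk (the
struct layout and offsets below are invented, not the real SH-5
frame):

/* Each frame stores the caller's frame pointer at a known offset,
 * so one dereference per level climbs the call chain. */
struct frame {
	unsigned long saved_fp;		/* caller's frame pointer (r14) */
	unsigned long saved_pc;		/* return address into the caller */
};

static unsigned long one_level_up(unsigned long fp)
{
	struct frame *f = (struct frame *) fp;
	struct frame *caller = (struct frame *) f->saved_fp;

	return caller->saved_pc;	/* where the caller was called from */
}
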
read_lock(&tasklist_lock);
for_each_process(p) {
int pid = p->pid;
- struct mm_struct *mm;
- if (!pid) continue;
- mm = p->mm;
- if (mm) {
- unsigned long asid, context;
- context = mm->context;
- asid = (context & 0xff);
- len += sprintf(buf+len, "%5d : %02lx\n", pid, asid);
- } else {
+
+ if (!pid)
+ continue;
+ if (p->mm)
+ len += sprintf(buf+len, "%5d : %02lx\n", pid,
+ asid_cache(smp_processor_id()));
+ else
len += sprintf(buf+len, "%5d : (none)\n", pid);
- }
}
read_unlock(&tasklist_lock);
*eof = 1;
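
The rewritten loop keeps the old output format: a five-column pid,
then either a two-digit hex ASID or "(none)" for kernel threads with
no mm. Note that it now reports the current CPU's asid_cache value
for every user task, where the removed code masked the per-mm value
(mm->context & 0xff). Sample output (pids and values invented):

    1 : 6b
   42 : 6b
   57 : (none)
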