From 05062d96a23ec0959ee5ea969f40813170c73c0e Mon Sep 17 00:00:00 2001
From: Peter Chubb
Date: Wed, 8 Jun 2005 15:50:20 -0700
Subject: [PATCH] [PATCH] ia64: fix floating-point preemption problem

There've been reports of problems with CONFIG_PREEMPT=y and the high
floating point partition.  This is caused by the possibility of preemption
and rescheduling on a different processor while saving or restoring the
high partition.

The only places where the FPU state is touched are in ptrace, in
switch_to(), and when handling a floating-point exception.  In switch_to()
preemption is off.  So it's only in traps.c and ptrace.c that we need to
prevent preemption.

Here is a patch that adds commentary to make the conditions clear, and adds
appropriate preempt_{en,dis}able() calls to make it so.  In traps.c I use
preempt_enable_no_resched(), as we're about to return to user space where
the preemption flag will be checked anyway.

Signed-off-by: Peter Chubb
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/kernel/ptrace.c    |  6 ++++++
 arch/ia64/kernel/traps.c     | 11 ++++++++++-
 include/asm-ia64/processor.h | 10 ++++++++--
 3 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 08c8a5eb25..575a8f657b 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -635,11 +635,17 @@ ia64_flush_fph (struct task_struct *task)
 {
 	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
 
+	/*
+	 * Prevent migrating this task while
+	 * we're fiddling with the FPU state
+	 */
+	preempt_disable();
 	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
 		psr->mfh = 0;
 		task->thread.flags |= IA64_THREAD_FPH_VALID;
 		ia64_save_fpu(&task->thread.fph[0]);
 	}
+	preempt_enable();
 }
 
 /*
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 9bad6652d5..1861173bd4 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -220,13 +220,21 @@ disabled_fph_fault (struct pt_regs *regs)
 
 	/* first, grant user-level access to fph partition: */
 	psr->dfh = 0;
+
+	/*
+	 * Make sure that no other task gets in on this processor
+	 * while we're claiming the FPU
+	 */
+	preempt_disable();
 #ifndef CONFIG_SMP
 	{
 		struct task_struct *fpu_owner
 			= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);
 
-		if (ia64_is_local_fpu_owner(current))
+		if (ia64_is_local_fpu_owner(current)) {
+			preempt_enable_no_resched();
 			return;
+		}
 
 		if (fpu_owner)
 			ia64_flush_fph(fpu_owner);
@@ -244,6 +252,7 @@ disabled_fph_fault (struct pt_regs *regs)
 		 */
 		psr->mfh = 1;
 	}
+	preempt_enable_no_resched();
 }
 
 static inline int
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 9e1ba8b7fb..91bbd1f224 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -403,7 +403,10 @@ extern void ia64_setreg_unknown_kr (void);
  * task_struct at this point.
  */
 
-/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
+/*
+ * Return TRUE if task T owns the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
 #define ia64_is_local_fpu_owner(t)						\
 ({										\
 	struct task_struct *__ia64_islfo_task = (t);				\
@@ -411,7 +414,10 @@ extern void ia64_setreg_unknown_kr (void);
 	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
 })
 
-/* Mark task T as owning the fph partition of the CPU we're running on. */
+/*
+ * Mark task T as owning the fph partition of the CPU we're running on.
+ * Must be called from code that has preemption disabled.
+ */
 #define ia64_set_local_fpu_owner(t) do {					\
 	struct task_struct *__ia64_slfo_task = (t);				\
 	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();		\
-- 
2.39.5
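For readers skimming the diff, the shape of the fix is the same in both files:
any code path that checks or saves the lazily-managed fph (high floating-point)
state must stay on one CPU from the ownership check through the save, so it is
bracketed with preempt_disable()/preempt_enable().  The fragment below is a
minimal illustrative sketch, not part of the patch: flush_fph_sketch() is a
hypothetical name, it reuses the real ia64 helpers referenced above, and it
omits the psr->mfh bookkeeping that the real ia64_flush_fph() performs.

	#include <linux/sched.h>	/* struct task_struct */
	#include <linux/preempt.h>	/* preempt_disable()/preempt_enable() */
	#include <asm/processor.h>	/* ia64_is_local_fpu_owner(), ia64_save_fpu() */

	/* Sketch only: save the high FP partition without being migrated mid-way. */
	static void flush_fph_sketch(struct task_struct *task)
	{
		preempt_disable();		/* pin this task to the current CPU */
		if (ia64_is_local_fpu_owner(task)) {
			/* per-CPU ownership check is only valid with preemption off */
			task->thread.flags |= IA64_THREAD_FPH_VALID;
			ia64_save_fpu(&task->thread.fph[0]);
		}
		preempt_enable();		/* rescheduling/migration allowed again */
	}

In disabled_fph_fault() the closing call is preempt_enable_no_resched() instead,
since the handler is about to return to user space, where the need-resched flag
will be checked anyway.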