err.no Git - linux-2.6/commitdiff
[IA64] remove stale comments in asm/system.h
author Chen, Kenneth W <kenneth.w.chen@intel.com>
Fri, 27 Jan 2006 02:24:59 +0000 (18:24 -0800)
committer Tony Luck <tony.luck@intel.com>
Thu, 2 Feb 2006 21:20:42 +0000 (13:20 -0800)
With the recent optimization to the wrap_mmu_context function,
we no longer hold tasklist_lock when wrapping the context id.
The comments in asm/system.h must have fallen through the cracks
earlier. Remove the stale comments.

I believe it is still beneficial to release the runqueue lock
across the context switch, so leave __ARCH_WANT_UNLOCKED_CTXSW on.

Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
include/asm-ia64/system.h

index 80c5a234e2599c90006de76e27b47f49b6197ade..06253871562303b223a9cbfef4ba017ed2f44f32 100644 (file)
@@ -249,32 +249,7 @@ extern void ia64_load_extra (struct task_struct *task);
 # define switch_to(prev,next,last)     __switch_to(prev, next, last)
 #endif
 
-/*
- * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
- * because that could cause a deadlock.  Here is an example by Erich Focht:
- *
- * Example:
- * CPU#0:
- * schedule()
- *    -> spin_lock_irq(&rq->lock)
- *    -> context_switch()
- *       -> wrap_mmu_context()
- *          -> read_lock(&tasklist_lock)
- *
- * CPU#1:
- * sys_wait4() or release_task() or forget_original_parent()
- *    -> write_lock(&tasklist_lock)
- *    -> do_notify_parent()
- *       -> wake_up_parent()
- *          -> try_to_wake_up()
- *             -> spin_lock_irq(&parent_rq->lock)
- *
- * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
- * of that CPU which will not be released, because there we wait for the
- * tasklist_lock to become available.
- */
 #define __ARCH_WANT_UNLOCKED_CTXSW
-
 #define ARCH_HAS_PREFETCH_SWITCH_STACK
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
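
For context on the define that is being kept, here is a minimal, self-contained
sketch (plain C, with a pthread mutex standing in for rq->lock) of why dropping
the runqueue lock before the low-level switch avoids the class of deadlock the
removed comment described. The names struct runqueue, arch_switch_to() and
ARCH_WANT_UNLOCKED_CTXSW below are illustrative stand-ins, not the kernel's
actual scheduler code.

/*
 * Conceptual sketch only: how a scheduler core could treat the context
 * switch when the architecture asks for an "unlocked" switch.
 */
#include <pthread.h>
#include <stdio.h>

#define ARCH_WANT_UNLOCKED_CTXSW 1	/* stands in for __ARCH_WANT_UNLOCKED_CTXSW */

struct runqueue {
	pthread_mutex_t lock;		/* stand-in for rq->lock */
};

/* Low-level switch; on ia64 this path may take further locks internally. */
static void arch_switch_to(const char *prev, const char *next)
{
	printf("switching %s -> %s\n", prev, next);
}

static void context_switch(struct runqueue *rq, const char *prev, const char *next)
{
#if ARCH_WANT_UNLOCKED_CTXSW
	/*
	 * Drop the runqueue lock *before* the low-level switch, so any lock
	 * taken inside arch_switch_to() cannot deadlock against another CPU
	 * that is spinning on this runqueue's lock.
	 */
	pthread_mutex_unlock(&rq->lock);
	arch_switch_to(prev, next);
#else
	/*
	 * Classic behaviour: hold the runqueue lock across the switch and
	 * release it only once the new task is running.
	 */
	arch_switch_to(prev, next);
	pthread_mutex_unlock(&rq->lock);
#endif
}

int main(void)
{
	struct runqueue rq = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&rq.lock);	/* schedule() takes rq->lock */
	context_switch(&rq, "prev_task", "next_task");
	return 0;
}

Build with cc -pthread. In the unlocked variant, whatever the low-level switch
locks internally can no longer deadlock against a CPU waiting on this runqueue's
lock, which is the property the removed comment used to justify and which the
commit message argues is still worth keeping on ia64.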