[PATCH] sched: likely profiling
author     Nick Piggin <npiggin@suse.de>
           Wed, 11 Oct 2006 08:21:52 +0000 (01:21 -0700)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Wed, 11 Oct 2006 18:14:22 +0000 (11:14 -0700)
This likely profiling is pretty fun. I found a few possible problems
in sched.c.

This patch may not be measurable, but when I did measure long ago,
nooping (un)likely cost a couple of % on scheduler-heavy benchmarks, so
it all adds up.
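
For background on what is being toggled here: likely() and unlikely() are
thin wrappers around GCC's __builtin_expect, and "nooping" them for a
measurement just means compiling with definitions that drop the hint. A
minimal user-space sketch of that idea (illustrative only, not copied from
include/linux/compiler.h):

/* Sketch: likely()/unlikely() wrap __builtin_expect; the "nooped"
 * variants pass the condition through unchanged, which is how the
 * cost of the hints themselves can be benchmarked. */
#include <stdio.h>

#ifdef NOOP_HINTS
# define likely(x)	(x)
# define unlikely(x)	(x)
#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

static long sum_fast_path(int n)
{
	long sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (unlikely(i == n / 2))
			continue;	/* rare case: skip one element */
		sum += i;		/* hinted fast path */
	}
	return sum;
}

int main(void)
{
	printf("%ld\n", sum_fast_path(1000));
	return 0;
}

Building once as-is and once with -DNOOP_HINTS gives the two variants to
compare.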

Tweak some branch hints:

- the second 64 bits in the bitmask are likely to be populated, because
  they contain the first 28 bits (nearly 3/4) of the normal priorities
  (ratio of 669669:691 ~= 1000:1); see the sketch after this list.

- it isn't unlikely that a context switch goes to or from a task with no
  mm: the CPU might be rapidly switching to and from the idle process
  (ratios of 475815:419004 and 471330:423544). Let the branch predictor
  decide.

- preempt_enable seems to be called very often inside a nested
  preempt_disable or with interrupts disabled (ratio of 3567760:87965
  ~= 40:1)
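
On the first hint: with 64-bit longs the 140-entry priority bitmap spans
three words, and assuming the usual layout in this tree (MAX_RT_PRIO == 100,
MAX_PRIO == 140), word b[1] covers priorities 64..127, i.e. the last 36
realtime levels plus the first 28 of the 40 normal levels. A small
stand-alone sketch of that arithmetic:

/* Sketch of the O(1) scheduler's priority-to-bitmap-word mapping on a
 * 64-bit machine.  Assumes MAX_RT_PRIO == 100 and MAX_PRIO == 140;
 * priority p sits at bit p % 64 of word p / 64. */
#include <stdio.h>

#define MAX_RT_PRIO	100
#define MAX_PRIO	140

int main(void)
{
	int p, in_b1 = 0;

	for (p = MAX_RT_PRIO; p < MAX_PRIO; p++) {
		printf("normal prio %3d -> b[%d], bit %2d\n",
		       p, p / 64, p % 64);
		if (p / 64 == 1)
			in_b1++;
	}
	/* 28 of the 40 normal priorities land in b[1], so once b[0] is
	 * known to be empty, b[1] is very likely non-empty. */
	printf("%d of %d normal priorities fall in b[1]\n",
	       in_b1, MAX_PRIO - MAX_RT_PRIO);
	return 0;
}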

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Daniel Walker <dwalker@mvista.com>
Cc: Hua Zhong <hzhong@gmail.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/asm-generic/bitops/sched.h
kernel/sched.c

diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
index 5ef93a4d009fd6e43e506b5b3e9febc0e27ecaba..815bb01480601f7ca1a96e81113fb030df89062d 100644
--- a/include/asm-generic/bitops/sched.h
+++ b/include/asm-generic/bitops/sched.h
@@ -15,7 +15,7 @@ static inline int sched_find_first_bit(const unsigned long *b)
 #if BITS_PER_LONG == 64
        if (unlikely(b[0]))
                return __ffs(b[0]);
-       if (unlikely(b[1]))
+       if (likely(b[1]))
                return __ffs(b[1]) + 64;
        return __ffs(b[2]) + 128;
 #elif BITS_PER_LONG == 32
diff --git a/kernel/sched.c b/kernel/sched.c
index 53608a59d6e3c0fd3d0b18dbf919beb9b0125397..094b5687eef6da6864ae30bb1ce4d936a5552151 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1822,14 +1822,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm = next->mm;
        struct mm_struct *oldmm = prev->active_mm;
 
-       if (unlikely(!mm)) {
+       if (!mm) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);
 
-       if (unlikely(!prev->mm)) {
+       if (!prev->mm) {
                prev->active_mm = NULL;
                WARN_ON(rq->prev_mm);
                rq->prev_mm = oldmm;
@@ -3491,7 +3491,7 @@ asmlinkage void __sched preempt_schedule(void)
         * If there is a non-zero preempt_count or interrupts are disabled,
         * we do not want to preempt the current task.  Just return..
         */
-       if (unlikely(ti->preempt_count || irqs_disabled()))
+       if (likely(ti->preempt_count || irqs_disabled()))
                return;
 
 need_resched:
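
The ratios quoted in the log above come from counting how often each hinted
branch actually went the hinted way. A hypothetical user-space sketch of
that counting idea (this is only an illustration, not the instrumentation
patch the Cc'd authors wrote):

/* Hypothetical "likely profiling" illustration: every evaluation of the
 * hinted condition bumps a hit or miss counter, so a wrong hint shows up
 * as a lopsided ratio like the ones quoted in the changelog. */
#include <stdio.h>

static unsigned long hint_hit, hint_miss;

/* Counting stand-in for likely(); uses a GCC statement expression. */
#define likely_counted(x) ({				\
	int _r = !!(x);					\
	_r ? hint_hit++ : hint_miss++;			\
	__builtin_expect(_r, 1);			\
})

int main(void)
{
	int i, taken = 0;

	for (i = 0; i < 1000; i++)
		if (likely_counted(i % 40 != 0))
			taken++;	/* the path the hint favours */

	printf("likely() hit:miss = %lu:%lu (taken %d)\n",
	       hint_hit, hint_miss, taken);
	return 0;
}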