[PATCH] hrtimer: export deinlined mktime
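
For context, kernel/posix-cpu-timers.c implements the process- and thread-wide CPU-time clocks and timers (CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID). A minimal user-space sketch of arming one such timer is given below; the signal number, the 100ms expiry and the busy loop are purely illustrative, and older glibc needs -lrt at link time.

/*
 * Illustrative only: arm a timer on CLOCK_PROCESS_CPUTIME_ID, the
 * process CPU-time clock backed by kernel/posix-cpu-timers.c.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

static volatile sig_atomic_t fired;

static void on_timer(int sig)
{
        (void)sig;
        fired = 1;
}

int main(void)
{
        struct sigaction sa = { 0 };
        struct sigevent sev = { 0 };
        struct itimerspec its = { 0 };
        timer_t tid;

        sa.sa_handler = on_timer;
        sigaction(SIGRTMIN, &sa, NULL);

        sev.sigev_notify = SIGEV_SIGNAL;
        sev.sigev_signo = SIGRTMIN;
        if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid) == -1) {
                perror("timer_create");
                return 1;
        }

        /* Expire once the whole process has consumed 100ms of CPU time. */
        its.it_value.tv_nsec = 100 * 1000 * 1000;
        if (timer_settime(tid, 0, &its, NULL) == -1) {
                perror("timer_settime");
                return 1;
        }

        while (!fired)
                ;       /* burn CPU until the expiry signal arrives */

        puts("process CPU-time timer expired");
        return 0;
}
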
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bf374fceb39c3718315a73eed65cbd0e960a5e7f..4c68edff900b17bbe46e504c78b12c1231dbf3de 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -36,7 +36,7 @@ timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-               ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
+               ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
@@ -238,18 +238,7 @@ static int cpu_clock_sample_group_locked(unsigned int clock_idx,
                while ((t = next_thread(t)) != p) {
                        cpu->sched += t->sched_time;
                }
-               if (p->tgid == current->tgid) {
-                       /*
-                        * We're sampling ourselves, so include the
-                        * cycles not yet banked.  We still omit
-                        * other threads running on other CPUs,
-                        * so the total can always be behind as
-                        * much as max(nthreads-1,ncpus) * (NSEC_PER_SEC/HZ).
-                        */
-                       cpu->sched += current_sched_time(current);
-               } else {
-                       cpu->sched += p->sched_time;
-               }
+               cpu->sched += sched_ns(p);
                break;
        }
        return 0;
@@ -497,7 +486,7 @@ static void process_timer_rebalance(struct task_struct *p,
                left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                   nthreads);
                do {
-                       if (!unlikely(t->flags & PF_EXITING)) {
+                       if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(prof_ticks(t), left);
                                if (cputime_eq(t->it_prof_expires,
                                               cputime_zero) ||
@@ -512,7 +501,7 @@ static void process_timer_rebalance(struct task_struct *p,
                left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                   nthreads);
                do {
-                       if (!unlikely(t->flags & PF_EXITING)) {
+                       if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(virt_ticks(t), left);
                                if (cputime_eq(t->it_virt_expires,
                                               cputime_zero) ||
@@ -527,7 +516,7 @@ static void process_timer_rebalance(struct task_struct *p,
                nsleft = expires.sched - val.sched;
                do_div(nsleft, nthreads);
                do {
-                       if (!unlikely(t->flags & PF_EXITING)) {
+                       if (likely(!(t->flags & PF_EXITING))) {
                                ns = t->sched_time + nsleft;
                                if (t->it_sched_expires == 0 ||
                                    t->it_sched_expires > ns) {
@@ -1225,7 +1214,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                /*
                 * The task was cleaned up already, no future firings.
                 */
-               return;
+               goto out;
 
        /*
         * Fetch the current sample and update the timer's expiry time.
@@ -1235,7 +1224,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
-                       return;
+                       goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it.  */
        } else {
@@ -1248,8 +1237,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
-                       read_unlock(&tasklist_lock);
-                       return;
+                       goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
@@ -1257,8 +1245,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
-                       read_unlock(&tasklist_lock);
-                       return;
+                       goto out_unlock;
                }
                cpu_clock_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
@@ -1270,7 +1257,13 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
         */
        arm_timer(timer, now);
 
+out_unlock:
        read_unlock(&tasklist_lock);
+
+out:
+       timer->it_overrun_last = timer->it_overrun;
+       timer->it_overrun = -1;
+       ++timer->it_requeue_pending;
 }
 
 /*
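
Assembled from the hunks above, the tail of posix_cpu_timer_schedule() after this change reads roughly as follows; the point of the new out/out_unlock labels is that the overrun bookkeeping and the it_requeue_pending increment now run on every exit path, not only after a successful re-arm:

        arm_timer(timer, now);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}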