diff --git a/kernel/sched.c b/kernel/sched.c
index fec97e4e196d039e3055572844bdfb4394ee6514..343e1794233e8054708de25d83f1a3a628cb1d18 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -34,7 +34,7 @@
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/vmalloc.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
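
Context for this hunk: in this kernel cycle the process-freezer helpers (try_to_freeze(), freezing() and friends) moved out of <linux/suspend.h> into the new <linux/freezer.h>, so files that use them switch includes. A minimal sketch of the usual call site; my_thread() is a hypothetical kthread function, not part of this patch:

	#include <linux/freezer.h>
	#include <linux/kthread.h>

	static int my_thread(void *unused)
	{
		while (!kthread_should_stop()) {
			try_to_freeze();	/* park here while the box suspends */
			/* ... periodic work ... */
		}
		return 0;
	}
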
@@ -160,15 +160,6 @@
 #define TASK_PREEMPTS_CURR(p, rq) \
        ((p)->prio < (rq)->curr->prio)
 
-/*
- * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- *
- * The higher a thread's priority, the bigger timeslices
- * it gets during one round of execution. But even the lowest
- * priority thread gets MIN_TIMESLICE worth of execution time.
- */
-
 #define SCALE_PRIO(x, prio) \
        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
 
@@ -180,6 +171,15 @@ static unsigned int static_prio_timeslice(int static_prio)
                return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 }
 
+/*
+ * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+ * to time slice values: [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ */
+
 static inline unsigned int task_timeslice(struct task_struct *p)
 {
        return static_prio_timeslice(p->static_prio);
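
The relocated comment's numbers can be checked against SCALE_PRIO() and static_prio_timeslice() above. A worked example, using the 2.6-era constants as I recall them (MAX_PRIO = 140, MAX_USER_PRIO = 40, static_prio = 120 + nice, DEF_TIMESLICE = 100ms; the real code works in jiffies):

	/*
	 * nice -20 -> static_prio 100: below NICE_TO_PRIO(0) = 120, so the
	 *             base is DEF_TIMESLICE * 4 = 400ms:
	 *             400 * (140 - 100) / (40 / 2) = 800ms
	 * nice   0 -> static_prio 120: 100 * (140 - 120) / 20 = 100ms
	 * nice +19 -> static_prio 139: 100 * (140 - 139) / 20 =   5ms,
	 *             which is exactly the MIN_TIMESLICE floor
	 */

This reproduces the [800ms ... 100ms ... 5ms] range the comment advertises.
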
@@ -1822,14 +1822,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm = next->mm;
        struct mm_struct *oldmm = prev->active_mm;
 
-       if (unlikely(!mm)) {
+       if (!mm) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);
 
-       if (unlikely(!prev->mm)) {
+       if (!prev->mm) {
                prev->active_mm = NULL;
                WARN_ON(rq->prev_mm);
                rq->prev_mm = oldmm;
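
Dropping the unlikely() hints here is profiling-driven: kernel threads, which have no mm of their own, switch in and out often enough that !mm is not a rare case. For reference, the borrowed active_mm reference taken with atomic_inc() above is released once the switch has completed; an abridged sketch of 2.6-era finish_task_switch(), from memory, with unrelated bookkeeping elided:

	static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
	{
		struct mm_struct *mm = rq->prev_mm;

		rq->prev_mm = NULL;
		/* ... runqueue unlock and prev-state handling elided ... */
		if (mm)
			mmdrop(mm);	/* pairs with atomic_inc(&oldmm->mm_count) */
	}
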
@@ -3333,6 +3333,7 @@ asmlinkage void __sched schedule(void)
                printk(KERN_ERR "BUG: scheduling while atomic: "
                        "%s/0x%08x/%d\n",
                        current->comm, preempt_count(), current->pid);
+               debug_show_held_locks(current);
                dump_stack();
        }
        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
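
debug_show_held_locks() is the lockdep helper that lists the locks a task currently holds, so the "scheduling while atomic" report now names the lock(s) that made the context atomic. For illustration, the classic way to trigger this report; my_lock and the surrounding code are hypothetical:

	spin_lock(&my_lock);	/* preempt_count() becomes non-zero */
	msleep(10);		/* sleeps, so schedule() runs atomically
				 * and the BUG above is printed */
	spin_unlock(&my_lock);
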
@@ -3491,7 +3492,7 @@ asmlinkage void __sched preempt_schedule(void)
         * If there is a non-zero preempt_count or interrupts are disabled,
         * we do not want to preempt the current task.  Just return..
         */
-       if (unlikely(ti->preempt_count || irqs_disabled()))
+       if (likely(ti->preempt_count || irqs_disabled()))
                return;
 
 need_resched:
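
The unlikely() to likely() flip on the early return belongs to the same profiling-driven re-annotation as the context_switch() hunk above: measurement showed the bail-out is taken more often than not. As a reminder, these hints are thin wrappers over a GCC builtin; from 2.6-era <linux/compiler.h>:

	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

They only steer block layout and static branch prediction, so a wrong hint is a performance bug, never a correctness one.
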
@@ -4804,7 +4805,7 @@ static void show_task(struct task_struct *p)
                show_stack(p, NULL);
 }
 
-void show_state(void)
+void show_state_filter(unsigned long state_filter)
 {
        struct task_struct *g, *p;
 
@@ -4824,11 +4825,16 @@ void show_state(void)
                 * console might take alot of time:
                 */
                touch_nmi_watchdog();
-               show_task(p);
+               if (!state_filter || (p->state & state_filter))
+                       show_task(p);
        } while_each_thread(g, p);
 
        read_unlock(&tasklist_lock);
-       debug_show_all_locks();
+       /*
+        * Only show locks if all tasks are dumped:
+        */
+       if (state_filter == -1)
+               debug_show_all_locks();
 }
 
 /**
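
The new state_filter argument is what lets SysRq dump only blocked tasks. From the same series, as best I recall it: the old entry point becomes an inline wrapper in <linux/sched.h>, and the SysRq-W handler passes a real filter:

	/* <linux/sched.h>: unfiltered dump, as before */
	static inline void show_state(void)
	{
		show_state_filter(0);	/* 0 = no filter, every task is shown */
	}

	/* drivers/char/sysrq.c: dump only D-state (blocked) tasks */
	show_state_filter(TASK_UNINTERRUPTIBLE);

Per the comment above, only the catch-all -1 filter also dumps lock state.
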
@@ -6349,9 +6355,10 @@ static int build_sched_domains(const cpumask_t *cpu_map)
                                > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
                        if (!sched_group_allnodes) {
                                sched_group_allnodes
-                                       = kmalloc(sizeof(struct sched_group)
-                                                       * MAX_NUMNODES,
-                                                 GFP_KERNEL);
+                                       = kmalloc_node(sizeof(struct sched_group)
+                                                       * MAX_NUMNODES,
+                                                 GFP_KERNEL,
+                                                 cpu_to_node(i));
                                if (!sched_group_allnodes) {
                                        printk(KERN_WARNING
                                        "Can not alloc allnodes sched group\n");
@@ -6866,6 +6873,7 @@ void __might_sleep(char *file, int line)
                                " context at %s:%d\n", file, line);
                printk("in_atomic():%d, irqs_disabled():%d\n",
                        in_atomic(), irqs_disabled());
+               debug_show_held_locks(current);
                dump_stack();
        }
 #endif
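
As with the schedule() hunk, __might_sleep() now lists held locks via lockdep when it fires. It is normally reached through the might_sleep() macro, which callers put at the top of any function that may block; a sketch, where my_wait_for_hw() is hypothetical:

	#include <linux/completion.h>
	#include <linux/kernel.h>

	static void my_wait_for_hw(struct completion *done)
	{
		/* expands to __might_sleep(__FILE__, __LINE__) when
		 * CONFIG_DEBUG_SPINLOCK_SLEEP is set; complains if we
		 * are currently atomic */
		might_sleep();
		wait_for_completion(done);
	}
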