err.no Git - linux-2.6/commitdiff
[MIPS] FP affinity: Coding style cleanups
author Ralf Baechle <ralf@linux-mips.org>
Tue, 10 Jul 2007 16:33:02 +0000 (17:33 +0100)
committer Ralf Baechle <ralf@linux-mips.org>
Tue, 10 Jul 2007 16:33:02 +0000 (17:33 +0100)
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/kernel/mips-mt.c
arch/mips/kernel/traps.c
include/asm-mips/system.h

diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index ba01800b601874d4a224c2752d8682dd1c606853..b1b994dd41dbc1d0a722152ea1f8f0b4cfc894c9 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -109,7 +109,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
        read_unlock(&tasklist_lock);
 
        /* Compute new global allowed CPU set if necessary */
-       if( (p->thread.mflags & MF_FPUBOUND)
+       if ((p->thread.mflags & MF_FPUBOUND)
        && cpus_intersects(new_mask, mt_fpu_cpumask)) {
                cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
                retval = set_cpus_allowed(p, effective_mask);
@@ -195,27 +195,31 @@ void mips_mt_regdump(unsigned long mvpctl)
        nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
        ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
        printk("-- per-VPE State --\n");
-       for(i = 0; i < nvpe; i++) {
-           for(tc = 0; tc < ntc; tc++) {
+       for (i = 0; i < nvpe; i++) {
+               for (tc = 0; tc < ntc; tc++) {
                        settc(tc);
-               if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
-                   printk("  VPE %d\n", i);
-                   printk("   VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
-                   printk("   VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
-                   printk("   VPE%d.Status : %08lx\n",
-                               i, read_vpe_c0_status());
-                   printk("   VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
-                   printk("   VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
-                   printk("   VPE%d.Config7 : %08lx\n",
-                               i, read_vpe_c0_config7());
-                   break; /* Next VPE */
+                       if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+                               printk("  VPE %d\n", i);
+                               printk("   VPEControl : %08lx\n",
+                                      read_vpe_c0_vpecontrol());
+                               printk("   VPEConf0 : %08lx\n",
+                                      read_vpe_c0_vpeconf0());
+                               printk("   VPE%d.Status : %08lx\n",
+                                      i, read_vpe_c0_status());
+                               printk("   VPE%d.EPC : %08lx\n",
+                                      i, read_vpe_c0_epc());
+                               printk("   VPE%d.Cause : %08lx\n",
+                                      i, read_vpe_c0_cause());
+                               printk("   VPE%d.Config7 : %08lx\n",
+                                      i, read_vpe_c0_config7());
+                               break; /* Next VPE */
+                       }
                }
-           }
        }
        printk("-- per-TC State --\n");
-       for(tc = 0; tc < ntc; tc++) {
+       for (tc = 0; tc < ntc; tc++) {
                settc(tc);
-               if(read_tc_c0_tcbind() == read_c0_tcbind()) {
+               if (read_tc_c0_tcbind() == read_c0_tcbind()) {
                        /* Are we dumping ourself?  */
                        haltval = 0; /* Then we're not halted, and mustn't be */
                        tcstatval = flags; /* And pre-dump TCStatus is flags */
@@ -384,7 +388,7 @@ void mips_mt_set_cpuoptions(void)
                mt_fpemul_threshold = fpaff_threshold;
        } else {
                mt_fpemul_threshold =
-                       (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+                       (FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
        }
        printk("FPU Affinity set after %ld emulations\n",
                        mt_fpemul_threshold);
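
Note: a minimal userspace sketch of the threshold arithmetic above, assuming made-up values for FPUSEFACTOR, HZ and loops_per_jiffy (the real numbers come from the kernel config and boot-time calibration):

#include <stdio.h>

#define FPUSEFACTOR     2000UL          /* stand-in; not the kernel's actual constant */
#define HZ              100UL           /* assumed tick rate */

int main(void)
{
        unsigned long loops_per_jiffy = 1000000UL;      /* assumed calibration result */
        unsigned long mt_fpemul_threshold;

        /* Same expression as the cleaned-up line in mips_mt_set_cpuoptions(). */
        mt_fpemul_threshold =
                (FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;

        printf("FPU Affinity set after %lu emulations\n", mt_fpemul_threshold);
        return 0;
}
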
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 7e9cb5b1b4a7e489ae69790b9230ec293d3bc532..c598e890a880103ea0af04f5f4f7b85221d1f55f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -752,6 +752,33 @@ asmlinkage void do_ri(struct pt_regs *regs)
        force_sig(SIGILL, current);
 }
 
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+       if (mt_fpemul_threshold > 0 &&
+            ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+               /*
+                * If there's no FPU present, or if the application has already
+                * restricted the allowed set to exclude any CPUs with FPUs,
+                * we'll skip the procedure.
+                */
+               if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+                       cpumask_t tmask;
+
+                       cpus_and(tmask, current->thread.user_cpus_allowed,
+                                mt_fpu_cpumask);
+                       set_cpus_allowed(current, tmask);
+                       current->thread.mflags |= MF_FPUBOUND;
+               }
+       }
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
        unsigned int cpid;
@@ -785,36 +812,8 @@ asmlinkage void do_cpu(struct pt_regs *regs)
                                                &current->thread.fpu, 0);
                        if (sig)
                                force_sig(sig, current);
-#ifdef CONFIG_MIPS_MT_FPAFF
-                       else {
-                       /*
-                        * MIPS MT processors may have fewer FPU contexts
-                        * than CPU threads. If we've emulated more than
-                        * some threshold number of instructions, force
-                        * migration to a "CPU" that has FP support.
-                        */
-                        if(mt_fpemul_threshold > 0
-                        && ((current->thread.emulated_fp++
-                           > mt_fpemul_threshold))) {
-                         /*
-                          * If there's no FPU present, or if the
-                          * application has already restricted
-                          * the allowed set to exclude any CPUs
-                          * with FPUs, we'll skip the procedure.
-                          */
-                         if (cpus_intersects(current->cpus_allowed,
-                                               mt_fpu_cpumask)) {
-                           cpumask_t tmask;
-
-                           cpus_and(tmask,
-                                       current->thread.user_cpus_allowed,
-                                       mt_fpu_cpumask);
-                           set_cpus_allowed(current, tmask);
-                           current->thread.mflags |= MF_FPUBOUND;
-                         }
-                        }
-                       }
-#endif /* CONFIG_MIPS_MT_FPAFF */
+                       else
+                               mt_ase_fp_affinity();
                }
 
                return;
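
Note: the new mt_ase_fp_affinity() helper counts emulated FP instructions and, once a threshold is crossed, narrows the task's affinity to CPUs that actually have an FPU. A self-contained userspace sketch of that decision, using plain bitmasks instead of cpumask_t and invented names and values:

#include <stdio.h>

#define MT_FPEMUL_THRESHOLD 4000UL              /* assumed threshold */

static unsigned long mt_fpu_cpumask = 0x5;      /* pretend CPUs 0 and 2 have an FPU */

struct fake_task {
        unsigned long emulated_fp;              /* emulated FP instructions so far */
        unsigned long cpus_allowed;             /* current affinity mask */
        unsigned long user_cpus_allowed;        /* mask the user originally asked for */
        int fpu_bound;                          /* stands in for MF_FPUBOUND */
};

static void fake_fp_affinity(struct fake_task *t)
{
        if (MT_FPEMUL_THRESHOLD > 0 &&
            t->emulated_fp++ > MT_FPEMUL_THRESHOLD) {
                /* Skip if no allowed CPU has an FPU at all. */
                if (t->cpus_allowed & mt_fpu_cpumask) {
                        t->cpus_allowed = t->user_cpus_allowed & mt_fpu_cpumask;
                        t->fpu_bound = 1;
                }
        }
}

int main(void)
{
        struct fake_task t = {
                .emulated_fp = 4001,            /* already past the threshold */
                .cpus_allowed = 0xf,
                .user_cpus_allowed = 0xf,
        };

        fake_fp_affinity(&t);
        printf("allowed mask %#lx, FPU-bound: %d\n", t.cpus_allowed, t.fpu_bound);
        return 0;
}
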
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index bb0b289dbc9ef25ac7f211b620309ab66bfbe76b..9b3a8dd2c3db5ec0c1d2bc1520057ccff3dd6e75 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -44,7 +44,7 @@ struct task_struct;
  * different thread.
  */
 
-#define switch_to(prev,next,last)                                      \
+#define __mips_mt_fpaff_switch_to(prev)                                        \
 do {                                                                   \
        if (cpu_has_fpu &&                                              \
            (prev->thread.mflags & MF_FPUBOUND) &&                      \
@@ -52,24 +52,22 @@ do {                                                                        \
                prev->thread.mflags &= ~MF_FPUBOUND;                    \
                prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
        }                                                               \
-       if (cpu_has_dsp)                                                \
-               __save_dsp(prev);                                       \
        next->thread.emulated_fp = 0;                                   \
-       (last) = resume(prev, next, task_thread_info(next));            \
-       if (cpu_has_dsp)                                                \
-               __restore_dsp(current);                                 \
 } while(0)
 
 #else
+#define __mips_mt_fpaff_switch_to(prev) do { (prev); } while (0)
+#endif
+
 #define switch_to(prev,next,last)                                      \
 do {                                                                   \
+       __mips_mt_fpaff_switch_to(prev);                                \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        (last) = resume(prev, next, task_thread_info(next));            \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
 } while(0)
-#endif
 
 /*
  * On SMP systems, when the scheduler does migration-cost autodetection,
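
Note: the system.h change factors the CONFIG_MIPS_MT_FPAFF-specific work into a hook macro that collapses to a no-op (while still evaluating its argument) when the option is off, so switch_to() itself keeps a single definition. A small userspace sketch of the same pattern, with invented names:

#include <stdio.h>

#define FEATURE_X 1     /* flip to 0 to compile the hook out */

#if FEATURE_X
#define feature_x_hook(v)                                       \
do {                                                            \
        printf("feature X hook saw %d\n", (v));                 \
} while (0)
#else
/* No-op, but still evaluates (v) so side effects are preserved. */
#define feature_x_hook(v) do { (void)(v); } while (0)
#endif

#define common_switch(v)                                        \
do {                                                            \
        feature_x_hook(v);                                      \
        printf("common work on %d\n", (v));                     \
} while (0)

int main(void)
{
        common_switch(42);
        return 0;
}
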