diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 330c4e497af38d248d3ff5e6a8ff584040ffd6f1..261f71d16a074f4dd4b3ab2fc2d42ba2c583d2e9 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -155,15 +155,57 @@ extern asmlinkage void *resume(void *last, void *next, void *next_ti);
 
 struct task_struct;
 
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+/*
+ * Handle the scheduler resume end of FPU affinity management.  We do this
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+ * isn't set), we undo the restriction on cpus_allowed.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+ * different thread.
+ */
+
 #define switch_to(prev,next,last)                                      \
 do {                                                                   \
+       if (cpu_has_fpu &&                                              \
+           (prev->thread.mflags & MF_FPUBOUND) &&                      \
+            (!(KSTK_STATUS(prev) & ST0_CU1))) {                        \
+               prev->thread.mflags &= ~MF_FPUBOUND;                    \
+               prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
+       }                                                               \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
+       next->thread.emulated_fp = 0;                                   \
        (last) = resume(prev, next, next->thread_info);                 \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
 } while(0)
 
+#else
+#define switch_to(prev,next,last)                                      \
+do {                                                                   \
+       if (cpu_has_dsp)                                                \
+               __save_dsp(prev);                                       \
+       (last) = resume(prev, next, task_thread_info(next));            \
+       if (cpu_has_dsp)                                                \
+               __restore_dsp(current);                                 \
+} while(0)
+#endif
+
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
        __u32 retval;
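
Note on the hunk above: the new CONFIG_MIPS_MT_FPAFF switch_to() lazily drops
an FPU-affinity restriction at context-switch time. Below is a minimal
standalone sketch of that pattern; the flat cpumask, the field names, and the
bool standing in for the CU1 bit in KSTK_STATUS() are simplifications for
illustration, not the kernel's actual definitions.

    /*
     * Sketch of the lazy affinity relaxation done in switch_to().
     * All names and types here are simplified stand-ins.
     */
    #include <stdbool.h>

    typedef unsigned long cpumask_t;        /* one bit per CPU (simplified) */

    #define MF_FPUBOUND 0x01                /* thread was pinned to FPU CPUs */

    struct thread {
            unsigned int flags;
            cpumask_t cpus_allowed;         /* mask the scheduler honours now */
            cpumask_t user_cpus_allowed;    /* mask the user originally set */
            bool used_fpu_last_slice;       /* stand-in for CU1 in the status reg */
    };

    /* Called on the outgoing thread, i.e. the `prev' side of switch_to(). */
    static void relax_fpu_affinity(struct thread *prev)
    {
            /*
             * Pinned to FPU-capable CPUs, but a whole time-slice went by
             * without the FPU being touched: widen the mask again.  No
             * migration is forced -- the thread is being switched out
             * anyway, so the wider mask simply applies at its next
             * placement, exactly as the comment in the hunk describes.
             */
            if ((prev->flags & MF_FPUBOUND) && !prev->used_fpu_last_slice) {
                    prev->flags &= ~MF_FPUBOUND;
                    prev->cpus_allowed = prev->user_cpus_allowed;
            }
    }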
@@ -276,10 +318,10 @@ extern void __xchg_called_with_bad_pointer(void);
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
        switch (size) {
-               case 4:
-                       return __xchg_u32(ptr, x);
-               case 8:
-                       return __xchg_u64(ptr, x);
+       case 4:
+               return __xchg_u32(ptr, x);
+       case 8:
+               return __xchg_u64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
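
Note: the hunk above only re-indents the usual sizeof-dispatch idiom, but the
idiom itself is worth spelling out. This is how a caller-facing xchg() macro
conventionally drives __xchg(); the macro body below is the standard spelling,
shown as a sketch rather than quoted from this header.

    /*
     * sizeof(*(ptr)) is a compile-time constant, so GCC folds the switch
     * in __xchg() down to a direct call to __xchg_u32 or __xchg_u64.  For
     * any other operand size the call to __xchg_called_with_bad_pointer()
     * survives -- and since that function is declared but never defined,
     * the build fails at link time instead of miscompiling silently.
     */
    #define xchg(ptr, x)                                            \
            ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),  \
                                        sizeof(*(ptr))))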
@@ -312,7 +354,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 #endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
-               : "=&r" (retval), "=m" (*m)
+               : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
@@ -332,7 +374,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 #endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
-               : "=&r" (retval), "=m" (*m)
+               : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
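
Note: the "=m" -> "=R" change in these cmpxchg hunks matters because MIPS
ll/sc instructions only accept a base register plus a 16-bit offset; the
machine constraint "R" guarantees exactly such an operand, while plain "m"
may expand to addressing forms the instructions cannot encode. An
illustrative ll/sc loop using the same constraint pattern follows -- it is
not the kernel's cmpxchg, just a sketch of the operand wiring.

    /*
     * Atomically swap *m for val and return the old value.  The word is
     * listed both as output ("=R") and input ("R") so GCC knows the asm
     * reads and writes it, and "R" keeps the address in a form ll/sc
     * can actually encode.
     */
    static inline unsigned long ll_sc_swap(volatile int *m, unsigned long val)
    {
            unsigned long old, tmp;

            __asm__ __volatile__(
            "       .set    push            \n"
            "       .set    noreorder       \n"
            "1:     ll      %0, %3          \n"     /* load-linked old value */
            "       move    %1, %z4         \n"     /* stage the replacement */
            "       sc      %1, %2          \n"     /* try to store it back */
            "       beqz    %1, 1b          \n"     /* lost the reservation? retry */
            "        nop                    \n"     /* branch delay slot */
            "       .set    pop             \n"
            : "=&r" (old), "=&r" (tmp), "=R" (*m)
            : "R" (*m), "Jr" (val)
            : "memory");

            return old;
    }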
@@ -369,7 +411,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 #endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
-               : "=&r" (retval), "=m" (*m)
+               : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
@@ -387,7 +429,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 #endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
-               : "=&r" (retval), "=m" (*m)
+               : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
@@ -430,8 +472,8 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
 extern void set_handler (unsigned long offset, void *addr, unsigned long len);
 extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
 extern void *set_vi_handler (int n, void *addr);
-extern void *set_vi_srs_handler (int n, void *addr, int regset);
 extern void *set_except_vector(int n, void *addr);
+extern unsigned long ebase;
 extern void per_cpu_trap_init(void);
 
 extern NORET_TYPE void die(const char *, struct pt_regs *);
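
Note: the final hunk drops set_vi_srs_handler() from the public interface and
exports `ebase' so other code can see the (possibly relocated) exception
vector base. A hedged usage sketch of the surviving hook follows; the vector
number and handler name are illustrative assumptions, not quoted from the
trap-setup code.

    /*
     * Illustrative only: install a low-level interrupt stub at general
     * exception vector 0.  `handle_int' stands in for an assembler entry
     * point; set_except_vector() returns the previous handler.
     */
    extern asmlinkage void handle_int(void);    /* hypothetical asm stub */

    static void __init install_vectors(void)
    {
            void *old = set_except_vector(0, handle_int);
            (void)old;          /* previous handler, usually ignored at boot */
    }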