err.no Git - linux-2.6/commitdiff
[PATCH] Clean up and minor fixes to TLB flush
author Andi Kleen <ak@suse.de>
Tue, 26 Sep 2006 08:52:29 +0000 (10:52 +0200)
committer Andi Kleen <andi@basil.nowhere.org>
Tue, 26 Sep 2006 08:52:29 +0000 (10:52 +0200)
- Convert CR* accesses to dedicated inline functions and rewrite
  the rest as C inlines
- Don't do a double flush for global flushes (pointed out by Zach Amsden;
  a short sketch after the sign-off below illustrates the redundant flush).
  This was a workaround for old CPUs that don't do 64-bit and is obsolete.
- Add a proper memory clobber to invlpg
- Remove an unused extern

Signed-off-by: Andi Kleen <ak@suse.de>
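
For context, here is a minimal sketch (not part of the commit) of the double
flush mentioned above, written in terms of the get_cr3()/set_cr3()/get_cr4()/
set_cr4() helpers this patch introduces; the old_/new_ function names are
illustrative only:

	/* Old-style global flush: clear PGE, reload CR3, restore PGE.
	 * The CR3 reload is redundant, because a write to CR4 that
	 * changes PGE already flushes all TLB entries, global ones
	 * included. */
	static inline void old_flush_tlb_global(void)
	{
		unsigned long cr4 = get_cr4();

		set_cr4(cr4 & ~X86_CR4_PGE);	/* this alone flushes everything */
		set_cr3(get_cr3());		/* extra, redundant flush */
		set_cr4(cr4);			/* restoring PGE flushes again */
	}

	/* What this patch does instead: toggling CR4.PGE is sufficient. */
	static inline void new_flush_tlb_all(void)
	{
		unsigned long cr4 = get_cr4();

		set_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE, flush all entries */
		set_cr4(cr4);			/* write old value back */
	}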
include/asm-x86_64/pgtable.h
include/asm-x86_64/tlbflush.h

diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index a31ab4e68a9bbb4755757210383ed818220ca107..0c1e2422400a8d7dd713d983e451e72d2db158cd 100644
@@ -25,8 +25,6 @@ extern int nonx_setup(char *str);
 extern void paging_init(void);
 extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
 
-extern unsigned long pgkern_mask;
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index d16d5b60f419b4680d20bd14bb7b93bba8f74308..983bd296c81a304281530c972f6f38c4b1384d95 100644
@@ -4,44 +4,44 @@
 #include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb()                                                  \
-       do {                                                            \
-               unsigned long tmpreg;                                   \
-                                                                       \
-               __asm__ __volatile__(                                   \
-                       "movq %%cr3, %0;  # flush TLB \n"               \
-                       "movq %0, %%cr3;              \n"               \
-                       : "=r" (tmpreg)                                 \
-                       :: "memory");                                   \
-       } while (0)
+static inline unsigned long get_cr3(void)
+{
+       unsigned long cr3;
+       asm volatile("mov %%cr3,%0" : "=r" (cr3));
+       return cr3;
+}
 
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __flush_tlb_global()                                           \
-       do {                                                            \
-               unsigned long tmpreg, cr4, cr4_orig;                    \
-                                                                       \
-               __asm__ __volatile__(                                   \
-                       "movq %%cr4, %2;  # turn off PGE     \n"        \
-                       "movq %2, %1;                        \n"        \
-                       "andq %3, %1;                        \n"        \
-                       "movq %1, %%cr4;                     \n"        \
-                       "movq %%cr3, %0;  # flush TLB        \n"        \
-                       "movq %0, %%cr3;                     \n"        \
-                       "movq %2, %%cr4;  # turn PGE back on \n"        \
-                       : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
-                       : "i" (~X86_CR4_PGE)                            \
-                       : "memory");                                    \
-       } while (0)
-
-extern unsigned long pgkern_mask;
-
-#define __flush_tlb_all() __flush_tlb_global()
+static inline void set_cr3(unsigned long cr3)
+{
+       asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
+}
+
+static inline void __flush_tlb(void)
+{
+       set_cr3(get_cr3());
+}
+
+static inline unsigned long get_cr4(void)
+{
+       unsigned long cr4;
+       asm volatile("mov %%cr4,%0" : "=r" (cr4));
+       return cr4;
+}
+
+static inline void set_cr4(unsigned long cr4)
+{
+       asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+       unsigned long cr4 = get_cr4();
+       set_cr4(cr4 & ~X86_CR4_PGE);    /* clear PGE */
+       set_cr4(cr4);                   /* write old PGE again and flush TLBs */
+}
 
 #define __flush_tlb_one(addr) \
-       __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+       __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
 
 
 /*