#define _ASM_X86_SYSTEM_H_
#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/* entries in ARCH_DLINFO: */
+#ifdef CONFIG_IA32_EMULATION
+# define AT_VECTOR_SIZE_ARCH 2
+#else
+# define AT_VECTOR_SIZE_ARCH 1
+#endif
#ifdef CONFIG_X86_32
-#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
struct task_struct; /* one of the stranger aspects of C forward declarations */
-extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
- struct task_struct *next));
+struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next);
/*
* Saving eflags is important. It switches not only IOPL between tasks,
"2" (prev), "d" (next)); \
} while (0)
-# include "system_32.h"
+/*
+ * Disable hlt during certain critical I/O operations
+ */
+#define HAVE_DISABLE_HLT
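For illustration only: a driver that relies on HAVE_DISABLE_HLT would typically bracket its timing-critical I/O with disable_hlt()/enable_hlt() (declared for 32-bit alongside this define); this hypothetical sketch is not part of the patch.

	static void hypothetical_critical_io(void)
	{
		disable_hlt();	/* keep the idle loop from issuing hlt */
		/* ... perform the timing-sensitive transfer ... */
		enable_hlt();	/* hlt is safe again once the transfer completes */
	}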
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
/* Save and restore flags to clear and handle the leaking NT flag */
#define switch_to(prev, next, last) \
- asm volatile(SAVE_CONTEXT \
+ asm volatile(SAVE_CONTEXT \
"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
"movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
"call __switch_to\n\t" \
[thread_info] "i" (offsetof(struct task_struct, stack)), \
[pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
: "memory", "cc" __EXTRA_CLOBBER)
-# include "system_64.h"
#endif
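For context, switch_to() is invoked from the scheduler's context-switch path; a simplified, illustrative sketch of the calling convention (not the exact scheduler code) looks like:

	/*
	 * prev is switched out, next is switched in; when prev eventually
	 * runs again, "last" tells it which task it was switched from.
	 */
	switch_to(prev, next, prev);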
#ifdef __KERNEL__
"movl %k1, %%" #seg "\n\t" \
"jmp 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- _ASM_ALIGN "\n\t" \
- _ASM_PTR " 1b,3b\n" \
- ".previous" \
+ _ASM_EXTABLE(1b,3b) \
: :"r" (value), "r" (0))
/* This could fault if %cr4 does not exist. On x86_64, %cr4 always
 * exists, so it will never fault. */
#ifdef CONFIG_X86_32
- asm volatile("1: mov %%cr4, %0 \n"
- "2: \n"
- ".section __ex_table,\"a\" \n"
- ".long 1b,2b \n"
- ".previous \n"
- : "=r" (val), "=m" (__force_order) : "0" (0));
+ asm volatile("1: mov %%cr4, %0\n"
+ "2:\n"
+ _ASM_EXTABLE(1b,2b)
+ : "=r" (val), "=m" (__force_order) : "0" (0));
#else
val = native_read_cr4();
#endif
asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}
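To illustrate the difference from a plain read_cr4(), a hypothetical caller that must also run on CPUs without %cr4 can use the _safe variant, which reads back 0 there instead of faulting:

	if (read_cr4_safe() & X86_CR4_PAE)	/* 0 on CPUs lacking %cr4 */
		pr_debug("PAE enabled\n");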
+#ifdef CONFIG_X86_64
+static inline unsigned long native_read_cr8(void)
+{
+ unsigned long cr8;
+ asm volatile("movq %%cr8,%0" : "=r" (cr8));
+ return cr8;
+}
+
+static inline void native_write_cr8(unsigned long val)
+{
+ asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
+}
+#endif
+
static inline void native_wbinvd(void)
{
asm volatile("wbinvd": : :"memory");
#define read_cr4_safe() (native_read_cr4_safe())
#define write_cr4(x) (native_write_cr4(x))
#define wbinvd() (native_wbinvd())
+#ifdef CONFIG_X86_64
+#define read_cr8() (native_read_cr8())
+#define write_cr8(x) (native_write_cr8(x))
+#endif
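On 64-bit, %cr8 is the task-priority register (TPR); a hypothetical sketch of masking low-priority external interrupts around a critical section (the value and policy are illustrative, not from this patch):

	unsigned long tpr = read_cr8();
	write_cr8(6);		/* block vectors in priority classes <= 6 */
	/* ... critical section ... */
	write_cr8(tpr);		/* restore the previous priority threshold */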
/* Clear the 'TS' bit */
#define clts() (native_clts())
#endif /* __KERNEL__ */
-static inline void clflush(void *__p)
+static inline void clflush(volatile void *__p)
{
- asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+ asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
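A common pattern with clflush() is flushing a whole buffer one cache line at a time with full fences around the loop, since clflush itself is only ordered by mfence; a minimal sketch that assumes 64-byte lines rather than querying the CPU's clflush size:

	static void hypothetical_flush_range(void *vaddr, unsigned int size)
	{
		void *p, *vend = vaddr + size - 1;

		mb();				/* order earlier stores before the flushes */
		for (p = vaddr; p <= vend; p += 64)
			clflush(p);		/* flush one (assumed 64-byte) line */
		mb();				/* wait for the flushes to complete */
	}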
#define nop() __asm__ __volatile__ ("nop")
*/
#ifdef CONFIG_X86_32
/*
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
+ * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
* nop for these.
*/
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
+/*
+ * Stop RDTSC speculation. This is needed when RDTSC (or get_cycles(),
+ * or a vread that may access the TSC) must stay within a defined
+ * code region.
+ *
+ * (Could use a three-way alternative() for this if there were one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+ alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+ alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
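A hedged usage sketch: bracketing a TSC read with rdtsc_barrier() keeps the read from drifting outside the measured region (get_cycles() here stands in for any TSC read):

	rdtsc_barrier();	/* don't let the read speculate into earlier code */
	cycles = get_cycles();
	rdtsc_barrier();	/* don't let later code begin before the read */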
#endif