[threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
[ti_flags] "i" (offsetof(struct thread_info, flags)),\
[tif_fork] "i" (TIF_FORK), \
- [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
+ [thread_info] "i" (offsetof(struct task_struct, stack)), \
[pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
: "memory", "cc" __EXTRA_CLOBBER)
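For context on the constraint change above: the task_struct field formerly named thread_info was renamed to stack, but it still holds the thread_info pointer that switch_to dereferences. A minimal sketch of the layout this offset arithmetic assumes (illustrative only, not the actual kernel definition):

	struct task_struct {
		volatile long state;
		void *stack;	/* formerly 'struct thread_info *thread_info';
				 * still points at the thread_info/kernel-stack
				 * allocation, so only the name changed */
		/* ... */
	};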
unsigned long cr0;
asm volatile("movq %%cr0,%0" : "=r" (cr0));
return cr0;
}

static inline void write_cr0(unsigned long val)
{
asm volatile("movq %0,%%cr0" :: "r" (val));
}
+
+static inline unsigned long read_cr2(void)
+{
+ unsigned long cr2;
+ asm("movq %%cr2,%0" : "=r" (cr2));
+ return cr2;
+}
+
+static inline void write_cr2(unsigned long val)
+{
+ asm volatile("movq %0,%%cr2" :: "r" (val));
+}
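For context on the new cr2 accessors: the CPU latches the faulting linear address in CR2 before raising a page fault, so read_cr2() is the first thing a fault handler reaches for. A sketch of that usage (the handler name and body are illustrative, not part of this patch):

	static void example_page_fault_handler(void)
	{
		/* the CPU latched the faulting linear address in CR2 */
		unsigned long address = read_cr2();

		/* ... look up the mapping for 'address' and resolve the fault ... */
		(void)address;
	}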
static inline unsigned long read_cr3(void)
{
unsigned long cr3;
asm("movq %%cr3,%0" : "=r" (cr3));
return cr3;
}

static inline void write_cr3(unsigned long val)
{
asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}

static inline unsigned long read_cr4(void)
{
unsigned long cr4;
asm("movq %%cr4,%0" : "=r" (cr4));
return cr4;
}

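These paired accessors exist so callers can do read-modify-write sequences on a control register. A sketch of the common idiom against CR4 (the macro and helper names are illustrative; CR4.PGE is bit 7):

	#define EXAMPLE_CR4_PGE	0x00000080	/* CR4.PGE, global-page enable */

	static inline void example_toggle_global_pages(int enable)
	{
		unsigned long cr4 = read_cr4();

		if (enable)
			cr4 |= EXAMPLE_CR4_PGE;
		else
			cr4 &= ~EXAMPLE_CR4_PGE;
		write_cr4(cr4);	/* clearing PGE also flushes global TLB entries */
	}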
static inline void write_cr4(unsigned long val)
{
asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
}

-#define stts() write_cr0(8 | read_cr0())
-
-#define wbinvd() \
- __asm__ __volatile__ ("wbinvd": : :"memory");
+static inline unsigned long read_cr8(void)
+{
+ unsigned long cr8;
+ asm("movq %%cr8,%0" : "=r" (cr8));
+ return cr8;
+}
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-static inline void sched_cacheflush(void)
+static inline void write_cr8(unsigned long val)
{
- wbinvd();
+ asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
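CR8 on x86-64 aliases the local APIC task-priority register (TPR): writing a value n holds pending any interrupt vector whose priority class is at or below n. A hedged usage sketch of the new accessors (helper name illustrative):

	static inline unsigned long example_raise_irq_priority(unsigned long new_tpr)
	{
		unsigned long old_tpr = read_cr8();

		write_cr8(new_tpr);	/* vectors with priority class <= new_tpr
					 * are now held pending */
		return old_tpr;		/* caller restores it with write_cr8(old_tpr) */
	}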
+#define stts() write_cr0(8 | read_cr0())
+
+#define wbinvd() \
+ __asm__ __volatile__ ("wbinvd": : :"memory")
+
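stts() sets CR0.TS (bit 3, hence the literal 8), which is the hook behind lazy FPU context switching: with TS set, the next FPU instruction raises a device-not-available trap (#NM), and the trap handler restores the incoming task's FPU state before clearing TS with clts. A sketch of that idiom (function names illustrative):

	static inline void example_fpu_switch_out(void)
	{
		stts();			/* CR0.TS := 1; next FPU insn raises #NM */
	}

	static inline void example_fpu_trap(void)
	{
		asm volatile("clts");	/* clear CR0.TS before touching FPU state */
		/* ... restore the incoming task's FPU registers here ... */
	}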
#endif /* __KERNEL__ */
#define nop() __asm__ __volatile__ ("nop")