linux-2.6: include/asm-i386/paravirt.h
1 #ifndef __ASM_PARAVIRT_H
2 #define __ASM_PARAVIRT_H
3 /* Various instructions on x86 need to be replaced for
4  * para-virtualization: those hooks are defined here. */
5
6 #ifdef CONFIG_PARAVIRT
7 #include <asm/page.h>
8
9 /* Bitmask of what can be clobbered: usually at least eax. */
10 #define CLBR_NONE 0x0
11 #define CLBR_EAX 0x1
12 #define CLBR_ECX 0x2
13 #define CLBR_EDX 0x4
14 #define CLBR_ANY 0x7
15
16 #ifndef __ASSEMBLY__
17 #include <linux/types.h>
18 #include <linux/cpumask.h>
19
20 struct thread_struct;
21 struct Xgt_desc_struct;
22 struct tss_struct;
23 struct mm_struct;
24 struct desc_struct;
25
26 /* Lazy mode for batching updates / context switch */
27 enum paravirt_lazy_mode {
28         PARAVIRT_LAZY_NONE = 0,
29         PARAVIRT_LAZY_MMU = 1,
30         PARAVIRT_LAZY_CPU = 2,
31 };
32
33 struct paravirt_ops
34 {
35         unsigned int kernel_rpl;
36         int shared_kernel_pmd;
37         int paravirt_enabled;
38         const char *name;
39
40         /*
41          * Patch may replace one of the defined code sequences with arbitrary
42          * code, subject to the same register constraints.  This generally
43          * means the code is not free to clobber any registers other than EAX.
44          * The patch function should return the number of bytes of code
45          * generated, as we nop pad the rest in generic code.
46          */
47         unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
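        /*
         * Illustrative sketch (editor's example, not a declared kernel
         * helper): a backend that needs no special-case patching can
         * simply forward to the generic patcher declared later in this
         * header:
         *
         *      static unsigned example_patch(u8 type, u16 clobbers,
         *                                    void *insns, unsigned len)
         *      {
         *              return paravirt_patch_default(type, clobbers,
         *                                            insns, len);
         *      }
         */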
48
49         /* Basic arch-specific setup */
50         void (*arch_setup)(void);
51         char *(*memory_setup)(void);
52         void (*init_IRQ)(void);
53         void (*time_init)(void);
54
55         /*
56          * Called before/after init_mm pagetable setup. setup_start
57          * may reset %cr3, and may pre-install parts of the pagetable;
58          * pagetable setup is expected to preserve any existing
59          * mapping.
60          */
61         void (*pagetable_setup_start)(pgd_t *pgd_base);
62         void (*pagetable_setup_done)(pgd_t *pgd_base);
63
64         /* Print a banner to identify the environment */
65         void (*banner)(void);
66
67         /* Get and set time of day */
68         unsigned long (*get_wallclock)(void);
69         int (*set_wallclock)(unsigned long);
70
71         /* cpuid emulation, mostly so that caps bits can be disabled */
72         void (*cpuid)(unsigned int *eax, unsigned int *ebx,
73                       unsigned int *ecx, unsigned int *edx);
74
75         /* hooks for various privileged instructions */
76         unsigned long (*get_debugreg)(int regno);
77         void (*set_debugreg)(int regno, unsigned long value);
78
79         void (*clts)(void);
80
81         unsigned long (*read_cr0)(void);
82         void (*write_cr0)(unsigned long);
83
84         unsigned long (*read_cr2)(void);
85         void (*write_cr2)(unsigned long);
86
87         unsigned long (*read_cr3)(void);
88         void (*write_cr3)(unsigned long);
89
90         unsigned long (*read_cr4_safe)(void);
91         unsigned long (*read_cr4)(void);
92         void (*write_cr4)(unsigned long);
93
94         /*
95          * Get/set interrupt state.  save_fl and restore_fl are only
96          * expected to use X86_EFLAGS_IF; all other bits
97          * returned from save_fl are undefined, and may be ignored by
98          * restore_fl.
99          */
100         unsigned long (*save_fl)(void);
101         void (*restore_fl)(unsigned long);
102         void (*irq_disable)(void);
103         void (*irq_enable)(void);
104         void (*safe_halt)(void);
105         void (*halt)(void);
106
107         void (*wbinvd)(void);
108
109         /* MSR, PMC and TSC operations.
110            read_msr sets *err to 0/-EFAULT; write_msr returns 0/-EFAULT. */
111         u64 (*read_msr)(unsigned int msr, int *err);
112         int (*write_msr)(unsigned int msr, u64 val);
113
114         u64 (*read_tsc)(void);
115         u64 (*read_pmc)(void);
116         u64 (*get_scheduled_cycles)(void);
117         unsigned long (*get_cpu_khz)(void);
118
119         /* Segment descriptor handling */
120         void (*load_tr_desc)(void);
121         void (*load_gdt)(const struct Xgt_desc_struct *);
122         void (*load_idt)(const struct Xgt_desc_struct *);
123         void (*store_gdt)(struct Xgt_desc_struct *);
124         void (*store_idt)(struct Xgt_desc_struct *);
125         void (*set_ldt)(const void *desc, unsigned entries);
126         unsigned long (*store_tr)(void);
127         void (*load_tls)(struct thread_struct *t, unsigned int cpu);
128         void (*write_ldt_entry)(struct desc_struct *,
129                                 int entrynum, u32 low, u32 high);
130         void (*write_gdt_entry)(struct desc_struct *,
131                                 int entrynum, u32 low, u32 high);
132         void (*write_idt_entry)(struct desc_struct *,
133                                 int entrynum, u32 low, u32 high);
134         void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);
135
136         void (*set_iopl_mask)(unsigned mask);
137         void (*io_delay)(void);
138
139         /*
140          * Hooks for intercepting the creation/use/destruction of an
141          * mm_struct.
142          */
143         void (*activate_mm)(struct mm_struct *prev,
144                             struct mm_struct *next);
145         void (*dup_mmap)(struct mm_struct *oldmm,
146                          struct mm_struct *mm);
147         void (*exit_mmap)(struct mm_struct *mm);
148
149 #ifdef CONFIG_X86_LOCAL_APIC
150         /*
151          * Direct APIC operations, principally for VMI.  Ideally
152          * these shouldn't be in this interface.
153          */
154         void (*apic_write)(unsigned long reg, unsigned long v);
155         void (*apic_write_atomic)(unsigned long reg, unsigned long v);
156         unsigned long (*apic_read)(unsigned long reg);
157         void (*setup_boot_clock)(void);
158         void (*setup_secondary_clock)(void);
159
160         void (*startup_ipi_hook)(int phys_apicid,
161                                  unsigned long start_eip,
162                                  unsigned long start_esp);
163 #endif
164
165         /* TLB operations */
166         void (*flush_tlb_user)(void);
167         void (*flush_tlb_kernel)(void);
168         void (*flush_tlb_single)(unsigned long addr);
169         void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
170                                  unsigned long va);
171
172         /* Hooks for allocating/releasing pagetable pages */
173         void (*alloc_pt)(u32 pfn);
174         void (*alloc_pd)(u32 pfn);
175         void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
176         void (*release_pt)(u32 pfn);
177         void (*release_pd)(u32 pfn);
178
179         /* Pagetable manipulation functions */
180         void (*set_pte)(pte_t *ptep, pte_t pteval);
181         void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
182                            pte_t *ptep, pte_t pteval);
183         void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
184         void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
185         void (*pte_update_defer)(struct mm_struct *mm,
186                                  unsigned long addr, pte_t *ptep);
187
188         pte_t (*ptep_get_and_clear)(pte_t *ptep);
189
190 #ifdef CONFIG_X86_PAE
191         void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
192         void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
193         void (*set_pud)(pud_t *pudp, pud_t pudval);
194         void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
195         void (*pmd_clear)(pmd_t *pmdp);
196
197         unsigned long long (*pte_val)(pte_t);
198         unsigned long long (*pmd_val)(pmd_t);
199         unsigned long long (*pgd_val)(pgd_t);
200
201         pte_t (*make_pte)(unsigned long long pte);
202         pmd_t (*make_pmd)(unsigned long long pmd);
203         pgd_t (*make_pgd)(unsigned long long pgd);
204 #else
205         unsigned long (*pte_val)(pte_t);
206         unsigned long (*pgd_val)(pgd_t);
207
208         pte_t (*make_pte)(unsigned long pte);
209         pgd_t (*make_pgd)(unsigned long pgd);
210 #endif
211
212         /* Set deferred update mode, used for batching operations. */
213         void (*set_lazy_mode)(enum paravirt_lazy_mode mode);
214
215         /* These two are jumped to, not actually called. */
216         void (*irq_enable_sysexit)(void);
217         void (*iret)(void);
218 };
219
220 /* Mark a paravirt probe function. */
221 #define paravirt_probe(fn)                                              \
222  static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
223                 __attribute__((__section__(".paravirtprobe"))) = fn
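/*
 * Example usage (the function name here is hypothetical): a hypervisor
 * backend marks its boot-time detection routine with
 *
 *      asmlinkage void example_probe(void);
 *      paravirt_probe(example_probe);
 *
 * so that a pointer to it is placed in the .paravirtprobe section for
 * the early boot code to try.
 */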
224
225 extern struct paravirt_ops paravirt_ops;
226
227 #define PARAVIRT_PATCH(x)                                       \
228         (offsetof(struct paravirt_ops, x) / sizeof(void *))
229
230 #define paravirt_type(type)                                     \
231         [paravirt_typenum] "i" (PARAVIRT_PATCH(type))
232 #define paravirt_clobber(clobber)               \
233         [paravirt_clobber] "i" (clobber)
234
235 /*
236  * Generate some code, and mark it as patchable by the
237  * apply_paravirt() alternate instruction patcher.
238  */
239 #define _paravirt_alt(insn_string, type, clobber)       \
240         "771:\n\t" insn_string "\n" "772:\n"            \
241         ".pushsection .parainstructions,\"a\"\n"        \
242         "  .long 771b\n"                                \
243         "  .byte " type "\n"                            \
244         "  .byte 772b-771b\n"                           \
245         "  .short " clobber "\n"                        \
246         ".popsection\n"
247
248 /* Generate patchable code, with the default asm parameters. */
249 #define paravirt_alt(insn_string)                                       \
250         _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
251
252 unsigned paravirt_patch_nop(void);
253 unsigned paravirt_patch_ignore(unsigned len);
254 unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
255                              void *site, u16 site_clobbers,
256                              unsigned len);
257 unsigned paravirt_patch_jmp(void *target, void *site, unsigned len);
258 unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len);
259
260 unsigned paravirt_patch_insns(void *site, unsigned len,
261                               const char *start, const char *end);
262
263
264 /*
265  * This generates an indirect call based on the operation type number.
266  * The type number, computed in PARAVIRT_PATCH, is derived from the
267  * offset into the paravirt_ops structure, and can therefore be freely
268  * converted back into a structure offset.
269  */
270 #define PARAVIRT_CALL   "call *(paravirt_ops+%c[paravirt_typenum]*4);"
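/*
 * Hand-coded use (editor's sketch; this is the same pattern the irqflags
 * helpers near the end of this header follow): save the registers the op
 * may clobber, then supply the type and clobber operands defined above:
 *
 *      asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
 *                                PARAVIRT_CALL
 *                                "popl %%edx; popl %%ecx")
 *                   : : paravirt_type(irq_disable),
 *                       paravirt_clobber(CLBR_EAX)
 *                   : "memory", "eax", "cc");
 */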
271
272 /*
273  * These macros are intended to wrap calls into a paravirt_ops
274  * operation, so that they can be later identified and patched at
275  * runtime.
276  *
277  * Normally, a call to a pv_op function is a simple indirect call:
278  * (paravirt_ops.operations)(args...).
279  *
280  * Unfortunately, this is a relatively slow operation for modern CPUs,
281  * because it cannot necessarily determine what the destination
282  * address is.  In this case, the address is a runtime constant, so at
283  * the very least we can patch the call to be a simple direct call, or
284  * ideally, patch an inline implementation into the callsite.  (Direct
285  * calls are essentially free, because the call and return addresses
286  * are completely predictable.)
287  *
288  * These macros rely on the standard gcc "regparm(3)" calling
289  * convention, in which the first three arguments are placed in %eax,
290  * %edx, %ecx (in that order), and the remaining arguments are placed
291  * on the stack.  All caller-save registers (eax,edx,ecx) are expected
292  * to be modified (either clobbered or used for return values).
293  *
294  * The call instruction itself is marked by placing its start address
295  * and size into the .parainstructions section, so that
296  * apply_paravirt() in arch/i386/kernel/alternative.c can do the
297  * appropriate patching under the control of the backend paravirt_ops
298  * implementation.
299  *
300  * Unfortunately there's no way to get gcc to generate the args setup
301  * for the call, and then allow the call itself to be generated by an
302  * inline asm.  Because of this, we must do the complete arg setup and
303  * return value handling from within these macros.  This is fairly
304  * cumbersome.
305  *
306  * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
307  * It could be extended to more arguments, but there would be little
308  * to be gained from that.  For each number of arguments, there are
309  * the two VCALL and CALL variants for void and non-void functions.
310  *
311  * When there is a return value, the invoker of the macro must specify
312  * the return type.  The macro then uses sizeof() on that type to
313  * determine whether it's a 32 or 64 bit value, and places the return
314  * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
315  * 64-bit).
316  *
317  * 64-bit arguments are passed as a pair of adjacent 32-bit arguments
318  * in low,high order.
319  *
320  * Small structures are passed and returned in registers.  The macro
321  * calling convention can't directly deal with this, so the wrapper
322  * functions must do this.
323  *
324  * These PVOP_* macros are only defined within this header.  This
325  * means that all uses must be wrapped in inline functions.  This also
326  * makes sure the incoming and outgoing types are always correct.
327  */
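/*
 * Worked example (see the wrappers later in this file): a no-argument op
 * returning an unsigned long is wrapped as
 *
 *      static inline unsigned long read_cr2(void)
 *      {
 *              return PVOP_CALL0(unsigned long, read_cr2);
 *      }
 *
 * which emits a single patchable indirect call with %eax, %edx and %ecx
 * listed as outputs, and returns whatever the op left in %eax.
 */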
328 #define PVOP_CALL0(__rettype, __op)                                     \
329         ({                                                              \
330                 __rettype __ret;                                        \
331                 if (sizeof(__rettype) > sizeof(unsigned long)) {        \
332                         unsigned long long __tmp;                       \
333                         unsigned long __ecx;                            \
334                         asm volatile(paravirt_alt(PARAVIRT_CALL)        \
335                                      : "=A" (__tmp), "=c" (__ecx)       \
336                                      : paravirt_type(__op),             \
337                                        paravirt_clobber(CLBR_ANY)       \
338                                      : "memory", "cc");                 \
339                         __ret = (__rettype)__tmp;                       \
340                 } else {                                                \
341                         unsigned long __tmp, __edx, __ecx;              \
342                         asm volatile(paravirt_alt(PARAVIRT_CALL)        \
343                                      : "=a" (__tmp), "=d" (__edx),      \
344                                        "=c" (__ecx)                     \
345                                      : paravirt_type(__op),             \
346                                        paravirt_clobber(CLBR_ANY)       \
347                                      : "memory", "cc");                 \
348                         __ret = (__rettype)__tmp;                       \
349                 }                                                       \
350                 __ret;                                                  \
351         })
352 #define PVOP_VCALL0(__op)                                               \
353         ({                                                              \
354                 unsigned long __eax, __edx, __ecx;                      \
355                 asm volatile(paravirt_alt(PARAVIRT_CALL)                \
356                              : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
357                              : paravirt_type(__op),                     \
358                                paravirt_clobber(CLBR_ANY)               \
359                              : "memory", "cc");                         \
360         })
361
362 #define PVOP_CALL1(__rettype, __op, arg1)                               \
363         ({                                                              \
364                 __rettype __ret;                                        \
365                 if (sizeof(__rettype) > sizeof(unsigned long)) {        \
366                         unsigned long long __tmp;                       \
367                         unsigned long __ecx;                            \
368                         asm volatile(paravirt_alt(PARAVIRT_CALL)        \
369                                      : "=A" (__tmp), "=c" (__ecx)       \
370                                      : "a" ((u32)(arg1)),               \
371                                        paravirt_type(__op),             \
372                                        paravirt_clobber(CLBR_ANY)       \
373                                      : "memory", "cc");                 \
374                         __ret = (__rettype)__tmp;                       \
375                 } else {                                                \
376                         unsigned long __tmp, __edx, __ecx;              \
377                         asm volatile(paravirt_alt(PARAVIRT_CALL)        \
378                                      : "=a" (__tmp), "=d" (__edx),      \
379                                        "=c" (__ecx)                     \
380                                      : "0" ((u32)(arg1)),               \
381                                        paravirt_type(__op),             \
382                                        paravirt_clobber(CLBR_ANY)       \
383                                      : "memory", "cc");                 \
384                         __ret = (__rettype)__tmp;                       \
385                 }                                                       \
386                 __ret;                                                  \
387         })
388 #define PVOP_VCALL1(__op, arg1)                                         \
389         ({                                                              \
390                 unsigned long __eax, __edx, __ecx;                      \
391                 asm volatile(paravirt_alt(PARAVIRT_CALL)                \
392                              : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
393                              : "0" ((u32)(arg1)),                       \
394                                paravirt_type(__op),                     \
395                                paravirt_clobber(CLBR_ANY)               \
396                              : "memory", "cc");                         \
397         })
398
399 #define PVOP_CALL2(__rettype, __op, arg1, arg2)                         \
400         ({                                                              \
401                 __rettype __ret;                                        \
402                 if (sizeof(__rettype) > sizeof(unsigned long)) {        \
403                         unsigned long long __tmp;                       \
404                         unsigned long __ecx;                            \
405                         asm volatile(paravirt_alt(PARAVIRT_CALL)        \
406                                      : "=A" (__tmp), "=c" (__ecx)       \
407                                      : "a" ((u32)(arg1)),               \
408                                        "d" ((u32)(arg2)),               \
409                                        paravirt_type(__op),             \
410                                        paravirt_clobber(CLBR_ANY)       \
411                                      : "memory", "cc");                 \
412                         __ret = (__rettype)__tmp;                       \
413                 } else {                                                \
414                         unsigned long __tmp, __edx, __ecx;              \
415                         asm volatile(paravirt_alt(PARAVIRT_CALL)        \
416                                      : "=a" (__tmp), "=d" (__edx),      \
417                                        "=c" (__ecx)                     \
418                                      : "0" ((u32)(arg1)),               \
419                                        "1" ((u32)(arg2)),               \
420                                        paravirt_type(__op),             \
421                                        paravirt_clobber(CLBR_ANY)       \
422                                      : "memory", "cc");                 \
423                         __ret = (__rettype)__tmp;                       \
424                 }                                                       \
425                 __ret;                                                  \
426         })
427 #define PVOP_VCALL2(__op, arg1, arg2)                                   \
428         ({                                                              \
429                 unsigned long __eax, __edx, __ecx;                      \
430                 asm volatile(paravirt_alt(PARAVIRT_CALL)                \
431                              : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
432                              : "0" ((u32)(arg1)),                       \
433                                "1" ((u32)(arg2)),                       \
434                                paravirt_type(__op),                     \
435                                paravirt_clobber(CLBR_ANY)               \
436                              : "memory", "cc");                         \
437         })
438
439 #define PVOP_CALL3(__rettype, __op, arg1, arg2, arg3)                   \
440         ({                                                              \
441                 __rettype __ret;                                        \
442                 if (sizeof(__rettype) > sizeof(unsigned long)) {        \
443                         unsigned long long __tmp;                       \
444                         unsigned long __ecx;                            \
445                         asm volatile(paravirt_alt(PARAVIRT_CALL)        \
446                                      : "=A" (__tmp), "=c" (__ecx)       \
447                                      : "a" ((u32)(arg1)),               \
448                                        "d" ((u32)(arg2)),               \
449                                        "1" ((u32)(arg3)),               \
450                                        paravirt_type(__op),             \
451                                        paravirt_clobber(CLBR_ANY)       \
452                                      : "memory", "cc");                 \
453                         __ret = (__rettype)__tmp;                       \
454                 } else {                                                \
455                         unsigned long __tmp, __edx, __ecx;              \
456                         asm volatile(paravirt_alt(PARAVIRT_CALL)        \
457                                      : "=a" (__tmp), "=d" (__edx),      \
458                                        "=c" (__ecx)                     \
459                                      : "0" ((u32)(arg1)),               \
460                                        "1" ((u32)(arg2)),               \
461                                        "2" ((u32)(arg3)),               \
462                                        paravirt_type(__op),             \
463                                        paravirt_clobber(CLBR_ANY)       \
464                                      : "memory", "cc");                 \
465                         __ret = (__rettype)__tmp;                       \
466                 }                                                       \
467                 __ret;                                                  \
468         })
469 #define PVOP_VCALL3(__op, arg1, arg2, arg3)                             \
470         ({                                                              \
471                 unsigned long __eax, __edx, __ecx;                      \
472                 asm volatile(paravirt_alt(PARAVIRT_CALL)                \
473                              : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
474                              : "0" ((u32)(arg1)),                       \
475                                "1" ((u32)(arg2)),                       \
476                                "2" ((u32)(arg3)),                       \
477                                paravirt_type(__op),                     \
478                                paravirt_clobber(CLBR_ANY)               \
479                              : "memory", "cc");                         \
480         })
481
482 #define PVOP_CALL4(__rettype, __op, arg1, arg2, arg3, arg4)             \
483         ({                                                              \
484                 __rettype __ret;                                        \
485                 if (sizeof(__rettype) > sizeof(unsigned long)) {        \
486                         unsigned long long __tmp;                       \
487                         unsigned long __ecx;                            \
488                         asm volatile("push %[_arg4]; "                  \
489                                      paravirt_alt(PARAVIRT_CALL)        \
490                                      "lea 4(%%esp),%%esp"               \
491                                      : "=A" (__tmp), "=c" (__ecx)       \
492                                      : "a" ((u32)(arg1)),               \
493                                        "d" ((u32)(arg2)),               \
494                                        "1" ((u32)(arg3)),               \
495                                        [_arg4] "mr" ((u32)(arg4)),      \
496                                        paravirt_type(__op),             \
497                                        paravirt_clobber(CLBR_ANY)       \
498                                      : "memory", "cc");                 \
499                         __ret = (__rettype)__tmp;                       \
500                 } else {                                                \
501                         unsigned long __tmp, __edx, __ecx;              \
502                         asm volatile("push %[_arg4]; "                  \
503                                      paravirt_alt(PARAVIRT_CALL)        \
504                                      "lea 4(%%esp),%%esp"               \
505                                      : "=a" (__tmp), "=d" (__edx), "=c" (__ecx) \
506                                      : "0" ((u32)(arg1)),               \
507                                        "1" ((u32)(arg2)),               \
508                                        "2" ((u32)(arg3)),               \
509                                        [_arg4]"mr" ((u32)(arg4)),       \
510                                        paravirt_type(__op),             \
511                                        paravirt_clobber(CLBR_ANY)       \
512                                      : "memory", "cc");                 \
513                         __ret = (__rettype)__tmp;                       \
514                 }                                                       \
515                 __ret;                                                  \
516         })
517 #define PVOP_VCALL4(__op, arg1, arg2, arg3, arg4)                       \
518         ({                                                              \
519                 unsigned long __eax, __edx, __ecx;                      \
520                 asm volatile("push %[_arg4]; "                          \
521                              paravirt_alt(PARAVIRT_CALL)                \
522                              "lea 4(%%esp),%%esp"                       \
523                              : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \
524                              : "0" ((u32)(arg1)),                       \
525                                "1" ((u32)(arg2)),                       \
526                                "2" ((u32)(arg3)),                       \
527                                [_arg4]"mr" ((u32)(arg4)),               \
528                                paravirt_type(__op),                     \
529                                paravirt_clobber(CLBR_ANY)               \
530                              : "memory", "cc");                         \
531         })
532
533 static inline int paravirt_enabled(void)
534 {
535         return paravirt_ops.paravirt_enabled;
536 }
537
538 static inline void load_esp0(struct tss_struct *tss,
539                              struct thread_struct *thread)
540 {
541         PVOP_VCALL2(load_esp0, tss, thread);
542 }
543
544 #define ARCH_SETUP                      paravirt_ops.arch_setup();
545 static inline unsigned long get_wallclock(void)
546 {
547         return PVOP_CALL0(unsigned long, get_wallclock);
548 }
549
550 static inline int set_wallclock(unsigned long nowtime)
551 {
552         return PVOP_CALL1(int, set_wallclock, nowtime);
553 }
554
555 static inline void (*choose_time_init(void))(void)
556 {
557         return paravirt_ops.time_init;
558 }
559
560 /* The paravirtualized CPUID instruction. */
561 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
562                            unsigned int *ecx, unsigned int *edx)
563 {
564         PVOP_VCALL4(cpuid, eax, ebx, ecx, edx);
565 }
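/*
 * Usage sketch (assumes the usual convention that the input leaf is
 * passed in via *eax, as the cpuid() helpers in <asm/processor.h> set up):
 *
 *      unsigned int eax = 1, ebx, ecx = 0, edx;
 *      __cpuid(&eax, &ebx, &ecx, &edx);
 *      ... eax/ebx/ecx/edx now hold the leaf 1 results ...
 */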
566
567 /*
568  * These special macros can be used to get or set a debugging register
569  */
570 static inline unsigned long paravirt_get_debugreg(int reg)
571 {
572         return PVOP_CALL1(unsigned long, get_debugreg, reg);
573 }
574 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
575 static inline void set_debugreg(unsigned long val, int reg)
576 {
577         PVOP_VCALL2(set_debugreg, reg, val);
578 }
579
580 static inline void clts(void)
581 {
582         PVOP_VCALL0(clts);
583 }
584
585 static inline unsigned long read_cr0(void)
586 {
587         return PVOP_CALL0(unsigned long, read_cr0);
588 }
589
590 static inline void write_cr0(unsigned long x)
591 {
592         PVOP_VCALL1(write_cr0, x);
593 }
594
595 static inline unsigned long read_cr2(void)
596 {
597         return PVOP_CALL0(unsigned long, read_cr2);
598 }
599
600 static inline void write_cr2(unsigned long x)
601 {
602         PVOP_VCALL1(write_cr2, x);
603 }
604
605 static inline unsigned long read_cr3(void)
606 {
607         return PVOP_CALL0(unsigned long, read_cr3);
608 }
609
610 static inline void write_cr3(unsigned long x)
611 {
612         PVOP_VCALL1(write_cr3, x);
613 }
614
615 static inline unsigned long read_cr4(void)
616 {
617         return PVOP_CALL0(unsigned long, read_cr4);
618 }
619 static inline unsigned long read_cr4_safe(void)
620 {
621         return PVOP_CALL0(unsigned long, read_cr4_safe);
622 }
623
624 static inline void write_cr4(unsigned long x)
625 {
626         PVOP_VCALL1(write_cr4, x);
627 }
628
629 static inline void raw_safe_halt(void)
630 {
631         PVOP_VCALL0(safe_halt);
632 }
633
634 static inline void halt(void)
635 {
636         PVOP_VCALL0(halt);
637 }
638
639 static inline void wbinvd(void)
640 {
641         PVOP_VCALL0(wbinvd);
642 }
643
644 #define get_kernel_rpl()  (paravirt_ops.kernel_rpl)
645
646 static inline u64 paravirt_read_msr(unsigned msr, int *err)
647 {
648         return PVOP_CALL2(u64, read_msr, msr, err);
649 }
650 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
651 {
652         return PVOP_CALL3(int, write_msr, msr, low, high);
653 }
654
655 /* These should all do BUG_ON(_err), but our headers are too tangled. */
656 #define rdmsr(msr,val1,val2) do {               \
657         int _err;                               \
658         u64 _l = paravirt_read_msr(msr, &_err); \
659         val1 = (u32)_l;                         \
660         val2 = _l >> 32;                        \
661 } while(0)
662
663 #define wrmsr(msr,val1,val2) do {               \
664         paravirt_write_msr(msr, val1, val2);    \
665 } while(0)
666
667 #define rdmsrl(msr,val) do {                    \
668         int _err;                               \
669         val = paravirt_read_msr(msr, &_err);    \
670 } while(0)
671
672 #define wrmsrl(msr,val)         ((void)paravirt_write_msr(msr, val, 0))
673 #define wrmsr_safe(msr,a,b)     paravirt_write_msr(msr, a, b)
674
675 /* rdmsr with exception handling */
676 #define rdmsr_safe(msr,a,b) ({                  \
677         int _err;                               \
678         u64 _l = paravirt_read_msr(msr, &_err); \
679         (*a) = (u32)_l;                         \
680         (*b) = _l >> 32;                        \
681         _err; })
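/*
 * Example (illustrative; MSR 0x10 is the TSC, the same index write_tsc()
 * below uses):
 *
 *      u32 lo, hi;
 *      if (rdmsr_safe(0x10, &lo, &hi) == 0)
 *              ... hi:lo holds the MSR value ...
 */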
682
683
684 static inline u64 paravirt_read_tsc(void)
685 {
686         return PVOP_CALL0(u64, read_tsc);
687 }
688 #define rdtsc(low,high) do {                    \
689         u64 _l = paravirt_read_tsc();           \
690         low = (u32)_l;                          \
691         high = _l >> 32;                        \
692 } while(0)
693
694 #define rdtscl(low) do {                        \
695         u64 _l = paravirt_read_tsc();           \
696         low = (int)_l;                          \
697 } while(0)
698
699 #define rdtscll(val) (val = paravirt_read_tsc())
700
701 #define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
702 #define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
703
704 #define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
705
706 static inline unsigned long long paravirt_read_pmc(int counter)
707 {
708         return PVOP_CALL1(u64, read_pmc, counter);
709 }
710
711 #define rdpmc(counter,low,high) do {            \
712         u64 _l = paravirt_read_pmc(counter);    \
713         low = (u32)_l;                          \
714         high = _l >> 32;                        \
715 } while(0)
716
717 static inline void load_TR_desc(void)
718 {
719         PVOP_VCALL0(load_tr_desc);
720 }
721 static inline void load_gdt(const struct Xgt_desc_struct *dtr)
722 {
723         PVOP_VCALL1(load_gdt, dtr);
724 }
725 static inline void load_idt(const struct Xgt_desc_struct *dtr)
726 {
727         PVOP_VCALL1(load_idt, dtr);
728 }
729 static inline void set_ldt(const void *addr, unsigned entries)
730 {
731         PVOP_VCALL2(set_ldt, addr, entries);
732 }
733 static inline void store_gdt(struct Xgt_desc_struct *dtr)
734 {
735         PVOP_VCALL1(store_gdt, dtr);
736 }
737 static inline void store_idt(struct Xgt_desc_struct *dtr)
738 {
739         PVOP_VCALL1(store_idt, dtr);
740 }
741 static inline unsigned long paravirt_store_tr(void)
742 {
743         return PVOP_CALL0(unsigned long, store_tr);
744 }
745 #define store_tr(tr)    ((tr) = paravirt_store_tr())
746 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
747 {
748         PVOP_VCALL2(load_tls, t, cpu);
749 }
750 static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high)
751 {
752         PVOP_VCALL4(write_ldt_entry, dt, entry, low, high);
753 }
754 static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high)
755 {
756         PVOP_VCALL4(write_gdt_entry, dt, entry, low, high);
757 }
758 static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high)
759 {
760         PVOP_VCALL4(write_idt_entry, dt, entry, low, high);
761 }
762 static inline void set_iopl_mask(unsigned mask)
763 {
764         PVOP_VCALL1(set_iopl_mask, mask);
765 }
766
767 /* The paravirtualized I/O functions */
768 static inline void slow_down_io(void) {
769         paravirt_ops.io_delay();
770 #ifdef REALLY_SLOW_IO
771         paravirt_ops.io_delay();
772         paravirt_ops.io_delay();
773         paravirt_ops.io_delay();
774 #endif
775 }
776
777 #ifdef CONFIG_X86_LOCAL_APIC
778 /*
779  * Basic functions accessing APICs.
780  */
781 static inline void apic_write(unsigned long reg, unsigned long v)
782 {
783         PVOP_VCALL2(apic_write, reg, v);
784 }
785
786 static inline void apic_write_atomic(unsigned long reg, unsigned long v)
787 {
788         PVOP_VCALL2(apic_write_atomic, reg, v);
789 }
790
791 static inline unsigned long apic_read(unsigned long reg)
792 {
793         return PVOP_CALL1(unsigned long, apic_read, reg);
794 }
795
796 static inline void setup_boot_clock(void)
797 {
798         PVOP_VCALL0(setup_boot_clock);
799 }
800
801 static inline void setup_secondary_clock(void)
802 {
803         PVOP_VCALL0(setup_secondary_clock);
804 }
805 #endif
806
807 static inline void paravirt_pagetable_setup_start(pgd_t *base)
808 {
809         if (paravirt_ops.pagetable_setup_start)
810                 (*paravirt_ops.pagetable_setup_start)(base);
811 }
812
813 static inline void paravirt_pagetable_setup_done(pgd_t *base)
814 {
815         if (paravirt_ops.pagetable_setup_done)
816                 (*paravirt_ops.pagetable_setup_done)(base);
817 }
818
819 #ifdef CONFIG_SMP
820 static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
821                                     unsigned long start_esp)
822 {
823         PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp);
824 }
825 #endif
826
827 static inline void paravirt_activate_mm(struct mm_struct *prev,
828                                         struct mm_struct *next)
829 {
830         PVOP_VCALL2(activate_mm, prev, next);
831 }
832
833 static inline void arch_dup_mmap(struct mm_struct *oldmm,
834                                  struct mm_struct *mm)
835 {
836         PVOP_VCALL2(dup_mmap, oldmm, mm);
837 }
838
839 static inline void arch_exit_mmap(struct mm_struct *mm)
840 {
841         PVOP_VCALL1(exit_mmap, mm);
842 }
843
844 static inline void __flush_tlb(void)
845 {
846         PVOP_VCALL0(flush_tlb_user);
847 }
848 static inline void __flush_tlb_global(void)
849 {
850         PVOP_VCALL0(flush_tlb_kernel);
851 }
852 static inline void __flush_tlb_single(unsigned long addr)
853 {
854         PVOP_VCALL1(flush_tlb_single, addr);
855 }
856
857 static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
858                                     unsigned long va)
859 {
860         PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
861 }
862
863 static inline void paravirt_alloc_pt(unsigned pfn)
864 {
865         PVOP_VCALL1(alloc_pt, pfn);
866 }
867 static inline void paravirt_release_pt(unsigned pfn)
868 {
869         PVOP_VCALL1(release_pt, pfn);
870 }
871
872 static inline void paravirt_alloc_pd(unsigned pfn)
873 {
874         PVOP_VCALL1(alloc_pd, pfn);
875 }
876
877 static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
878                                            unsigned start, unsigned count)
879 {
880         PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count);
881 }
882 static inline void paravirt_release_pd(unsigned pfn)
883 {
884         PVOP_VCALL1(release_pd, pfn);
885 }
886
887 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
888                               pte_t *ptep)
889 {
890         PVOP_VCALL3(pte_update, mm, addr, ptep);
891 }
892
893 static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
894                                     pte_t *ptep)
895 {
896         PVOP_VCALL3(pte_update_defer, mm, addr, ptep);
897 }
898
899 #ifdef CONFIG_X86_PAE
900 static inline pte_t __pte(unsigned long long val)
901 {
902         unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte,
903                                             val, val >> 32);
904         return (pte_t) { ret, ret >> 32 };
905 }
906
907 static inline pmd_t __pmd(unsigned long long val)
908 {
909         return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd, val, val >> 32) };
910 }
911
912 static inline pgd_t __pgd(unsigned long long val)
913 {
914         return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd, val, val >> 32) };
915 }
916
917 static inline unsigned long long pte_val(pte_t x)
918 {
919         return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high);
920 }
921
922 static inline unsigned long long pmd_val(pmd_t x)
923 {
924         return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32);
925 }
926
927 static inline unsigned long long pgd_val(pgd_t x)
928 {
929         return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32);
930 }
931
932 static inline void set_pte(pte_t *ptep, pte_t pteval)
933 {
934         PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high);
935 }
936
937 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
938                               pte_t *ptep, pte_t pteval)
939 {
940         /* 5 arg words: too many for the PVOP_* macros, so call directly */
941         paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
942 }
943
944 static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
945 {
946         PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high);
947 }
948
949 static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
950                                    pte_t *ptep, pte_t pte)
951 {
952         /* 5 arg words: too many for the PVOP_* macros, so call directly */
953         paravirt_ops.set_pte_present(mm, addr, ptep, pte);
954 }
955
956 static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
957 {
958         PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32);
959 }
960
961 static inline void set_pud(pud_t *pudp, pud_t pudval)
962 {
963         PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32);
964 }
965
966 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
967 {
968         PVOP_VCALL3(pte_clear, mm, addr, ptep);
969 }
970
971 static inline void pmd_clear(pmd_t *pmdp)
972 {
973         PVOP_VCALL1(pmd_clear, pmdp);
974 }
975
976 static inline pte_t raw_ptep_get_and_clear(pte_t *p)
977 {
978         unsigned long long val = PVOP_CALL1(unsigned long long, ptep_get_and_clear, p);
979         return (pte_t) { val, val >> 32 };
980 }
981 #else  /* !CONFIG_X86_PAE */
982 static inline pte_t __pte(unsigned long val)
983 {
984         return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) };
985 }
986
987 static inline pgd_t __pgd(unsigned long val)
988 {
989         return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) };
990 }
991
992 static inline unsigned long pte_val(pte_t x)
993 {
994         return PVOP_CALL1(unsigned long, pte_val, x.pte_low);
995 }
996
997 static inline unsigned long pgd_val(pgd_t x)
998 {
999         return PVOP_CALL1(unsigned long, pgd_val, x.pgd);
1000 }
1001
1002 static inline void set_pte(pte_t *ptep, pte_t pteval)
1003 {
1004         PVOP_VCALL2(set_pte, ptep, pteval.pte_low);
1005 }
1006
1007 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1008                               pte_t *ptep, pte_t pteval)
1009 {
1010         PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low);
1011 }
1012
1013 static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
1014 {
1015         PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd);
1016 }
1017
1018 static inline pte_t raw_ptep_get_and_clear(pte_t *p)
1019 {
1020         return (pte_t) { PVOP_CALL1(unsigned long, ptep_get_and_clear, p) };
1021 }
1022 #endif  /* CONFIG_X86_PAE */
1023
1024 /* Lazy mode for batching updates / context switch */
1025 #define PARAVIRT_LAZY_NONE 0
1026 #define PARAVIRT_LAZY_MMU  1
1027 #define PARAVIRT_LAZY_CPU  2
1028 #define PARAVIRT_LAZY_FLUSH 3
1029
1030 #define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
1031 static inline void arch_enter_lazy_cpu_mode(void)
1032 {
1033         PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU);
1034 }
1035
1036 static inline void arch_leave_lazy_cpu_mode(void)
1037 {
1038         PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
1039 }
1040
1041 static inline void arch_flush_lazy_cpu_mode(void)
1042 {
1043         PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
1044 }
1045
1046
1047 #define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
1048 static inline void arch_enter_lazy_mmu_mode(void)
1049 {
1050         PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU);
1051 }
1052
1053 static inline void arch_leave_lazy_mmu_mode(void)
1054 {
1055         PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE);
1056 }
1057
1058 static inline void arch_flush_lazy_mmu_mode(void)
1059 {
1060         PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH);
1061 }
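/*
 * Typical (illustrative) use: bracket a run of pagetable updates so that
 * a batching backend can queue them and flush once:
 *
 *      arch_enter_lazy_mmu_mode();
 *      for (...; addr < end; ...)
 *              set_pte_at(mm, addr, ptep, pte);
 *      arch_leave_lazy_mmu_mode();
 */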
1062
1063 void _paravirt_nop(void);
1064 #define paravirt_nop    ((void *)_paravirt_nop)
1065
1066 /* These all sit in the .parainstructions section to tell us what to patch. */
1067 struct paravirt_patch_site {
1068         u8 *instr;              /* original instructions */
1069         u8 instrtype;           /* type of this instruction */
1070         u8 len;                 /* length of original instruction */
1071         u16 clobbers;           /* what registers you may clobber */
1072 };
1073
1074 extern struct paravirt_patch_site __parainstructions[],
1075         __parainstructions_end[];
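/*
 * Rough sketch of how these records are consumed (the real loop is
 * apply_paravirt() in arch/i386/kernel/alternative.c, which also
 * nop-pads using the returned length):
 *
 *      struct paravirt_patch_site *p;
 *      for (p = __parainstructions; p < __parainstructions_end; p++)
 *              paravirt_ops.patch(p->instrtype, p->clobbers,
 *                                 p->instr, p->len);
 */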
1076
1077 static inline unsigned long __raw_local_save_flags(void)
1078 {
1079         unsigned long f;
1080
1081         asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
1082                                   PARAVIRT_CALL
1083                                   "popl %%edx; popl %%ecx")
1084                      : "=a"(f)
1085                      : paravirt_type(save_fl),
1086                        paravirt_clobber(CLBR_EAX)
1087                      : "memory", "cc");
1088         return f;
1089 }
1090
1091 static inline void raw_local_irq_restore(unsigned long f)
1092 {
1093         asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
1094                                   PARAVIRT_CALL
1095                                   "popl %%edx; popl %%ecx")
1096                      : "=a"(f)
1097                      : "0"(f),
1098                        paravirt_type(restore_fl),
1099                        paravirt_clobber(CLBR_EAX)
1100                      : "memory", "cc");
1101 }
1102
1103 static inline void raw_local_irq_disable(void)
1104 {
1105         asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
1106                                   PARAVIRT_CALL
1107                                   "popl %%edx; popl %%ecx")
1108                      :
1109                      : paravirt_type(irq_disable),
1110                        paravirt_clobber(CLBR_EAX)
1111                      : "memory", "eax", "cc");
1112 }
1113
1114 static inline void raw_local_irq_enable(void)
1115 {
1116         asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
1117                                   PARAVIRT_CALL
1118                                   "popl %%edx; popl %%ecx")
1119                      :
1120                      : paravirt_type(irq_enable),
1121                        paravirt_clobber(CLBR_EAX)
1122                      : "memory", "eax", "cc");
1123 }
1124
1125 static inline unsigned long __raw_local_irq_save(void)
1126 {
1127         unsigned long f;
1128
1129         f = __raw_local_save_flags();
1130         raw_local_irq_disable();
1131         return f;
1132 }
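/*
 * These are the raw primitives behind the local_irq_save()/
 * local_irq_restore() pair; the usual (illustrative) pattern is:
 *
 *      unsigned long flags;
 *      flags = __raw_local_irq_save();
 *      ... critical section with interrupts off ...
 *      raw_local_irq_restore(flags);
 */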
1133
1134 #define CLI_STRING                                                      \
1135         _paravirt_alt("pushl %%ecx; pushl %%edx;"                       \
1136                       "call *paravirt_ops+%c[paravirt_cli_type]*4;"     \
1137                       "popl %%edx; popl %%ecx",                         \
1138                       "%c[paravirt_cli_type]", "%c[paravirt_clobber]")
1139
1140 #define STI_STRING                                                      \
1141         _paravirt_alt("pushl %%ecx; pushl %%edx;"                       \
1142                       "call *paravirt_ops+%c[paravirt_sti_type]*4;"     \
1143                       "popl %%edx; popl %%ecx",                         \
1144                       "%c[paravirt_sti_type]", "%c[paravirt_clobber]")
1145
1146 #define CLI_STI_CLOBBERS , "%eax"
1147 #define CLI_STI_INPUT_ARGS                                              \
1148         ,                                                               \
1149         [paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)),          \
1150         [paravirt_sti_type] "i" (PARAVIRT_PATCH(irq_enable)),           \
1151         paravirt_clobber(CLBR_EAX)
1152
1153 /* Make sure as little as possible of this mess escapes. */
1154 #undef PARAVIRT_CALL
1155 #undef PVOP_VCALL0
1156 #undef PVOP_CALL0
1157 #undef PVOP_VCALL1
1158 #undef PVOP_CALL1
1159 #undef PVOP_VCALL2
1160 #undef PVOP_CALL2
1161 #undef PVOP_VCALL3
1162 #undef PVOP_CALL3
1163 #undef PVOP_VCALL4
1164 #undef PVOP_CALL4
1165
1166 #else  /* __ASSEMBLY__ */
1167
1168 #define PARA_PATCH(off) ((off) / 4)
1169
1170 #define PARA_SITE(ptype, clobbers, ops)         \
1171 771:;                                           \
1172         ops;                                    \
1173 772:;                                           \
1174         .pushsection .parainstructions,"a";     \
1175          .long 771b;                            \
1176          .byte ptype;                           \
1177          .byte 772b-771b;                       \
1178          .short clobbers;                       \
1179         .popsection
1180
1181 #define INTERRUPT_RETURN                                        \
1182         PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE,         \
1183                   jmp *%cs:paravirt_ops+PARAVIRT_iret)
1184
1185 #define DISABLE_INTERRUPTS(clobbers)                                    \
1186         PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers,           \
1187                   pushl %eax; pushl %ecx; pushl %edx;                   \
1188                   call *%cs:paravirt_ops+PARAVIRT_irq_disable;          \
1189                   popl %edx; popl %ecx; popl %eax)
1190
1191 #define ENABLE_INTERRUPTS(clobbers)                                     \
1192         PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers,            \
1193                   pushl %eax; pushl %ecx; pushl %edx;                   \
1194                   call *%cs:paravirt_ops+PARAVIRT_irq_enable;           \
1195                   popl %edx; popl %ecx; popl %eax)
1196
1197 #define ENABLE_INTERRUPTS_SYSEXIT                                       \
1198         PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE,   \
1199                   jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
1200
1201 #define GET_CR0_INTO_EAX                        \
1202         push %ecx; push %edx;                   \
1203         call *paravirt_ops+PARAVIRT_read_cr0;   \
1204         pop %edx; pop %ecx
1205
1206 #endif /* __ASSEMBLY__ */
1207 #endif /* CONFIG_PARAVIRT */
1208 #endif  /* __ASM_PARAVIRT_H */