/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "vmx.h"
#include "segment_descriptor.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int init_rmode_tss(struct kvm *kvm);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);

static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;

#ifdef CONFIG_X86_64
#define HOST_IS_64 1
#else
#define HOST_IS_64 0
#endif
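
/*
 * EFER bits that may legitimately differ between host and guest and
 * must therefore be switched on every guest entry/exit; for now this
 * is only the syscall-enable bit (EFER.SCE).
 */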
#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)

static struct vmcs_descriptor {
        int size;
        int order;
        u32 revision_id;
} vmcs_descriptor;

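/*
 * The descriptor above is filled in from the IA32_VMX_BASIC msr by
 * setup_vmcs_descriptor() at hardware-setup time.
 */
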
#define VMX_SEGMENT_FIELD(seg)                                  \
        [VCPU_SREG_##seg] = {                                   \
                .selector = GUEST_##seg##_SELECTOR,             \
                .base = GUEST_##seg##_BASE,                     \
                .limit = GUEST_##seg##_LIMIT,                   \
                .ar_bytes = GUEST_##seg##_AR_BYTES,             \
        }

static struct kvm_vmx_segment_field {
        unsigned selector;
        unsigned base;
        unsigned limit;
        unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
        VMX_SEGMENT_FIELD(CS),
        VMX_SEGMENT_FIELD(DS),
        VMX_SEGMENT_FIELD(ES),
        VMX_SEGMENT_FIELD(FS),
        VMX_SEGMENT_FIELD(GS),
        VMX_SEGMENT_FIELD(SS),
        VMX_SEGMENT_FIELD(TR),
        VMX_SEGMENT_FIELD(LDTR),
};

/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
        MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
#endif
        MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
{
        return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
}

static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
{
        int efer_offset = vcpu->msr_offset_efer;
        return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
                msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
}

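/*
 * The predicates below decode the VM-exit interruption-information
 * field: per the IA32 SDM, bits 7:0 hold the vector, bits 10:8 the
 * event type, and bit 31 marks the field as valid.
 */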
static inline int is_page_fault(u32 intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_no_device(u32 intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
{
        int i;

        for (i = 0; i < vcpu->nmsrs; ++i)
                if (vcpu->guest_msrs[i].index == msr)
                        return i;
        return -1;
}

static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
{
        int i;

        i = __find_msr_index(vcpu, msr);
        if (i >= 0)
                return &vcpu->guest_msrs[i];
        return NULL;
}

static void vmcs_clear(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);
        u8 error;

        asm volatile (ASM_VMX_VMCLEAR_RAX "; setna %0"
                      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
                      : "cc", "memory");
        if (error)
                printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
                       vmcs, phys_addr);
}

static void __vcpu_clear(void *arg)
{
        struct kvm_vcpu *vcpu = arg;
        int cpu = raw_smp_processor_id();

        if (vcpu->cpu == cpu)
                vmcs_clear(vcpu->vmcs);
        if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
                per_cpu(current_vmcs, cpu) = NULL;
        rdtscll(vcpu->host_tsc);
}

static void vcpu_clear(struct kvm_vcpu *vcpu)
{
        if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
                smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
        else
                __vcpu_clear(vcpu);
        vcpu->launched = 0;
}

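/*
 * VMCS field accessors.  VMREAD/VMWRITE move a field between the
 * current VMCS and a general-purpose register; failure is reported
 * through the flags register, which the "setna" sequences below
 * capture.
 */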
static unsigned long vmcs_readl(unsigned long field)
{
        unsigned long value;

        asm volatile (ASM_VMX_VMREAD_RDX_RAX
                      : "=a"(value) : "d"(field) : "cc");
        return value;
}

static u16 vmcs_read16(unsigned long field)
{
        return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
        return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
        return vmcs_readl(field);
#else
        return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
        printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
               field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
        dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
        u8 error;

        asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
                      : "=q"(error) : "a"(value), "d"(field) : "cc");
        if (unlikely(error))
                vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
        vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
        vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
#ifdef CONFIG_X86_64
        vmcs_writel(field, value);
#else
        vmcs_writel(field, value);
        asm volatile (""); /* compiler barrier between the two halves */
        vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
        vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
        vmcs_writel(field, vmcs_readl(field) | mask);
}

static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
        u32 eb;

        eb = 1u << PF_VECTOR;
        if (!vcpu->fpu_active)
                eb |= 1u << NM_VECTOR;
        if (vcpu->guest_debug.enabled)
                eb |= 1u << 1;  /* intercept #DB for the guest debugger */
        if (vcpu->rmode.active)
                eb = ~0;        /* emulated real mode traps all exceptions */
        vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void reload_tss(void)
{
#ifndef CONFIG_X86_64

        /*
         * VT restores TR but not its size; mark the TSS descriptor
         * available again so TR can be reloaded.
         */
        struct descriptor_table gdt;
        struct segment_descriptor *descs;

        get_gdt(&gdt);
        descs = (void *)gdt.base;
        descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
        load_TR_desc();
#endif
}

static void load_transition_efer(struct kvm_vcpu *vcpu)
{
        u64 trans_efer;
        int efer_offset = vcpu->msr_offset_efer;

        trans_efer = vcpu->host_msrs[efer_offset].data;
        trans_efer &= ~EFER_SAVE_RESTORE_BITS;
        trans_efer |= msr_efer_save_restore_bits(
                                vcpu->guest_msrs[efer_offset]);
        wrmsrl(MSR_EFER, trans_efer);
        vcpu->stat.efer_reload++;
}

static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
        struct vmx_host_state *hs = &vcpu->vmx_host_state;

        if (hs->loaded)
                return;

        hs->loaded = 1;
        /*
         * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
         * allow segment selectors with cpl > 0 or ti == 1.
         */
        hs->ldt_sel = read_ldt();
        hs->fs_gs_ldt_reload_needed = hs->ldt_sel;
        hs->fs_sel = read_fs();
        if (!(hs->fs_sel & 7))
                vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel);
        else {
                vmcs_write16(HOST_FS_SELECTOR, 0);
                hs->fs_gs_ldt_reload_needed = 1;
        }
        hs->gs_sel = read_gs();
        if (!(hs->gs_sel & 7))
                vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel);
        else {
                vmcs_write16(HOST_GS_SELECTOR, 0);
                hs->fs_gs_ldt_reload_needed = 1;
        }

#ifdef CONFIG_X86_64
        vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
        vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
        vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel));
        vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel));
#endif

#ifdef CONFIG_X86_64
        if (is_long_mode(vcpu)) {
                save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
        }
#endif
        load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
        if (msr_efer_need_save_restore(vcpu))
                load_transition_efer(vcpu);
}

static void vmx_load_host_state(struct kvm_vcpu *vcpu)
{
        struct vmx_host_state *hs = &vcpu->vmx_host_state;

        if (!hs->loaded)
                return;

        hs->loaded = 0;
        if (hs->fs_gs_ldt_reload_needed) {
                load_ldt(hs->ldt_sel);
                load_fs(hs->fs_sel);
                /*
                 * If we have to reload gs, we must take care to
                 * preserve our gs base.
                 */
                local_irq_disable();
                load_gs(hs->gs_sel);
#ifdef CONFIG_X86_64
                wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
                local_irq_enable();

                reload_tss();
        }
        save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
        load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
        if (msr_efer_need_save_restore(vcpu))
                load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
{
        u64 phys_addr = __pa(vcpu->vmcs);
        int cpu;
        u64 tsc_this, delta;

        cpu = get_cpu();

        if (vcpu->cpu != cpu)
                vcpu_clear(vcpu);

        if (per_cpu(current_vmcs, cpu) != vcpu->vmcs) {
                u8 error;

                per_cpu(current_vmcs, cpu) = vcpu->vmcs;
                asm volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
                              : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
                              : "cc");
                if (error)
                        printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
                               vcpu->vmcs, phys_addr);
        }

        if (vcpu->cpu != cpu) {
                struct descriptor_table dt;
                unsigned long sysenter_esp;

                vcpu->cpu = cpu;
                /*
                 * Linux uses per-cpu TSS and GDT, so set these when switching
                 * processors.
                 */
                vmcs_writel(HOST_TR_BASE, read_tr_base()); /* 22.2.4 */
                get_gdt(&dt);
                vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

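                /*
                 * vcpu->host_tsc was sampled when this vcpu's vmcs was
                 * last cleared on the previous cpu; since guest_tsc =
                 * host_tsc + tsc_offset (see guest_read_tsc()), shifting
                 * TSC_OFFSET by the inter-cpu delta hides unsynchronized
                 * host TSCs from the guest.
                 */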
                /*
                 * Make sure the time stamp counter is monotonic.
                 */
                rdtscll(tsc_this);
                delta = vcpu->host_tsc - tsc_this;
                vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
        }
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
        vmx_load_host_state(vcpu);
        kvm_put_guest_fpu(vcpu);
        put_cpu();
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
        if (vcpu->fpu_active)
                return;
        vcpu->fpu_active = 1;
        vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
        if (vcpu->cr0 & X86_CR0_TS)
                vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
        update_exception_bitmap(vcpu);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
        if (!vcpu->fpu_active)
                return;
        vcpu->fpu_active = 0;
        vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
        update_exception_bitmap(vcpu);
}

static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
        vcpu_clear(vcpu);
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
        return vmcs_readl(GUEST_RFLAGS);
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
        vmcs_writel(GUEST_RFLAGS, rflags);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        unsigned long rip;
        u32 interruptibility;

        rip = vmcs_readl(GUEST_RIP);
        rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
        vmcs_writel(GUEST_RIP, rip);

        /*
         * We emulated an instruction, so temporary interrupt blocking
         * (bits 0 and 1: blocking by sti and by mov ss) should be
         * removed, if set.
         */
        interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
        if (interruptibility & 3)
                vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
                             interruptibility & ~3);
        vcpu->interrupt_window_open = 1;
}

static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
        printk(KERN_DEBUG "inject_general_protection: rip 0x%lx\n",
               vmcs_readl(GUEST_RIP));
        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                     GP_VECTOR |
                     INTR_TYPE_EXCEPTION |
                     INTR_INFO_DELIEVER_CODE_MASK |
                     INTR_INFO_VALID_MASK);
}

/*
 * Swap MSR entries in the host/guest MSR arrays.
 */
void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
{
        struct vmx_msr_entry tmp;

        tmp = vcpu->guest_msrs[to];
        vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
        vcpu->guest_msrs[from] = tmp;
        tmp = vcpu->host_msrs[to];
        vcpu->host_msrs[to] = vcpu->host_msrs[from];
        vcpu->host_msrs[from] = tmp;
}

/*
 * Set up the lists of msrs to be saved and restored across guest
 * entry and exit.  Don't touch the 64-bit msrs if the guest is in
 * legacy mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct kvm_vcpu *vcpu)
{
        int save_nmsrs;

        save_nmsrs = 0;
#ifdef CONFIG_X86_64
        if (is_long_mode(vcpu)) {
                int index;

                index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
                if (index >= 0)
                        move_msr_up(vcpu, index, save_nmsrs++);
                index = __find_msr_index(vcpu, MSR_LSTAR);
                if (index >= 0)
                        move_msr_up(vcpu, index, save_nmsrs++);
                index = __find_msr_index(vcpu, MSR_CSTAR);
                if (index >= 0)
                        move_msr_up(vcpu, index, save_nmsrs++);
                index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
                if (index >= 0)
                        move_msr_up(vcpu, index, save_nmsrs++);
                /*
                 * MSR_K6_STAR is only needed on long mode guests, and only
                 * if efer.sce is enabled.
                 */
                index = __find_msr_index(vcpu, MSR_K6_STAR);
                if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
                        move_msr_up(vcpu, index, save_nmsrs++);
        }
#endif
        vcpu->save_nmsrs = save_nmsrs;

#ifdef CONFIG_X86_64
        vcpu->msr_offset_kernel_gs_base =
                __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
#endif
        vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
}

/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
        u64 host_tsc, tsc_offset;

        rdtscll(host_tsc);
        tsc_offset = vmcs_read64(TSC_OFFSET);
        return host_tsc + tsc_offset;
}

/*
 * writes 'guest_tsc' into guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
{
        u64 host_tsc;

        rdtscll(host_tsc);
        vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        u64 data;
        struct vmx_msr_entry *msr;

        if (!pdata) {
                printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
                return -EINVAL;
        }

        switch (msr_index) {
#ifdef CONFIG_X86_64
        case MSR_FS_BASE:
                data = vmcs_readl(GUEST_FS_BASE);
                break;
        case MSR_GS_BASE:
                data = vmcs_readl(GUEST_GS_BASE);
                break;
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_index, pdata);
#endif
        case MSR_IA32_TIME_STAMP_COUNTER:
                data = guest_read_tsc();
                break;
        case MSR_IA32_SYSENTER_CS:
                data = vmcs_read32(GUEST_SYSENTER_CS);
                break;
        case MSR_IA32_SYSENTER_EIP:
                data = vmcs_readl(GUEST_SYSENTER_EIP);
                break;
        case MSR_IA32_SYSENTER_ESP:
                data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
        default:
                msr = find_msr_entry(vcpu, msr_index);
                if (msr) {
                        data = msr->data;
                        break;
                }
                return kvm_get_msr_common(vcpu, msr_index, pdata);
        }

        *pdata = data;
        return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        struct vmx_msr_entry *msr;
        int ret = 0;

        switch (msr_index) {
#ifdef CONFIG_X86_64
        case MSR_EFER:
                ret = kvm_set_msr_common(vcpu, msr_index, data);
                if (vcpu->vmx_host_state.loaded)
                        load_transition_efer(vcpu);
                break;
        case MSR_FS_BASE:
                vmcs_writel(GUEST_FS_BASE, data);
                break;
        case MSR_GS_BASE:
                vmcs_writel(GUEST_GS_BASE, data);
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                vmcs_write32(GUEST_SYSENTER_CS, data);
                break;
        case MSR_IA32_SYSENTER_EIP:
                vmcs_writel(GUEST_SYSENTER_EIP, data);
                break;
        case MSR_IA32_SYSENTER_ESP:
                vmcs_writel(GUEST_SYSENTER_ESP, data);
                break;
        case MSR_IA32_TIME_STAMP_COUNTER:
                guest_write_tsc(data);
                break;
        default:
                msr = find_msr_entry(vcpu, msr_index);
                if (msr) {
                        msr->data = data;
                        if (vcpu->vmx_host_state.loaded)
                                load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
                        break;
                }
                ret = kvm_set_msr_common(vcpu, msr_index, data);
        }

        return ret;
}

/*
 * Sync the rsp and rip registers into the vcpu structure.  This allows
 * registers to be accessed by indexing vcpu->regs.
 */
static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu)
{
        vcpu->regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
        vcpu->rip = vmcs_readl(GUEST_RIP);
}

/*
 * Syncs rsp and rip back into the vmcs.  Should be called after possible
 * modification.
 */
static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
{
        vmcs_writel(GUEST_RSP, vcpu->regs[VCPU_REGS_RSP]);
        vmcs_writel(GUEST_RIP, vcpu->rip);
}

static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
        unsigned long dr7 = 0x400;
        int old_singlestep;

        old_singlestep = vcpu->guest_debug.singlestep;

        vcpu->guest_debug.enabled = dbg->enabled;
        if (vcpu->guest_debug.enabled) {
                int i;

                dr7 |= 0x200;  /* exact */
                for (i = 0; i < 4; ++i) {
                        if (!dbg->breakpoints[i].enabled)
                                continue;
                        vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
                        dr7 |= 2 << (i*2);    /* global enable */
                        dr7 |= 0 << (i*4+16); /* execution breakpoint */
                }

                vcpu->guest_debug.singlestep = dbg->singlestep;
        } else
                vcpu->guest_debug.singlestep = 0;

        if (old_singlestep && !vcpu->guest_debug.singlestep) {
                unsigned long flags;

                flags = vmcs_readl(GUEST_RFLAGS);
                flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
                vmcs_writel(GUEST_RFLAGS, flags);
        }

        update_exception_bitmap(vcpu);
        vmcs_writel(GUEST_DR7, dr7);

        return 0;
}

static __init int cpu_has_kvm_support(void)
{
        unsigned long ecx = cpuid_ecx(1);
        return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

static __init int vmx_disabled_by_bios(void)
{
        u64 msr;

        rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
        return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED |
                       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
            == MSR_IA32_FEATURE_CONTROL_LOCKED;
        /* locked but not enabled */
}

static void hardware_enable(void *garbage)
{
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
        u64 old;

        rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
        if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED |
                    MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
            != (MSR_IA32_FEATURE_CONTROL_LOCKED |
                MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED))
                /* enable and lock */
                wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
                       MSR_IA32_FEATURE_CONTROL_LOCKED |
                       MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED);
        write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
        asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr)
                      : "memory", "cc");
}

static void hardware_disable(void *garbage)
{
        asm volatile (ASM_VMX_VMXOFF : : : "cc");
}
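
/*
 * Note: VMXON requires CR4.VMXE plus a locked-and-enabled
 * IA32_FEATURE_CONTROL msr, both arranged in hardware_enable();
 * hardware_disable() executes VMXOFF but leaves CR4.VMXE set (see the
 * FIXME above).
 */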

static __init void setup_vmcs_descriptor(void)
{
        u32 vmx_msr_low, vmx_msr_high;

        /*
         * IA32_VMX_BASIC reports the VMCS revision identifier in its
         * low word and the VMCS region size in bits 44:32.
         */
        rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
        vmcs_descriptor.size = vmx_msr_high & 0x1fff;
        vmcs_descriptor.order = get_order(vmcs_descriptor.size);
        vmcs_descriptor.revision_id = vmx_msr_low;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
        int node = cpu_to_node(cpu);
        struct page *pages;
        struct vmcs *vmcs;

        pages = alloc_pages_node(node, GFP_KERNEL, vmcs_descriptor.order);
        if (!pages)
                return NULL;
        vmcs = page_address(pages);
        memset(vmcs, 0, vmcs_descriptor.size);
        vmcs->revision_id = vmcs_descriptor.revision_id; /* vmcs revision id */
        return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
        return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
        free_pages((unsigned long)vmcs, vmcs_descriptor.order);
}

static void free_kvm_area(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                free_vmcs(per_cpu(vmxarea, cpu));
}

static __init int alloc_kvm_area(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                struct vmcs *vmcs;

                vmcs = alloc_vmcs_cpu(cpu);
                if (!vmcs) {
                        free_kvm_area();
                        return -ENOMEM;
                }

                per_cpu(vmxarea, cpu) = vmcs;
        }
        return 0;
}

static __init int hardware_setup(void)
{
        setup_vmcs_descriptor();
        return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
        free_kvm_area();
}

static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
                vmcs_write16(sf->selector, save->selector);
                vmcs_writel(sf->base, save->base);
                vmcs_write32(sf->limit, save->limit);
                vmcs_write32(sf->ar_bytes, save->ar);
        } else {
                u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
                        << AR_DPL_SHIFT;
                vmcs_write32(sf->ar_bytes, 0x93 | dpl);
        }
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        vcpu->rmode.active = 0;

        vmcs_writel(GUEST_TR_BASE, vcpu->rmode.tr.base);
        vmcs_write32(GUEST_TR_LIMIT, vcpu->rmode.tr.limit);
        vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar);

        flags = vmcs_readl(GUEST_RFLAGS);
        flags &= ~(IOPL_MASK | X86_EFLAGS_VM);
        flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT);
        vmcs_writel(GUEST_RFLAGS, flags);

        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
                        (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

        update_exception_bitmap(vcpu);

        fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->rmode.es);
        fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->rmode.ds);
        fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->rmode.gs);
        fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->rmode.fs);

        vmcs_write16(GUEST_SS_SELECTOR, 0);
        vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

        vmcs_write16(GUEST_CS_SELECTOR,
                     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

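/*
 * The fake TSS used while emulating real mode occupies the last three
 * pages of the first memory slot; see also init_rmode_tss().
 */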
static int rmode_tss_base(struct kvm* kvm)
{
        gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
        return base_gfn << PAGE_SHIFT;
}

static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        save->selector = vmcs_read16(sf->selector);
        save->base = vmcs_readl(sf->base);
        save->limit = vmcs_read32(sf->limit);
        save->ar = vmcs_read32(sf->ar_bytes);
        vmcs_write16(sf->selector, vmcs_readl(sf->base) >> 4);
        vmcs_write32(sf->limit, 0xffff);
        vmcs_write32(sf->ar_bytes, 0xf3); /* present, dpl 3, read/write data */
}

static void enter_rmode(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        vcpu->rmode.active = 1;

        vcpu->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
        vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

        vcpu->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
        vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

        vcpu->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

        flags = vmcs_readl(GUEST_RFLAGS);
        vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT;

        flags |= IOPL_MASK | X86_EFLAGS_VM;

        vmcs_writel(GUEST_RFLAGS, flags);
        vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
        update_exception_bitmap(vcpu);

        vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
        vmcs_write32(GUEST_SS_LIMIT, 0xffff);
        vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

        vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
        vmcs_write32(GUEST_CS_LIMIT, 0xffff);
        if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
                vmcs_writel(GUEST_CS_BASE, 0xf0000);
        vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

        fix_rmode_seg(VCPU_SREG_ES, &vcpu->rmode.es);
        fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
        fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
        fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);

        init_rmode_tss(vcpu->kvm);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
        u32 guest_tr_ar;

        guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
        if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
                printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
                       __FUNCTION__);
                vmcs_write32(GUEST_TR_AR_BYTES,
                             (guest_tr_ar & ~AR_TYPE_MASK)
                             | AR_TYPE_BUSY_64_TSS);
        }

        vcpu->shadow_efer |= EFER_LMA;

        find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     | VM_ENTRY_CONTROLS_IA32E_MASK);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
        vcpu->shadow_efer &= ~EFER_LMA;

        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     & ~VM_ENTRY_CONTROLS_IA32E_MASK);
}

#endif

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
        vcpu->cr4 &= KVM_GUEST_CR4_MASK;
        vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
}

static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        vmx_fpu_deactivate(vcpu);

        if (vcpu->rmode.active && (cr0 & X86_CR0_PE))
                enter_pmode(vcpu);

        if (!vcpu->rmode.active && !(cr0 & X86_CR0_PE))
                enter_rmode(vcpu);

#ifdef CONFIG_X86_64
        if (vcpu->shadow_efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
                        enter_lmode(vcpu);
                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
                        exit_lmode(vcpu);
        }
#endif

        vmcs_writel(CR0_READ_SHADOW, cr0);
        vmcs_writel(GUEST_CR0,
                    (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
        vcpu->cr0 = cr0;

        if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
                vmx_fpu_activate(vcpu);
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        vmcs_writel(GUEST_CR3, cr3);
        if (vcpu->cr0 & X86_CR0_PE)
                vmx_fpu_deactivate(vcpu);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
        vcpu->cr4 = cr4;
}

#ifdef CONFIG_X86_64

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        struct vmx_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);

        vcpu->shadow_efer = efer;
        if (efer & EFER_LMA) {
                vmcs_write32(VM_ENTRY_CONTROLS,
                             vmcs_read32(VM_ENTRY_CONTROLS) |
                             VM_ENTRY_CONTROLS_IA32E_MASK);
                msr->data = efer;
        } else {
                vmcs_write32(VM_ENTRY_CONTROLS,
                             vmcs_read32(VM_ENTRY_CONTROLS) &
                             ~VM_ENTRY_CONTROLS_IA32E_MASK);
                msr->data = efer & ~EFER_LME;
        }
        setup_msrs(vcpu);
}

#endif

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        return vmcs_readl(sf->base);
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

        var->base = vmcs_readl(sf->base);
        var->limit = vmcs_read32(sf->limit);
        var->selector = vmcs_read16(sf->selector);
        ar = vmcs_read32(sf->ar_bytes);
        if (ar & AR_UNUSABLE_MASK)
                ar = 0;
        var->type = ar & 15;
        var->s = (ar >> 4) & 1;
        var->dpl = (ar >> 5) & 3;
        var->present = (ar >> 7) & 1;
        var->avl = (ar >> 12) & 1;
        var->l = (ar >> 13) & 1;
        var->db = (ar >> 14) & 1;
        var->g = (ar >> 15) & 1;
        var->unusable = (ar >> 16) & 1;
}

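/*
 * The access-rights encoding mirrors a descriptor's high word: type in
 * bits 3:0, then S, DPL and P, with AVL, L, D/B and G in bits 12-15
 * and bit 16 flagging an unusable (null) segment.
 */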
static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
        u32 ar;

        if (var->unusable)
                ar = 1 << 16;
        else {
                ar = var->type & 15;
                ar |= (var->s & 1) << 4;
                ar |= (var->dpl & 3) << 5;
                ar |= (var->present & 1) << 7;
                ar |= (var->avl & 1) << 12;
                ar |= (var->l & 1) << 13;
                ar |= (var->db & 1) << 14;
                ar |= (var->g & 1) << 15;
        }
        if (ar == 0) /* a 0 value means unusable */
                ar = AR_UNUSABLE_MASK;

        return ar;
}

static void vmx_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
        u32 ar;

        if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
                vcpu->rmode.tr.selector = var->selector;
                vcpu->rmode.tr.base = var->base;
                vcpu->rmode.tr.limit = var->limit;
                vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
                return;
        }
        vmcs_writel(sf->base, var->base);
        vmcs_write32(sf->limit, var->limit);
        vmcs_write16(sf->selector, var->selector);
        if (vcpu->rmode.active && var->s) {
                /*
                 * Hack real-mode segments into vm86 compatibility.
                 */
                if (var->base == 0xffff0000 && var->selector == 0xf000)
                        vmcs_writel(sf->base, 0xf0000);
                ar = 0xf3;
        } else
                ar = vmx_segment_access_rights(var);
        vmcs_write32(sf->ar_bytes, ar);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
        u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

        *db = (ar >> 14) & 1;
        *l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
        dt->base = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
        vmcs_writel(GUEST_IDTR_BASE, dt->base);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
        dt->base = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
        vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
        vmcs_writel(GUEST_GDTR_BASE, dt->base);
}

static int init_rmode_tss(struct kvm* kvm)
{
        struct page *p1, *p2, *p3;
        gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
        char *page;

        p1 = gfn_to_page(kvm, fn++);
        p2 = gfn_to_page(kvm, fn++);
        p3 = gfn_to_page(kvm, fn);

        if (!p1 || !p2 || !p3) {
                kvm_printf(kvm, "%s: gfn_to_page failed\n", __FUNCTION__);
                return 0;
        }

        page = kmap_atomic(p1, KM_USER0);
        clear_page(page);
        /* offset 0x66 is the I/O map base field of the 32-bit TSS */
        *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
        kunmap_atomic(page, KM_USER0);

        page = kmap_atomic(p2, KM_USER0);
        clear_page(page);
        kunmap_atomic(page, KM_USER0);

        page = kmap_atomic(p3, KM_USER0);
        clear_page(page);
        /* the final byte of the I/O bitmap must be all ones */
        *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
        kunmap_atomic(page, KM_USER0);

        return 1;
}

static void vmcs_write32_fixedbits(u32 msr, u32 vmcs_field, u32 val)
{
        u32 msr_high, msr_low;

        rdmsr(msr, msr_low, msr_high);

        /*
         * The VMX capability msr reports allowed control settings:
         * bits clear in the high word must be 0, bits set in the low
         * word must be 1 (see the IA32_VMX_*_CTLS msrs in the SDM).
         */
        val &= msr_high;
        val |= msr_low;
        vmcs_write32(vmcs_field, val);
}

static void seg_setup(int seg)
{
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

        vmcs_write16(sf->selector, 0);
        vmcs_writel(sf->base, 0);
        vmcs_write32(sf->limit, 0xffff);
        vmcs_write32(sf->ar_bytes, 0x93);
}

/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
{
        u32 host_sysenter_cs;
        u32 junk;
        unsigned long a;
        struct descriptor_table dt;
        int i;
        int ret = 0;
        unsigned long kvm_vmx_return;

        if (!init_rmode_tss(vcpu->kvm)) {
                ret = -ENOMEM;
                goto out;
        }

        memset(vcpu->regs, 0, sizeof(vcpu->regs));
        vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
        vcpu->cr8 = 0;
        vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (vcpu->vcpu_id == 0)
                vcpu->apic_base |= MSR_IA32_APICBASE_BSP;

        fx_init(vcpu);

        /*
         * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
         * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
         */
        vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
        vmcs_writel(GUEST_CS_BASE, 0x000f0000);
        vmcs_write32(GUEST_CS_LIMIT, 0xffff);
        vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);

        seg_setup(VCPU_SREG_DS);
        seg_setup(VCPU_SREG_ES);
        seg_setup(VCPU_SREG_FS);
        seg_setup(VCPU_SREG_GS);
        seg_setup(VCPU_SREG_SS);

        vmcs_write16(GUEST_TR_SELECTOR, 0);
        vmcs_writel(GUEST_TR_BASE, 0);
        vmcs_write32(GUEST_TR_LIMIT, 0xffff);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

        vmcs_write16(GUEST_LDTR_SELECTOR, 0);
        vmcs_writel(GUEST_LDTR_BASE, 0);
        vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
        vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

        vmcs_write32(GUEST_SYSENTER_CS, 0);
        vmcs_writel(GUEST_SYSENTER_ESP, 0);
        vmcs_writel(GUEST_SYSENTER_EIP, 0);

        vmcs_writel(GUEST_RFLAGS, 0x02);
        vmcs_writel(GUEST_RIP, 0xfff0);
        vmcs_writel(GUEST_RSP, 0);

        /* TODO: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
        vmcs_writel(GUEST_DR7, 0x400);

        vmcs_writel(GUEST_GDTR_BASE, 0);
        vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

        vmcs_writel(GUEST_IDTR_BASE, 0);
        vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

        vmcs_write32(GUEST_ACTIVITY_STATE, 0);
        vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
        vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

        /* I/O */
        vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
        vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));

        guest_write_tsc(0);

        vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

        /* Special registers */
        vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

        /* Control */
        vmcs_write32_fixedbits(MSR_IA32_VMX_PINBASED_CTLS,
                               PIN_BASED_VM_EXEC_CONTROL,
                               PIN_BASED_EXT_INTR_MASK   /* 20.6.1 */
                               | PIN_BASED_NMI_EXITING   /* 20.6.1 */
                        );
        vmcs_write32_fixedbits(MSR_IA32_VMX_PROCBASED_CTLS,
                               CPU_BASED_VM_EXEC_CONTROL,
                               CPU_BASED_HLT_EXITING         /* 20.6.2 */
                               | CPU_BASED_CR8_LOAD_EXITING    /* 20.6.2 */
                               | CPU_BASED_CR8_STORE_EXITING   /* 20.6.2 */
                               | CPU_BASED_USE_IO_BITMAPS  /* 20.6.2 */
                               | CPU_BASED_MOV_DR_EXITING
                               | CPU_BASED_USE_TSC_OFFSETING   /* 21.3 */
                        );

        vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
        vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

        vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
        vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
        vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
        vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_FS_SELECTOR, read_fs());    /* 22.2.4 */
        vmcs_write16(HOST_GS_SELECTOR, read_gs());    /* 22.2.4 */
        vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
        rdmsrl(MSR_FS_BASE, a);
        vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
        rdmsrl(MSR_GS_BASE, a);
        vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
        vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
        vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

        get_idt(&dt);
        vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */

        asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
        vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

        rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
        vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
        rdmsrl(MSR_IA32_SYSENTER_ESP, a);
        vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
        rdmsrl(MSR_IA32_SYSENTER_EIP, a);
        vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */

        for (i = 0; i < NR_VMX_MSR; ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
                u64 data;
                int j = vcpu->nmsrs;

                if (rdmsr_safe(index, &data_low, &data_high) < 0)
                        continue;
                if (wrmsr_safe(index, data_low, data_high) < 0)
                        continue;
                data = data_low | ((u64)data_high << 32);
                vcpu->host_msrs[j].index = index;
                vcpu->host_msrs[j].reserved = 0;
                vcpu->host_msrs[j].data = data;
                vcpu->guest_msrs[j] = vcpu->host_msrs[j];
                ++vcpu->nmsrs;
        }

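        /*
         * Only msrs that survived a rdmsr/wrmsr round-trip above exist
         * on this host and made it into the save lists; setup_msrs()
         * then decides which of them need switching on entry/exit.
         */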
        setup_msrs(vcpu);

        vmcs_write32_fixedbits(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_CONTROLS,
                               (HOST_IS_64 << 9));  /* 22.2.1, 20.7.1 */

        /* 22.2.1, 20.8.1 */
        vmcs_write32_fixedbits(MSR_IA32_VMX_ENTRY_CTLS,
                               VM_ENTRY_CONTROLS, 0);
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

#ifdef CONFIG_X86_64
        vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
        vmcs_writel(TPR_THRESHOLD, 0);
#endif

        vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
        vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

        vcpu->cr0 = 0x60000010;
        vmx_set_cr0(vcpu, vcpu->cr0); /* enter rmode */
        vmx_set_cr4(vcpu, 0);
#ifdef CONFIG_X86_64
        vmx_set_efer(vcpu, 0);
#endif
        vmx_fpu_activate(vcpu);
        update_exception_bitmap(vcpu);

        return 0;

out:
        return ret;
}

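/*
 * Inject an interrupt the way a real-mode cpu would: fetch cs:ip from
 * the vector's 4-byte IVT entry at linear address irq * 4, push flags,
 * cs and ip on the guest stack, mask IF/AC/TF, and branch to the
 * handler.
 */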
static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
{
        u16 ent[2];
        u16 cs;
        u16 ip;
        unsigned long flags;
        unsigned long ss_base = vmcs_readl(GUEST_SS_BASE);
        u16 sp = vmcs_readl(GUEST_RSP);
        u32 ss_limit = vmcs_read32(GUEST_SS_LIMIT);

        if (sp > ss_limit || sp < 6) {
                vcpu_printf(vcpu, "%s: #SS, rsp 0x%lx ss 0x%lx limit 0x%x\n",
                            __FUNCTION__,
                            vmcs_readl(GUEST_RSP),
                            vmcs_readl(GUEST_SS_BASE),
                            vmcs_read32(GUEST_SS_LIMIT));
                return;
        }

        if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
                                                                sizeof(ent)) {
                vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
                return;
        }

        flags = vmcs_readl(GUEST_RFLAGS);
        cs = vmcs_readl(GUEST_CS_BASE) >> 4;
        ip = vmcs_readl(GUEST_RIP);

        if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
            kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
            kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
                vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
                return;
        }

        vmcs_writel(GUEST_RFLAGS, flags &
                    ~(X86_EFLAGS_IF | X86_EFLAGS_AC | X86_EFLAGS_TF));
        vmcs_write16(GUEST_CS_SELECTOR, ent[1]);
        vmcs_writel(GUEST_CS_BASE, ent[1] << 4);
        vmcs_writel(GUEST_RIP, ent[0]);
        vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
}

static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
        int word_index = __ffs(vcpu->irq_summary);
        int bit_index = __ffs(vcpu->irq_pending[word_index]);
        int irq = word_index * BITS_PER_LONG + bit_index;

        clear_bit(bit_index, &vcpu->irq_pending[word_index]);
        if (!vcpu->irq_pending[word_index])
                clear_bit(word_index, &vcpu->irq_summary);

        if (vcpu->rmode.active) {
                inject_rmode_irq(vcpu, irq);
                return;
        }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                        irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

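/*
 * Inject a pending interrupt if the guest is interruptible; otherwise
 * turn on CPU_BASED_VIRTUAL_INTR_PENDING so that we get a vmexit as
 * soon as the interrupt window opens.
 */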
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
                                       struct kvm_run *kvm_run)
{
        u32 cpu_based_vm_exec_control;

        vcpu->interrupt_window_open =
                ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
                 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);

        if (vcpu->interrupt_window_open &&
            vcpu->irq_summary &&
            !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
                /*
                 * Interrupts are enabled and not blocked by sti or
                 * mov ss, so inject one now.
                 */
                kvm_do_inject_irq(vcpu);

        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        if (!vcpu->interrupt_window_open &&
            (vcpu->irq_summary || kvm_run->request_interrupt_window))
                /*
                 * Interrupts blocked.  Wait for unblock.
                 */
                cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
        else
                cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
{
        struct kvm_guest_debug *dbg = &vcpu->guest_debug;

        set_debugreg(dbg->bp[0], 0);
        set_debugreg(dbg->bp[1], 1);
        set_debugreg(dbg->bp[2], 2);
        set_debugreg(dbg->bp[3], 3);

        if (dbg->singlestep) {
                unsigned long flags;

                flags = vmcs_readl(GUEST_RFLAGS);
                flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
                vmcs_writel(GUEST_RFLAGS, flags);
        }
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
                                  int vec, u32 err_code)
{
        if (!vcpu->rmode.active)
                return 0;

        /*
         * Instructions with an address-size override prefix (0x67)
         * cause a #SS fault with error code 0 in vm86 mode; let the
         * emulator handle them.
         */
        if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
                if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
                        return 1;
        return 0;
}

1562 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1563 {
1564         u32 intr_info, error_code;
1565         unsigned long cr2, rip;
1566         u32 vect_info;
1567         enum emulation_result er;
1568         int r;
1569
1570         vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1571         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1572
1573         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
1574                                                 !is_page_fault(intr_info)) {
1575                 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
1576                        "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
1577         }
1578
1579         if (is_external_interrupt(vect_info)) {
1580                 int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
1581                 set_bit(irq, vcpu->irq_pending);
1582                 set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
1583         }
1584
1585         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
1586                 asm ("int $2");
1587                 return 1;
1588         }
1589
1590         if (is_no_device(intr_info)) {
1591                 vmx_fpu_activate(vcpu);
1592                 return 1;
1593         }
1594
1595         error_code = 0;
1596         rip = vmcs_readl(GUEST_RIP);
1597         if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
1598                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
1599         if (is_page_fault(intr_info)) {
1600                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
1601
1602                 spin_lock(&vcpu->kvm->lock);
1603                 r = kvm_mmu_page_fault(vcpu, cr2, error_code);
1604                 if (r < 0) {
1605                         spin_unlock(&vcpu->kvm->lock);
1606                         return r;
1607                 }
1608                 if (!r) {
1609                         spin_unlock(&vcpu->kvm->lock);
1610                         return 1;
1611                 }
1612
1613                 er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
1614                 spin_unlock(&vcpu->kvm->lock);
1615
1616                 switch (er) {
1617                 case EMULATE_DONE:
1618                         return 1;
1619                 case EMULATE_DO_MMIO:
1620                         ++vcpu->stat.mmio_exits;
1621                         return 0;
1622                 case EMULATE_FAIL:
1623                         vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
1624                         break;
1625                 default:
1626                         BUG();
1627                 }
1628         }
1629
1630         if (vcpu->rmode.active &&
1631             handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
1632                                                                 error_code)) {
1633                 if (vcpu->halt_request) {
1634                         vcpu->halt_request = 0;
1635                         return kvm_emulate_halt(vcpu);
1636                 }
1637                 return 1;
1638         }
1639
1640         if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
                 (INTR_TYPE_EXCEPTION | 1)) { /* vector 1: #DB */
1641                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1642                 return 0;
1643         }
1644         kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
1645         kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
1646         kvm_run->ex.error_code = error_code;
1647         return 0;
1648 }
1649
1650 static int handle_external_interrupt(struct kvm_vcpu *vcpu,
1651                                      struct kvm_run *kvm_run)
1652 {
1653         ++vcpu->stat.irq_exits;
1654         return 1;
1655 }
1656
1657 static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1658 {
1659         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1660         return 0;
1661 }
1662
1663 static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
1664 {
1665         u64 inst;
1666         gva_t rip;
1667         int countr_size;
1668         int i, n;
1669
1670         if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
1671                 countr_size = 2;
1672         } else {
1673                 u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
1674
1675                 countr_size = (cs_ar & AR_L_MASK) ? 8 :
1676                               (cs_ar & AR_DB_MASK) ? 4 : 2;
1677         }
1678
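        /*
         * Outside 64-bit mode GUEST_RIP is an offset into CS, so the
         * CS base must be added to form the linear address.
         */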
1679         rip = vmcs_readl(GUEST_RIP);
1680         if (countr_size != 8)
1681                 rip += vmcs_readl(GUEST_CS_BASE);
1682
1683         n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);
1684
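        /*
         * Skip legacy prefixes: f0 (lock), f2/f3 (repne/rep),
         * 2e/36/3e/26/64/65 (segment overrides) and 66 (operand
         * size); 67 (address size) toggles the count register width.
         */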
1685         for (i = 0; i < n; i++) {
1686                 switch (((u8 *)&inst)[i]) {
1687                 case 0xf0:
1688                 case 0xf2:
1689                 case 0xf3:
1690                 case 0x2e:
1691                 case 0x36:
1692                 case 0x3e:
1693                 case 0x26:
1694                 case 0x64:
1695                 case 0x65:
1696                 case 0x66:
1697                         break;
1698                 case 0x67:
1699                         countr_size = (countr_size == 2) ? 4 : (countr_size >> 1);
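                        /* fall through */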
1700                 default:
1701                         goto done;
1702                 }
1703         }
1704         return 0;
1705 done:
1706         countr_size *= 8;
1707         *count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
1709         return 1;
1710 }
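/*
 * Illustration: a 32-bit guest running "rep outsb" with ecx = 0x12345
 * yields countr_size 4 above, so *count is masked to the low 32 bits
 * of rcx and becomes 0x12345.
 */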
1711
1712 static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1713 {
1714         u64 exit_qualification;
1715         int size, down, in, string, rep;
1716         unsigned port;
1717         unsigned long count;
1718         gva_t address;
1719
1720         ++vcpu->stat.io_exits;
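        /*
         * I/O exit qualification layout (IA32 SDM): bits 2:0 hold the
         * access size minus one, bit 3 the direction (1 = in), bit 4
         * the string flag, bit 5 the REP prefix and bits 31:16 the
         * port number.
         */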
1721         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1722         in = (exit_qualification & 8) != 0;
1723         size = (exit_qualification & 7) + 1;
1724         string = (exit_qualification & 16) != 0;
1725         down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
1726         count = 1;
1727         rep = (exit_qualification & 32) != 0;
1728         port = exit_qualification >> 16;
1729         address = 0;
1730         if (string) {
1731                 if (rep && !get_io_count(vcpu, &count))
1732                         return 1;
1733                 address = vmcs_readl(GUEST_LINEAR_ADDRESS);
1734         }
1735         return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
1736                              address, rep, port);
1737 }
1738
1739 static void
1740 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1741 {
1742         /*
1743          * Patch in the VMCALL instruction:
1744          */
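        /*
         * 0f 01 c1 encodes vmcall; the trailing c3 (ret) returns
         * control to the caller once the hypercall completes.
         */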
1745         hypercall[0] = 0x0f;
1746         hypercall[1] = 0x01;
1747         hypercall[2] = 0xc1;
1748         hypercall[3] = 0xc3;
1749 }
1750
1751 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1752 {
1753         u64 exit_qualification;
1754         int cr;
1755         int reg;
1756
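        /*
         * CR-access exit qualification (IA32 SDM): bits 3:0 name the
         * control register, bits 5:4 the access type (0 = mov to cr,
         * 1 = mov from cr, 2 = clts, 3 = lmsw) and bits 11:8 the
         * general-purpose register involved.
         */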
1757         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1758         cr = exit_qualification & 15;
1759         reg = (exit_qualification >> 8) & 15;
1760         switch ((exit_qualification >> 4) & 3) {
1761         case 0: /* mov to cr */
1762                 switch (cr) {
1763                 case 0:
1764                         vcpu_load_rsp_rip(vcpu);
1765                         set_cr0(vcpu, vcpu->regs[reg]);
1766                         skip_emulated_instruction(vcpu);
1767                         return 1;
1768                 case 3:
1769                         vcpu_load_rsp_rip(vcpu);
1770                         set_cr3(vcpu, vcpu->regs[reg]);
1771                         skip_emulated_instruction(vcpu);
1772                         return 1;
1773                 case 4:
1774                         vcpu_load_rsp_rip(vcpu);
1775                         set_cr4(vcpu, vcpu->regs[reg]);
1776                         skip_emulated_instruction(vcpu);
1777                         return 1;
1778                 case 8:
1779                         vcpu_load_rsp_rip(vcpu);
1780                         set_cr8(vcpu, vcpu->regs[reg]);
1781                         skip_emulated_instruction(vcpu);
1782                         return 1;
1783                 }
1784                 break;
1785         case 2: /* clts */
1786                 vcpu_load_rsp_rip(vcpu);
1787                 vmx_fpu_deactivate(vcpu);
1788                 vcpu->cr0 &= ~X86_CR0_TS;
1789                 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
1790                 vmx_fpu_activate(vcpu);
1791                 skip_emulated_instruction(vcpu);
1792                 return 1;
1793         case 1: /* mov from cr */
1794                 switch (cr) {
1795                 case 3:
1796                         vcpu_load_rsp_rip(vcpu);
1797                         vcpu->regs[reg] = vcpu->cr3;
1798                         vcpu_put_rsp_rip(vcpu);
1799                         skip_emulated_instruction(vcpu);
1800                         return 1;
1801                 case 8:
1802                         vcpu_load_rsp_rip(vcpu);
1803                         vcpu->regs[reg] = vcpu->cr8;
1804                         vcpu_put_rsp_rip(vcpu);
1805                         skip_emulated_instruction(vcpu);
1806                         return 1;
1807                 }
1808                 break;
1809         case 3: /* lmsw */
1810                 lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
1811
1812                 skip_emulated_instruction(vcpu);
1813                 return 1;
1814         default:
1815                 break;
1816         }
1817         kvm_run->exit_reason = 0;
1818         printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
1819                (int)(exit_qualification >> 4) & 3, cr);
1820         return 0;
1821 }
1822
1823 static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1824 {
1825         u64 exit_qualification;
1826         unsigned long val;
1827         int dr, reg;
1828
1829         /*
1830          * FIXME: this code assumes the host is debugging the guest;
1831          *        it must also handle the guest debugging itself.
1832          */
1833         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
1834         dr = exit_qualification & 7;
1835         reg = (exit_qualification >> 8) & 15;
1836         vcpu_load_rsp_rip(vcpu);
1837         if (exit_qualification & 16) {
1838                 /* mov from dr */
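                /*
                 * Report the architectural reset values: dr6 reads as
                 * 0xffff0ff0 and dr7 as 0x400.
                 */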
1839                 switch (dr) {
1840                 case 6:
1841                         val = 0xffff0ff0;
1842                         break;
1843                 case 7:
1844                         val = 0x400;
1845                         break;
1846                 default:
1847                         val = 0;
1848                 }
1849                 vcpu->regs[reg] = val;
1850         } else {
1851                 /* mov to dr */
1852         }
1853         vcpu_put_rsp_rip(vcpu);
1854         skip_emulated_instruction(vcpu);
1855         return 1;
1856 }
1857
1858 static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1859 {
1860         kvm_emulate_cpuid(vcpu);
1861         return 1;
1862 }
1863
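/*
 * rdmsr and wrmsr move the 64-bit MSR value split across edx:eax;
 * e.g. writing 0x123456789a loads eax = 0x3456789a and edx = 0x12.
 */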
1864 static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1865 {
1866         u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1867         u64 data;
1868
1869         if (vmx_get_msr(vcpu, ecx, &data)) {
1870                 vmx_inject_gp(vcpu, 0);
1871                 return 1;
1872         }
1873
1874         /* FIXME: handling of bits 32:63 of rax, rdx */
1875         vcpu->regs[VCPU_REGS_RAX] = data & -1u;
1876         vcpu->regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
1877         skip_emulated_instruction(vcpu);
1878         return 1;
1879 }
1880
1881 static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1882 {
1883         u32 ecx = vcpu->regs[VCPU_REGS_RCX];
1884         u64 data = (vcpu->regs[VCPU_REGS_RAX] & -1u)
1885                 | ((u64)(vcpu->regs[VCPU_REGS_RDX] & -1u) << 32);
1886
1887         if (vmx_set_msr(vcpu, ecx, data) != 0) {
1888                 vmx_inject_gp(vcpu, 0);
1889                 return 1;
1890         }
1891
1892         skip_emulated_instruction(vcpu);
1893         return 1;
1894 }
1895
1896 static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1897                               struct kvm_run *kvm_run)
1898 {
1899         kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
1900         kvm_run->cr8 = vcpu->cr8;
1901         kvm_run->apic_base = vcpu->apic_base;
1902         kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
1903                                                   vcpu->irq_summary == 0);
1904 }
1905
1906 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
1907                                    struct kvm_run *kvm_run)
1908 {
1909         /*
1910          * If userspace is waiting to inject an interrupt, exit as
1911          * soon as possible.
1912          */
1913         if (kvm_run->request_interrupt_window &&
1914             !vcpu->irq_summary) {
1915                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1916                 ++vcpu->stat.irq_window_exits;
1917                 return 0;
1918         }
1919         return 1;
1920 }
1921
1922 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1923 {
1924         skip_emulated_instruction(vcpu);
1925         return kvm_emulate_halt(vcpu);
1926 }
1927
1928 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1929 {
1930         skip_emulated_instruction(vcpu);
1931         return kvm_hypercall(vcpu, kvm_run);
1932 }
1933
1934 /*
1935  * The exit handlers return 1 if the exit was handled fully and guest execution
1936  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
1937  * to be done to userspace and return 0.
1938  */
1939 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
1940                                       struct kvm_run *kvm_run) = {
1941         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
1942         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
1943         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
1944         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
1945         [EXIT_REASON_CR_ACCESS]               = handle_cr,
1946         [EXIT_REASON_DR_ACCESS]               = handle_dr,
1947         [EXIT_REASON_CPUID]                   = handle_cpuid,
1948         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
1949         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
1950         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
1951         [EXIT_REASON_HLT]                     = handle_halt,
1952         [EXIT_REASON_VMCALL]                  = handle_vmcall,
1953 };
1954
1955 static const int kvm_vmx_max_exit_handlers =
1956         ARRAY_SIZE(kvm_vmx_exit_handlers);
1957
1958 /*
1959  * The guest has exited.  See if we can fix it or if we need userspace
1960  * assistance.
1961  */
1962 static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1963 {
1964         u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1965         u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
1966
1967         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
1968             exit_reason != EXIT_REASON_EXCEPTION_NMI)
1969                 printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
1970                        "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
1971         if (exit_reason < kvm_vmx_max_exit_handlers
1972             && kvm_vmx_exit_handlers[exit_reason])
1973                 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
1974         else {
1975                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
1976                 kvm_run->hw.hardware_exit_reason = exit_reason;
1977         }
1978         return 0;
1979 }
1980
1981 /*
1982  * Check if userspace requested an interrupt window, and that the
1983  * interrupt window is open.
1984  *
1985  * No need to exit to userspace if we already have an interrupt queued.
1986  */
1987 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1988                                           struct kvm_run *kvm_run)
1989 {
1990         return (!vcpu->irq_summary &&
1991                 kvm_run->request_interrupt_window &&
1992                 vcpu->interrupt_window_open &&
1993                 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
1994 }
1995
1996 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1997 {
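        /*
         * Nothing to do: without VPID support, VM entry and VM exit
         * already flush the guest's TLB entries.
         */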
1998 }
1999
2000 static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2001 {
2002         u8 fail;
2003         int r;
2004
2005 preempted:
2006         if (vcpu->guest_debug.enabled)
2007                 kvm_guest_debug_pre(vcpu);
2008
2009 again:
2010         r = kvm_mmu_reload(vcpu);
2011         if (unlikely(r))
2012                 goto out;
2013
2014         if (!vcpu->mmio_read_completed)
2015                 do_interrupt_requests(vcpu, kvm_run);
2016
2017         vmx_save_host_state(vcpu);
2018         kvm_load_guest_fpu(vcpu);
2019
2020         /*
2021          * Loading guest fpu may have cleared host cr0.ts
2022          */
2023         vmcs_writel(HOST_CR0, read_cr0());
2024
2025         local_irq_disable();
2026
2027         vcpu->guest_mode = 1;
2028         if (vcpu->requests)
2029                 if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
2030                         vmx_flush_tlb(vcpu);
2031
2032         asm (
2033                 /* Store host registers */
2034 #ifdef CONFIG_X86_64
2035                 "push %%rax; push %%rbx; push %%rdx;"
2036                 "push %%rsi; push %%rdi; push %%rbp;"
2037                 "push %%r8;  push %%r9;  push %%r10; push %%r11;"
2038                 "push %%r12; push %%r13; push %%r14; push %%r15;"
2039                 "push %%rcx \n\t"
2040                 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
2041 #else
2042                 "pusha; push %%ecx \n\t"
2043                 ASM_VMX_VMWRITE_RSP_RDX "\n\t"
2044 #endif
2045                 /* Check if vmlaunch or vmresume is needed */
2046                 "cmp $0, %1 \n\t"
2047                 /* Load guest registers.  Don't clobber flags. */
2048 #ifdef CONFIG_X86_64
2049                 "mov %c[cr2](%3), %%rax \n\t"
2050                 "mov %%rax, %%cr2 \n\t"
2051                 "mov %c[rax](%3), %%rax \n\t"
2052                 "mov %c[rbx](%3), %%rbx \n\t"
2053                 "mov %c[rdx](%3), %%rdx \n\t"
2054                 "mov %c[rsi](%3), %%rsi \n\t"
2055                 "mov %c[rdi](%3), %%rdi \n\t"
2056                 "mov %c[rbp](%3), %%rbp \n\t"
2057                 "mov %c[r8](%3),  %%r8  \n\t"
2058                 "mov %c[r9](%3),  %%r9  \n\t"
2059                 "mov %c[r10](%3), %%r10 \n\t"
2060                 "mov %c[r11](%3), %%r11 \n\t"
2061                 "mov %c[r12](%3), %%r12 \n\t"
2062                 "mov %c[r13](%3), %%r13 \n\t"
2063                 "mov %c[r14](%3), %%r14 \n\t"
2064                 "mov %c[r15](%3), %%r15 \n\t"
2065                 "mov %c[rcx](%3), %%rcx \n\t" /* kills %3 (rcx) */
2066 #else
2067                 "mov %c[cr2](%3), %%eax \n\t"
2068                 "mov %%eax,   %%cr2 \n\t"
2069                 "mov %c[rax](%3), %%eax \n\t"
2070                 "mov %c[rbx](%3), %%ebx \n\t"
2071                 "mov %c[rdx](%3), %%edx \n\t"
2072                 "mov %c[rsi](%3), %%esi \n\t"
2073                 "mov %c[rdi](%3), %%edi \n\t"
2074                 "mov %c[rbp](%3), %%ebp \n\t"
2075                 "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
2076 #endif
2077                 /* Enter guest mode */
2078                 "jne .Llaunched \n\t"
2079                 ASM_VMX_VMLAUNCH "\n\t"
2080                 "jmp .Lkvm_vmx_return \n\t"
2081                 ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
2082                 ".Lkvm_vmx_return: "
2083                 /* Save guest registers, load host registers, keep flags */
2084 #ifdef CONFIG_X86_64
2085                 "xchg %3,     (%%rsp) \n\t"
2086                 "mov %%rax, %c[rax](%3) \n\t"
2087                 "mov %%rbx, %c[rbx](%3) \n\t"
2088                 "pushq (%%rsp); popq %c[rcx](%3) \n\t"
2089                 "mov %%rdx, %c[rdx](%3) \n\t"
2090                 "mov %%rsi, %c[rsi](%3) \n\t"
2091                 "mov %%rdi, %c[rdi](%3) \n\t"
2092                 "mov %%rbp, %c[rbp](%3) \n\t"
2093                 "mov %%r8,  %c[r8](%3) \n\t"
2094                 "mov %%r9,  %c[r9](%3) \n\t"
2095                 "mov %%r10, %c[r10](%3) \n\t"
2096                 "mov %%r11, %c[r11](%3) \n\t"
2097                 "mov %%r12, %c[r12](%3) \n\t"
2098                 "mov %%r13, %c[r13](%3) \n\t"
2099                 "mov %%r14, %c[r14](%3) \n\t"
2100                 "mov %%r15, %c[r15](%3) \n\t"
2101                 "mov %%cr2, %%rax   \n\t"
2102                 "mov %%rax, %c[cr2](%3) \n\t"
2103                 "mov (%%rsp), %3 \n\t"
2104
2105                 "pop  %%rcx; pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
2106                 "pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
2107                 "pop  %%rbp; pop  %%rdi; pop  %%rsi;"
2108                 "pop  %%rdx; pop  %%rbx; pop  %%rax \n\t"
2109 #else
2110                 "xchg %3, (%%esp) \n\t"
2111                 "mov %%eax, %c[rax](%3) \n\t"
2112                 "mov %%ebx, %c[rbx](%3) \n\t"
2113                 "pushl (%%esp); popl %c[rcx](%3) \n\t"
2114                 "mov %%edx, %c[rdx](%3) \n\t"
2115                 "mov %%esi, %c[rsi](%3) \n\t"
2116                 "mov %%edi, %c[rdi](%3) \n\t"
2117                 "mov %%ebp, %c[rbp](%3) \n\t"
2118                 "mov %%cr2, %%eax  \n\t"
2119                 "mov %%eax, %c[cr2](%3) \n\t"
2120                 "mov (%%esp), %3 \n\t"
2121
2122                 "pop %%ecx; popa \n\t"
2123 #endif
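                /*
                 * A failed vmlaunch/vmresume sets CF (VMfailInvalid)
                 * or ZF (VMfailValid); setbe captures either case.
                 */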
2124                 "setbe %0 \n\t"
2125               : "=q" (fail)
2126               : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
2127                 "c"(vcpu),
2128                 [rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
2129                 [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
2130                 [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
2131                 [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
2132                 [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
2133                 [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
2134                 [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP])),
2135 #ifdef CONFIG_X86_64
2136                 [r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
2137                 [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
2138                 [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
2139                 [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
2140                 [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
2141                 [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
2142                 [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
2143                 [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15])),
2144 #endif
2145                 [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
2146               : "cc", "memory" );
2147
2148         vcpu->guest_mode = 0;
2149         local_irq_enable();
2150
2151         ++vcpu->stat.exits;
2152
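        /*
         * Bits 0 and 1 of the interruptibility field are the
         * blocking-by-STI and blocking-by-MOV-SS bits; the window is
         * open only when both are clear.
         */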
2153         vcpu->interrupt_window_open =
                 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
2154
2155         asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
2156
2157         if (unlikely(fail)) {
2158                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2159                 kvm_run->fail_entry.hardware_entry_failure_reason
2160                         = vmcs_read32(VM_INSTRUCTION_ERROR);
2161                 r = 0;
2162                 goto out;
2163         }
2164         /*
2165          * Profile KVM exit RIPs:
2166          */
2167         if (unlikely(prof_on == KVM_PROFILING))
2168                 profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
2169
2170         vcpu->launched = 1;
2171         r = kvm_handle_exit(kvm_run, vcpu);
2172         if (r > 0) {
2173                 /* Give the scheduler a chance to reschedule. */
2174                 if (signal_pending(current)) {
2175                         r = -EINTR;
2176                         kvm_run->exit_reason = KVM_EXIT_INTR;
2177                         ++vcpu->stat.signal_exits;
2178                         goto out;
2179                 }
2180
2181                 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2182                         r = -EINTR;
2183                         kvm_run->exit_reason = KVM_EXIT_INTR;
2184                         ++vcpu->stat.request_irq_exits;
2185                         goto out;
2186                 }
2187                 if (!need_resched()) {
2188                         ++vcpu->stat.light_exits;
2189                         goto again;
2190                 }
2191         }
2192
2193 out:
2194         if (r > 0) {
2195                 kvm_resched(vcpu);
2196                 goto preempted;
2197         }
2198
2199         post_kvm_run_save(vcpu, kvm_run);
2200         return r;
2201 }
2202
2203 static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
2204                                   unsigned long addr,
2205                                   u32 err_code)
2206 {
2207         u32 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
2208
2209         ++vcpu->stat.pf_guest;
2210
2211         if (is_page_fault(vect_info)) {
2212                 printk(KERN_DEBUG "inject_page_fault: "
2213                        "double fault 0x%lx @ 0x%lx\n",
2214                        addr, vmcs_readl(GUEST_RIP));
2215                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 0);
2216                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2217                              DF_VECTOR |
2218                              INTR_TYPE_EXCEPTION |
2219                              INTR_INFO_DELIEVER_CODE_MASK |
2220                              INTR_INFO_VALID_MASK);
2221                 return;
2222         }
2223         vcpu->cr2 = addr;
2224         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, err_code);
2225         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2226                      PF_VECTOR |
2227                      INTR_TYPE_EXCEPTION |
2228                      INTR_INFO_DELIEVER_CODE_MASK |
2229                      INTR_INFO_VALID_MASK);
2230
2231 }
2232
2233 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
2234 {
2235         if (vcpu->vmcs) {
2236                 on_each_cpu(__vcpu_clear, vcpu, 0, 1);
2237                 free_vmcs(vcpu->vmcs);
2238                 vcpu->vmcs = NULL;
2239         }
2240 }
2241
2242 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
2243 {
2244         vmx_free_vmcs(vcpu);
2245 }
2246
2247 static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
2248 {
2249         struct vmcs *vmcs;
2250
2251         vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2252         if (!vcpu->guest_msrs)
2253                 return -ENOMEM;
2254
2255         vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2256         if (!vcpu->host_msrs)
2257                 goto out_free_guest_msrs;
2258
2259         vmcs = alloc_vmcs();
2260         if (!vmcs)
2261                 goto out_free_msrs;
2262
2263         vmcs_clear(vmcs);
2264         vcpu->vmcs = vmcs;
2265         vcpu->launched = 0;
2266
2267         return 0;
2268
2269 out_free_msrs:
2270         kfree(vcpu->host_msrs);
2271         vcpu->host_msrs = NULL;
2272
2273 out_free_guest_msrs:
2274         kfree(vcpu->guest_msrs);
2275         vcpu->guest_msrs = NULL;
2276
2277         return -ENOMEM;
2278 }
2279
2280 static struct kvm_arch_ops vmx_arch_ops = {
2281         .cpu_has_kvm_support = cpu_has_kvm_support,
2282         .disabled_by_bios = vmx_disabled_by_bios,
2283         .hardware_setup = hardware_setup,
2284         .hardware_unsetup = hardware_unsetup,
2285         .hardware_enable = hardware_enable,
2286         .hardware_disable = hardware_disable,
2287
2288         .vcpu_create = vmx_create_vcpu,
2289         .vcpu_free = vmx_free_vcpu,
2290
2291         .vcpu_load = vmx_vcpu_load,
2292         .vcpu_put = vmx_vcpu_put,
2293         .vcpu_decache = vmx_vcpu_decache,
2294
2295         .set_guest_debug = set_guest_debug,
2296         .get_msr = vmx_get_msr,
2297         .set_msr = vmx_set_msr,
2298         .get_segment_base = vmx_get_segment_base,
2299         .get_segment = vmx_get_segment,
2300         .set_segment = vmx_set_segment,
2301         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
2302         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
2303         .set_cr0 = vmx_set_cr0,
2304         .set_cr3 = vmx_set_cr3,
2305         .set_cr4 = vmx_set_cr4,
2306 #ifdef CONFIG_X86_64
2307         .set_efer = vmx_set_efer,
2308 #endif
2309         .get_idt = vmx_get_idt,
2310         .set_idt = vmx_set_idt,
2311         .get_gdt = vmx_get_gdt,
2312         .set_gdt = vmx_set_gdt,
2313         .cache_regs = vcpu_load_rsp_rip,
2314         .decache_regs = vcpu_put_rsp_rip,
2315         .get_rflags = vmx_get_rflags,
2316         .set_rflags = vmx_set_rflags,
2317
2318         .tlb_flush = vmx_flush_tlb,
2319         .inject_page_fault = vmx_inject_page_fault,
2320
2321         .inject_gp = vmx_inject_gp,
2322
2323         .run = vmx_vcpu_run,
2324         .skip_emulated_instruction = skip_emulated_instruction,
2325         .vcpu_setup = vmx_vcpu_setup,
2326         .patch_hypercall = vmx_patch_hypercall,
2327 };
2328
2329 static int __init vmx_init(void)
2330 {
2331         void *iova;
2332         int r;
2333
2334         vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2335         if (!vmx_io_bitmap_a)
2336                 return -ENOMEM;
2337
2338         vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2339         if (!vmx_io_bitmap_b) {
2340                 r = -ENOMEM;
2341                 goto out;
2342         }
2343
2344         /*
2345          * Allow direct access to the PC debug port (it is often used for I/O
2346          * delays, but the vmexits simply slow things down).
2347          */
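        /*
         * A set bit in an I/O bitmap forces a vmexit for that port,
         * so clearing bit 0x80 below grants the guest direct access
         * to port 0x80.
         */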
2348         iova = kmap(vmx_io_bitmap_a);
2349         memset(iova, 0xff, PAGE_SIZE);
2350         clear_bit(0x80, iova);
2351         kunmap(vmx_io_bitmap_a);
2352
2353         iova = kmap(vmx_io_bitmap_b);
2354         memset(iova, 0xff, PAGE_SIZE);
2355         kunmap(vmx_io_bitmap_b);
2356
2357         r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
2358         if (r)
2359                 goto out1;
2360
2361         return 0;
2362
2363 out1:
2364         __free_page(vmx_io_bitmap_b);
2365 out:
2366         __free_page(vmx_io_bitmap_a);
2367         return r;
2368 }
2369
2370 static void __exit vmx_exit(void)
2371 {
2372         __free_page(vmx_io_bitmap_b);
2373         __free_page(vmx_io_bitmap_a);
2374
2375         kvm_exit_arch();
2376 }
2377
2378 module_init(vmx_init)
2379 module_exit(vmx_exit)