#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
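
/*
 * Illustrative sketch (not part of the original header): a CR3 load
 * would be validated against the mask for the current paging mode,
 * injecting #GP if reserved bits are set, roughly:
 *
 *	if (is_long_mode(vcpu)) {
 *		if (cr3 & CR3_L_MODE_RESERVED_BITS)
 *			inject_gp(vcpu);
 *	} else if (is_pae(vcpu)) {
 *		if (cr3 & CR3_PAE_RESERVED_BITS)
 *			inject_gp(vcpu);
 *	} else {
 *		if (cr3 & CR3_NONPAE_RESERVED_BITS)
 *			inject_gp(vcpu);
 *	}
 */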
#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define KVM_PIO_PAGE_OFFSET 1
/*
 * vcpu->requests bit members
 */
#define KVM_TLB_FLUSH 0
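
/*
 * Illustrative sketch (not part of the original header): a remote TLB
 * flush is requested by setting the bit in vcpu->requests; the vcpu
 * acts on it and clears it before re-entering the guest, roughly:
 *
 *	set_bit(KVM_TLB_FLUSH, &vcpu->requests);	   (requester)
 *	...
 *	if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
 *		kvm_arch_ops->tlb_flush(vcpu);		   (vcpu loop)
 */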
/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef unsigned long  gfn_t;

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef unsigned long  hfn_t;
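
/*
 * Illustrative sketch (not part of the original header): frame numbers
 * and addresses convert by shifting with PAGE_SHIFT, e.g.:
 *
 *	gpa_t gpa = (gpa_t)gfn << PAGE_SHIFT;
 *	gfn_t gfn = gpa >> PAGE_SHIFT;
 *
 * gpa_t and hpa_t are u64 even on i386, since a PAE guest can address
 * physical memory above 4GB.
 */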
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};
/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 3;
	};
};
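
/*
 * Illustrative sketch (not part of the original header): the mmu
 * composes a role and uses role.word as part of the shadow page hash
 * lookup; the helper name below is hypothetical:
 *
 *	union kvm_mmu_page_role role;
 *
 *	role.word = 0;
 *	role.glevels = vcpu->mmu.root_level;
 *	role.level = level;
 *	index = shadow_page_hashfn(gfn, role);
 */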
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
};
/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
};
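
/*
 * Illustrative sketch (not part of the original header): callers go
 * through the mode-specific hooks rather than walking guest page
 * tables directly, e.g. translating a guest virtual address:
 *
 *	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 *
 *	if (gpa == UNMAPPED_GVA)
 *		... address not mapped by the guest page tables ...
 */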
#define KVM_NR_MEM_OBJS 20

struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
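
/*
 * Illustrative sketch (not part of the original header): the cache is
 * topped up before a page fault is handled, so objects can then be
 * popped without any possibility of failure, roughly:
 *
 *	static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 *	{
 *		BUG_ON(!mc->nobjs);
 *		return mc->objects[--mc->nobjs];
 *	}
 */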
/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};
struct kvm_pio_request {
	unsigned long count;
	int cur_count;
	struct page *guest_pages[2];
	unsigned guest_page_offset;
	int in, port, size, string, down, rep;
};
struct kvm_stat {
	u32 irq_window_exits;
	u32 halt_exits;
	u32 request_irq_exits;
};
struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void *private;
};

static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	dev->destructor(dev);
}
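
/*
 * Illustrative sketch (not part of the original header): an emulated
 * device fills in a struct kvm_io_device with its callbacks; the names
 * below are hypothetical:
 *
 *	static int pit_in_range(struct kvm_io_device *this, gpa_t addr)
 *	{
 *		return addr >= 0x40 && addr < 0x44;
 *	}
 *
 *	dev->read = pit_read;
 *	dev->write = pit_write;
 *	dev->in_range = pit_in_range;
 *	dev->destructor = pit_destructor;
 *	dev->private = pit_state;
 */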
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
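
/*
 * Illustrative sketch (not part of the original header): MMIO emulation
 * would probe the bus and dispatch to a matching device:
 *
 *	struct kvm_io_device *dev;
 *
 *	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, gpa);
 *	if (dev)
 *		kvm_iodevice_write(dev, gpa, bytes, val);
 *	else
 *		... exit to userspace with KVM_EXIT_MMIO ...
 */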
struct kvm_vcpu {
	struct kvm *kvm;
	struct preempt_notifier preempt_notifier;
	struct kvm_run *run;
	int interrupt_window_open;
	unsigned long requests;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;	/* needs vcpu_load_rsp_rip() */

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	gpa_t para_state_gpa;
	struct page *para_state_page;
	gpa_t hypercall_gpa;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 ia32_misc_enable_msr;

	struct kvm_mmu mmu;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;

	struct kvm_guest_debug guest_debug;

	char fx_buf[FX_BUF_SIZE];
	char *host_fx_image;
	char *guest_fx_image;
	int guest_fpu_loaded;

	int mmio_read_completed;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;

	struct kvm_stat stat;

	struct kvm_save_segment {
		u16 selector;
		unsigned long base;
		u32 limit;
		u32 ar;
	} tr, es, ds, fs, gs;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
};
struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};
struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	struct page **phys_mem;
	unsigned long *dirty_bitmap;
};
struct kvm {
	struct mutex lock; /* protects everything except vcpus */
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	int n_free_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	int memory_config_version;
	unsigned long rmap_overflow;
	struct list_head vm_list;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
};
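
/*
 * Illustrative sketch (not part of the original header): gfn_to_memslot()
 * amounts to a linear scan of the slot array, roughly:
 *
 *	for (i = 0; i < kvm->nmemslots; ++i) {
 *		struct kvm_memory_slot *slot = &kvm->memslots[i];
 *
 *		if (gfn >= slot->base_gfn &&
 *		    gfn < slot->base_gfn + slot->npages)
 *			return slot;
 *	}
 *	return NULL;
 */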
struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));
struct kvm_arch_ops {
	int (*cpu_has_kvm_support)(void);		/* __init */
	int (*disabled_by_bios)(void);			/* __init */
	void (*hardware_enable)(void *dummy);		/* __init */
	void (*hardware_disable)(void *dummy);
	int (*hardware_setup)(void);			/* __init */
	void (*hardware_unsetup)(void);			/* __exit */

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);

	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
	void (*tlb_flush)(struct kvm_vcpu *vcpu);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  unsigned long addr, u32 err_code);

	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

	int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
};
extern struct kvm_arch_ops *kvm_arch_ops;

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
void kvm_exit_arch(void);
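
/*
 * Illustrative sketch (not part of the original header): an arch module
 * (Intel or AMD support) registers its ops table at module init,
 * roughly:
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
 *	}
 *
 *	static void __exit vmx_exit(void)
 *	{
 *		kvm_exit_arch();
 *	}
 */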
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
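
/*
 * Illustrative sketch (not part of the original header): translation
 * failures are signalled in-band via the error bit, so callers check:
 *
 *	hpa_t hpa = gpa_to_hpa(vcpu, gpa);
 *
 *	if (is_error_hpa(hpa))
 *		... gpa is not backed by a memory slot ...
 */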
void kvm_emulator_want_group7_invlpg(void);

extern hpa_t bad_page_address;

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with mmio request */
	EMULATE_FAIL,		/* can't emulate this instruction */
};
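
/*
 * Illustrative sketch (not part of the original header): a page-fault
 * exit handler would act on the emulator verdict, roughly:
 *
 *	switch (emulate_instruction(vcpu, run, cr2, error_code)) {
 *	case EMULATE_DONE:
 *		return 1;	resume the guest
 *	case EMULATE_DO_MMIO:
 *		return 0;	exit to userspace; kvm_run is filled in
 *	case EMULATE_FAIL:
 *		... report the emulation failure ...
 *	}
 */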
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
struct x86_emulate_ctxt;

int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		  int size, unsigned long count, int string, int down,
		  gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
int kvm_read_guest(struct kvm_vcpu *vcpu,
		   gva_t addr,
		   unsigned long size,
		   void *dest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
		    gva_t addr,
		    unsigned long size,
		    void *data);
unsigned long segment_base(u16 selector);

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				     u32 error_code)
{
	return vcpu->mmu.page_fault(vcpu, gva, error_code);
}

static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
		__kvm_mmu_free_some_pages(vcpu);
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->shadow_efer & EFER_LME;
#else
	return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & X86_CR4_PAE;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & X86_CR4_PSE;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & X86_CR0_PG;
}
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}
static inline u16 read_fs(void)
{
	u16 seg;
	asm ("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm ("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm ("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm ("mov %0, %%gs" : : "rm"(sel));
}

static inline void load_ldt(u16 sel)
{
	asm ("lldt %0" : : "rm"(sel));
}

static inline void get_idt(struct descriptor_table *table)
{
	asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm ("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm ("str %0" : "=g"(tr));
	return segment_base(tr);
}
#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif
static inline void fx_save(void *image)
{
	asm ("fxsave (%0)":: "r" (image));
}

static inline void fx_restore(void *image)
{
	asm ("fxrstor (%0)":: "r" (image));
}

static inline void fpu_init(void)
{
	asm ("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}
#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
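
/*
 * Illustrative sketch (not part of the original header): the .byte
 * sequences encode VMX instructions for assemblers that don't know
 * them; e.g. a VMREAD wrapper along these lines:
 *
 *	static unsigned long vmcs_readl(unsigned long field)
 *	{
 *		unsigned long value;
 *
 *		asm volatile (ASM_VMX_VMREAD_RDX_RAX
 *			      : "=a"(value) : "d"(field) : "cc");
 *		return value;
 *	}
 */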
#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif