2 * Kernel-based Virtual Machine driver for Linux
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
7 * Copyright (C) 2006 Qumranet, Inc.
10 * Avi Kivity <avi@qumranet.com>
11 * Yaniv Kamay <yaniv@qumranet.com>
13 * This work is licensed under the terms of the GNU GPL, version 2. See
14 * the COPYING file in the top-level directory.
19 #include "x86_emulate.h"
20 #include "segment_descriptor.h"
22 #include <linux/kvm.h>
23 #include <linux/module.h>
24 #include <linux/errno.h>
25 #include <linux/percpu.h>
26 #include <linux/gfp.h>
28 #include <linux/miscdevice.h>
29 #include <linux/vmalloc.h>
30 #include <linux/reboot.h>
31 #include <linux/debugfs.h>
32 #include <linux/highmem.h>
33 #include <linux/file.h>
34 #include <linux/sysdev.h>
35 #include <linux/cpu.h>
36 #include <linux/sched.h>
37 #include <linux/cpumask.h>
38 #include <linux/smp.h>
39 #include <linux/anon_inodes.h>
41 #include <asm/processor.h>
44 #include <asm/uaccess.h>
47 MODULE_AUTHOR("Qumranet");
48 MODULE_LICENSE("GPL");
50 static DEFINE_SPINLOCK(kvm_lock);
51 static LIST_HEAD(vm_list);
53 static cpumask_t cpus_hardware_enabled;
55 struct kvm_arch_ops *kvm_arch_ops;
57 #define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
59 static struct kvm_stats_debugfs_item {
62 struct dentry *dentry;
63 } debugfs_entries[] = {
64 { "pf_fixed", STAT_OFFSET(pf_fixed) },
65 { "pf_guest", STAT_OFFSET(pf_guest) },
66 { "tlb_flush", STAT_OFFSET(tlb_flush) },
67 { "invlpg", STAT_OFFSET(invlpg) },
68 { "exits", STAT_OFFSET(exits) },
69 { "io_exits", STAT_OFFSET(io_exits) },
70 { "mmio_exits", STAT_OFFSET(mmio_exits) },
71 { "signal_exits", STAT_OFFSET(signal_exits) },
72 { "irq_window", STAT_OFFSET(irq_window_exits) },
73 { "halt_exits", STAT_OFFSET(halt_exits) },
74 { "request_irq", STAT_OFFSET(request_irq_exits) },
75 { "irq_exits", STAT_OFFSET(irq_exits) },
76 { "light_exits", STAT_OFFSET(light_exits) },
77 { "efer_reload", STAT_OFFSET(efer_reload) },
81 static struct dentry *debugfs_dir;
83 #define MAX_IO_MSRS 256
85 #define CR0_RESERVED_BITS \
86 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
87 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
88 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
89 #define LMSW_GUEST_MASK 0x0eULL
90 #define CR4_RESERVED_BITS (~((1ULL << 11) - 1))
91 #define CR8_RESERVED_BITS (~0x0fULL)
92 #define EFER_RESERVED_BITS 0xfffffffffffff2fe
95 /* LDT or TSS descriptor in the GDT. 16 bytes. */
96 struct segment_descriptor_64 {
97 struct segment_descriptor s;
104 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
107 unsigned long segment_base(u16 selector)
109 struct descriptor_table gdt;
110 struct segment_descriptor *d;
111 unsigned long table_base;
112 typedef unsigned long ul;
118 asm ("sgdt %0" : "=m"(gdt));
119 table_base = gdt.base;
121 if (selector & 4) { /* from ldt */
124 asm ("sldt %0" : "=g"(ldt_selector));
125 table_base = segment_base(ldt_selector);
127 d = (struct segment_descriptor *)(table_base + (selector & ~7));
128 v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
131 && (d->type == 2 || d->type == 9 || d->type == 11))
132 v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
136 EXPORT_SYMBOL_GPL(segment_base);
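/*
 * Editor's note, a worked example (the selector value is hypothetical):
 * for selector 0x23, bit 2 is clear, so the descriptor comes from the
 * GDT, and (0x23 & ~7) == 0x20 addresses descriptor index 4 (entries
 * are 8 bytes).  With bit 2 set, table_base is first redirected to the
 * LDT via the recursive segment_base(ldt_selector) call above.
 */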
138 static inline int valid_vcpu(int n)
140 return likely(n >= 0 && n < KVM_MAX_VCPUS);
143 int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
146 unsigned char *host_buf = dest;
147 unsigned long req_size = size;
155 paddr = gva_to_hpa(vcpu, addr);
157 if (is_error_hpa(paddr))
160 guest_buf = (hva_t)kmap_atomic(
161 pfn_to_page(paddr >> PAGE_SHIFT),
163 offset = addr & ~PAGE_MASK;
165 now = min(size, PAGE_SIZE - offset);
166 memcpy(host_buf, (void *)guest_buf, now);
170 kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
172 return req_size - size;
174 EXPORT_SYMBOL_GPL(kvm_read_guest);
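/*
 * A minimal usage sketch (illustrative only; vcpu, gva and the error
 * path are assumptions of this example, not code from this file):
 *
 *	u8 buf[16];
 *	if (kvm_read_guest(vcpu, gva, sizeof buf, buf) != sizeof buf)
 *		return -EFAULT;	  (short read: a page was unmapped)
 *
 * The return value is the number of bytes actually copied, so anything
 * less than the requested size signals a partial read.
 */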
176 int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
179 unsigned char *host_buf = data;
180 unsigned long req_size = size;
189 paddr = gva_to_hpa(vcpu, addr);
191 if (is_error_hpa(paddr))
194 gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
195 mark_page_dirty(vcpu->kvm, gfn);
196 guest_buf = (hva_t)kmap_atomic(
197 pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
198 offset = addr & ~PAGE_MASK;
200 now = min(size, PAGE_SIZE - offset);
201 memcpy((void *)guest_buf, host_buf, now);
205 kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
207 return req_size - size;
209 EXPORT_SYMBOL_GPL(kvm_write_guest);
211 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
213 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
216 vcpu->guest_fpu_loaded = 1;
217 fx_save(vcpu->host_fx_image);
218 fx_restore(vcpu->guest_fx_image);
220 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
222 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
224 if (!vcpu->guest_fpu_loaded)
227 vcpu->guest_fpu_loaded = 0;
228 fx_save(vcpu->guest_fx_image);
229 fx_restore(vcpu->host_fx_image);
231 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
234 * Switches to specified vcpu, until a matching vcpu_put()
236 static void vcpu_load(struct kvm_vcpu *vcpu)
238 mutex_lock(&vcpu->mutex);
239 kvm_arch_ops->vcpu_load(vcpu);
242 static void vcpu_put(struct kvm_vcpu *vcpu)
244 kvm_arch_ops->vcpu_put(vcpu);
245 mutex_unlock(&vcpu->mutex);
248 static void ack_flush(void *_completed)
250 atomic_t *completed = _completed;
252 atomic_inc(completed);
255 void kvm_flush_remote_tlbs(struct kvm *kvm)
259 struct kvm_vcpu *vcpu;
262 atomic_set(&completed, 0);
265 for (i = 0; i < kvm->nvcpus; ++i) {
266 vcpu = &kvm->vcpus[i];
267 if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
270 if (cpu != -1 && cpu != raw_smp_processor_id())
271 if (!cpu_isset(cpu, cpus)) {
278 * We really want smp_call_function_mask() here. But that's not
279 * available, so ipi all cpus in parallel and wait for them
282 for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
283 smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
284 while (atomic_read(&completed) != needed) {
290 static struct kvm *kvm_create_vm(void)
292 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
296 return ERR_PTR(-ENOMEM);
298 kvm_io_bus_init(&kvm->pio_bus);
299 spin_lock_init(&kvm->lock);
300 INIT_LIST_HEAD(&kvm->active_mmu_pages);
301 kvm_io_bus_init(&kvm->mmio_bus);
302 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
303 struct kvm_vcpu *vcpu = &kvm->vcpus[i];
305 mutex_init(&vcpu->mutex);
308 vcpu->mmu.root_hpa = INVALID_PAGE;
310 spin_lock(&kvm_lock);
311 list_add(&kvm->vm_list, &vm_list);
312 spin_unlock(&kvm_lock);
316 static int kvm_dev_open(struct inode *inode, struct file *filp)
322 * Free any memory in @free but not in @dont.
324 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
325 struct kvm_memory_slot *dont)
329 if (!dont || free->phys_mem != dont->phys_mem)
330 if (free->phys_mem) {
331 for (i = 0; i < free->npages; ++i)
332 if (free->phys_mem[i])
333 __free_page(free->phys_mem[i]);
334 vfree(free->phys_mem);
337 if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
338 vfree(free->dirty_bitmap);
340 free->phys_mem = NULL;
342 free->dirty_bitmap = NULL;
345 static void kvm_free_physmem(struct kvm *kvm)
349 for (i = 0; i < kvm->nmemslots; ++i)
350 kvm_free_physmem_slot(&kvm->memslots[i], NULL);
353 static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
357 for (i = 0; i < 2; ++i)
358 if (vcpu->pio.guest_pages[i]) {
359 __free_page(vcpu->pio.guest_pages[i]);
360 vcpu->pio.guest_pages[i] = NULL;
364 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
370 kvm_mmu_unload(vcpu);
374 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
380 kvm_mmu_destroy(vcpu);
382 kvm_arch_ops->vcpu_free(vcpu);
383 free_page((unsigned long)vcpu->run);
385 free_page((unsigned long)vcpu->pio_data);
386 vcpu->pio_data = NULL;
387 free_pio_guest_pages(vcpu);
390 static void kvm_free_vcpus(struct kvm *kvm)
395 * Unpin any mmu pages first.
397 for (i = 0; i < KVM_MAX_VCPUS; ++i)
398 kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
399 for (i = 0; i < KVM_MAX_VCPUS; ++i)
400 kvm_free_vcpu(&kvm->vcpus[i]);
403 static int kvm_dev_release(struct inode *inode, struct file *filp)
408 static void kvm_destroy_vm(struct kvm *kvm)
410 spin_lock(&kvm_lock);
411 list_del(&kvm->vm_list);
412 spin_unlock(&kvm_lock);
413 kvm_io_bus_destroy(&kvm->pio_bus);
414 kvm_io_bus_destroy(&kvm->mmio_bus);
416 kvm_free_physmem(kvm);
420 static int kvm_vm_release(struct inode *inode, struct file *filp)
422 struct kvm *kvm = filp->private_data;
428 static void inject_gp(struct kvm_vcpu *vcpu)
430 kvm_arch_ops->inject_gp(vcpu, 0);
434 * Load the pae pdptrs. Return true if they are all valid.
436 static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
438 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
439 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
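/*
 * The PAE pdpt is 32-byte aligned within its page; cr3 bits 5-11 pick
 * the slot and the "<< 2" scales it to a u64 index (4 entries per
 * table).  E.g. (hypothetical) cr3 = 0x12345020: offset becomes
 * ((0x020 >> 5) << 2) == 4, so the entries are read from pdpt[4..7].
 */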
446 spin_lock(&vcpu->kvm->lock);
447 page = gfn_to_page(vcpu->kvm, pdpt_gfn);
448 /* FIXME: !page - emulate? 0xff? */
449 pdpt = kmap_atomic(page, KM_USER0);
452 for (i = 0; i < 4; ++i) {
453 pdpte = pdpt[offset + i];
454 if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
460 for (i = 0; i < 4; ++i)
461 vcpu->pdptrs[i] = pdpt[offset + i];
464 kunmap_atomic(pdpt, KM_USER0);
465 spin_unlock(&vcpu->kvm->lock);
470 void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
472 if (cr0 & CR0_RESERVED_BITS) {
473 printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
479 if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
480 printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
485 if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
486 printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
487 "and a clear PE flag\n");
492 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
494 if ((vcpu->shadow_efer & EFER_LME)) {
498 printk(KERN_DEBUG "set_cr0: #GP, start paging "
499 "in long mode while PAE is disabled\n");
503 kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
505 printk(KERN_DEBUG "set_cr0: #GP, start paging "
506 "in long mode while CS.L == 1\n");
513 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
514 printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
522 kvm_arch_ops->set_cr0(vcpu, cr0);
525 spin_lock(&vcpu->kvm->lock);
526 kvm_mmu_reset_context(vcpu);
527 spin_unlock(&vcpu->kvm->lock);
530 EXPORT_SYMBOL_GPL(set_cr0);
532 void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
534 set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
536 EXPORT_SYMBOL_GPL(lmsw);
538 void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
540 if (cr4 & CR4_RESERVED_BITS) {
541 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
546 if (is_long_mode(vcpu)) {
547 if (!(cr4 & CR4_PAE_MASK)) {
548 printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
553 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
554 && !load_pdptrs(vcpu, vcpu->cr3)) {
555 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
559 if (cr4 & CR4_VMXE_MASK) {
560 printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
564 kvm_arch_ops->set_cr4(vcpu, cr4);
565 spin_lock(&vcpu->kvm->lock);
566 kvm_mmu_reset_context(vcpu);
567 spin_unlock(&vcpu->kvm->lock);
569 EXPORT_SYMBOL_GPL(set_cr4);
571 void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
573 if (is_long_mode(vcpu)) {
574 if (cr3 & CR3_L_MODE_RESERVED_BITS) {
575 printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
581 if (cr3 & CR3_PAE_RESERVED_BITS) {
583 "set_cr3: #GP, reserved bits\n");
587 if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
588 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
594 if (cr3 & CR3_NONPAE_RESERVED_BITS) {
596 "set_cr3: #GP, reserved bits\n");
604 spin_lock(&vcpu->kvm->lock);
606 * Does the new cr3 value map to physical memory? (Note, we
607 * catch an invalid cr3 even in real-mode, because it would
608 * cause trouble later on when we turn on paging anyway.)
610 * A real CPU would silently accept an invalid cr3 and would
611 * attempt to use it - with largely undefined (and often hard
612 * to debug) behavior on the guest side.
614 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
617 vcpu->mmu.new_cr3(vcpu);
618 spin_unlock(&vcpu->kvm->lock);
620 EXPORT_SYMBOL_GPL(set_cr3);
622 void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
624 if (cr8 & CR8_RESERVED_BITS) {
625 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
631 EXPORT_SYMBOL_GPL(set_cr8);
633 void fx_init(struct kvm_vcpu *vcpu)
635 struct __attribute__ ((__packed__)) fx_image_s {
641 u64 operand; /* fpu dp */
647 fx_save(vcpu->host_fx_image);
649 fx_save(vcpu->guest_fx_image);
650 fx_restore(vcpu->host_fx_image);
652 fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
653 fx_image->mxcsr = 0x1f80;
654 memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
655 0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
657 EXPORT_SYMBOL_GPL(fx_init);
660 * Allocate some memory and give it an address in the guest physical address
663 * Discontiguous memory is allowed, mostly for framebuffers.
665 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
666 struct kvm_memory_region *mem)
670 unsigned long npages;
672 struct kvm_memory_slot *memslot;
673 struct kvm_memory_slot old, new;
674 int memory_config_version;
677 /* General sanity checks */
678 if (mem->memory_size & (PAGE_SIZE - 1))
680 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
682 if (mem->slot >= KVM_MEMORY_SLOTS)
684 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
687 memslot = &kvm->memslots[mem->slot];
688 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
689 npages = mem->memory_size >> PAGE_SHIFT;
692 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
695 spin_lock(&kvm->lock);
697 memory_config_version = kvm->memory_config_version;
698 new = old = *memslot;
700 new.base_gfn = base_gfn;
702 new.flags = mem->flags;
704 /* Disallow changing a memory slot's size. */
706 if (npages && old.npages && npages != old.npages)
709 /* Check for overlaps */
711 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
712 struct kvm_memory_slot *s = &kvm->memslots[i];
716 if (!((base_gfn + npages <= s->base_gfn) ||
717 (base_gfn >= s->base_gfn + s->npages)))
721 * Do memory allocations outside lock. memory_config_version will
724 spin_unlock(&kvm->lock);
726 /* Deallocate if slot is being removed */
730 /* Free page dirty bitmap if unneeded */
731 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
732 new.dirty_bitmap = NULL;
736 /* Allocate if a slot is being created */
737 if (npages && !new.phys_mem) {
738 new.phys_mem = vmalloc(npages * sizeof(struct page *));
743 memset(new.phys_mem, 0, npages * sizeof(struct page *));
744 for (i = 0; i < npages; ++i) {
745 new.phys_mem[i] = alloc_page(GFP_HIGHUSER
747 if (!new.phys_mem[i])
749 set_page_private(new.phys_mem[i], 0);
753 /* Allocate page dirty bitmap if needed */
754 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
755 unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
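/*
 * One bit per page, rounded up to whole longs: a (hypothetical)
 * 1000-page slot on a 64-bit host needs ALIGN(1000, 64) == 1024 bits,
 * i.e. 128 bytes of bitmap.
 */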
757 new.dirty_bitmap = vmalloc(dirty_bytes);
758 if (!new.dirty_bitmap)
760 memset(new.dirty_bitmap, 0, dirty_bytes);
763 spin_lock(&kvm->lock);
765 if (memory_config_version != kvm->memory_config_version) {
766 spin_unlock(&kvm->lock);
767 kvm_free_physmem_slot(&new, &old);
775 if (mem->slot >= kvm->nmemslots)
776 kvm->nmemslots = mem->slot + 1;
779 ++kvm->memory_config_version;
781 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
782 kvm_flush_remote_tlbs(kvm);
784 spin_unlock(&kvm->lock);
786 kvm_free_physmem_slot(&old, &new);
790 spin_unlock(&kvm->lock);
792 kvm_free_physmem_slot(&new, &old);
798 * Get (and clear) the dirty memory log for a memory slot.
800 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
801 struct kvm_dirty_log *log)
803 struct kvm_memory_slot *memslot;
806 unsigned long any = 0;
808 spin_lock(&kvm->lock);
811 * Prevent changes to guest memory configuration even while the lock
815 spin_unlock(&kvm->lock);
817 if (log->slot >= KVM_MEMORY_SLOTS)
820 memslot = &kvm->memslots[log->slot];
822 if (!memslot->dirty_bitmap)
825 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
827 for (i = 0; !any && i < n/sizeof(long); ++i)
828 any = memslot->dirty_bitmap[i];
831 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
834 spin_lock(&kvm->lock);
835 kvm_mmu_slot_remove_write_access(kvm, log->slot);
836 kvm_flush_remote_tlbs(kvm);
837 memset(memslot->dirty_bitmap, 0, n);
838 spin_unlock(&kvm->lock);
843 spin_lock(&kvm->lock);
845 spin_unlock(&kvm->lock);
850 * Set a new alias region. Aliases map a portion of physical memory into
851 * another portion. This is useful for memory windows, for example the PC
854 static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
855 struct kvm_memory_alias *alias)
858 struct kvm_mem_alias *p;
861 /* General sanity checks */
862 if (alias->memory_size & (PAGE_SIZE - 1))
864 if (alias->guest_phys_addr & (PAGE_SIZE - 1))
866 if (alias->slot >= KVM_ALIAS_SLOTS)
868 if (alias->guest_phys_addr + alias->memory_size
869 < alias->guest_phys_addr)
871 if (alias->target_phys_addr + alias->memory_size
872 < alias->target_phys_addr)
875 spin_lock(&kvm->lock);
877 p = &kvm->aliases[alias->slot];
878 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
879 p->npages = alias->memory_size >> PAGE_SHIFT;
880 p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
882 for (n = KVM_ALIAS_SLOTS; n > 0; --n)
883 if (kvm->aliases[n - 1].npages)
887 kvm_mmu_zap_all(kvm);
889 spin_unlock(&kvm->lock);
897 static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
900 struct kvm_mem_alias *alias;
902 for (i = 0; i < kvm->naliases; ++i) {
903 alias = &kvm->aliases[i];
904 if (gfn >= alias->base_gfn
905 && gfn < alias->base_gfn + alias->npages)
906 return alias->target_gfn + gfn - alias->base_gfn;
911 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
915 for (i = 0; i < kvm->nmemslots; ++i) {
916 struct kvm_memory_slot *memslot = &kvm->memslots[i];
918 if (gfn >= memslot->base_gfn
919 && gfn < memslot->base_gfn + memslot->npages)
925 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
927 gfn = unalias_gfn(kvm, gfn);
928 return __gfn_to_memslot(kvm, gfn);
931 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
933 struct kvm_memory_slot *slot;
935 gfn = unalias_gfn(kvm, gfn);
936 slot = __gfn_to_memslot(kvm, gfn);
939 return slot->phys_mem[gfn - slot->base_gfn];
941 EXPORT_SYMBOL_GPL(gfn_to_page);
943 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
946 struct kvm_memory_slot *memslot;
947 unsigned long rel_gfn;
949 for (i = 0; i < kvm->nmemslots; ++i) {
950 memslot = &kvm->memslots[i];
952 if (gfn >= memslot->base_gfn
953 && gfn < memslot->base_gfn + memslot->npages) {
955 if (!memslot->dirty_bitmap)
958 rel_gfn = gfn - memslot->base_gfn;
961 if (!test_bit(rel_gfn, memslot->dirty_bitmap))
962 set_bit(rel_gfn, memslot->dirty_bitmap);
968 static int emulator_read_std(unsigned long addr,
971 struct x86_emulate_ctxt *ctxt)
973 struct kvm_vcpu *vcpu = ctxt->vcpu;
977 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
978 unsigned offset = addr & (PAGE_SIZE-1);
979 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
984 if (gpa == UNMAPPED_GVA)
985 return X86EMUL_PROPAGATE_FAULT;
986 pfn = gpa >> PAGE_SHIFT;
987 page = gfn_to_page(vcpu->kvm, pfn);
989 return X86EMUL_UNHANDLEABLE;
990 page_virt = kmap_atomic(page, KM_USER0);
992 memcpy(data, page_virt + offset, tocopy);
994 kunmap_atomic(page_virt, KM_USER0);
1001 return X86EMUL_CONTINUE;
1004 static int emulator_write_std(unsigned long addr,
1007 struct x86_emulate_ctxt *ctxt)
1009 printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
1011 return X86EMUL_UNHANDLEABLE;
1014 static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1018 * Note that it's important to have this wrapper function because
1019 * in the very near future we will be checking for MMIOs against
1020 * the LAPIC as well as the general MMIO bus
1022 return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
1025 static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
1028 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
1031 static int emulator_read_emulated(unsigned long addr,
1034 struct x86_emulate_ctxt *ctxt)
1036 struct kvm_vcpu *vcpu = ctxt->vcpu;
1037 struct kvm_io_device *mmio_dev;
1040 if (vcpu->mmio_read_completed) {
1041 memcpy(val, vcpu->mmio_data, bytes);
1042 vcpu->mmio_read_completed = 0;
1043 return X86EMUL_CONTINUE;
1044 } else if (emulator_read_std(addr, val, bytes, ctxt)
1045 == X86EMUL_CONTINUE)
1046 return X86EMUL_CONTINUE;
1048 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1049 if (gpa == UNMAPPED_GVA)
1050 return X86EMUL_PROPAGATE_FAULT;
1053 * Is this MMIO handled locally?
1055 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1057 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
1058 return X86EMUL_CONTINUE;
1061 vcpu->mmio_needed = 1;
1062 vcpu->mmio_phys_addr = gpa;
1063 vcpu->mmio_size = bytes;
1064 vcpu->mmio_is_write = 0;
1066 return X86EMUL_UNHANDLEABLE;
1069 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1070 const void *val, int bytes)
1074 unsigned offset = offset_in_page(gpa);
1076 if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
1078 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1081 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
1082 virt = kmap_atomic(page, KM_USER0);
1083 kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
1084 memcpy(virt + offset_in_page(gpa), val, bytes);
1085 kunmap_atomic(virt, KM_USER0);
1089 static int emulator_write_emulated_onepage(unsigned long addr,
1092 struct x86_emulate_ctxt *ctxt)
1094 struct kvm_vcpu *vcpu = ctxt->vcpu;
1095 struct kvm_io_device *mmio_dev;
1096 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1098 if (gpa == UNMAPPED_GVA) {
1099 kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
1100 return X86EMUL_PROPAGATE_FAULT;
1103 if (emulator_write_phys(vcpu, gpa, val, bytes))
1104 return X86EMUL_CONTINUE;
1107 * Is this MMIO handled locally?
1109 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1111 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
1112 return X86EMUL_CONTINUE;
1115 vcpu->mmio_needed = 1;
1116 vcpu->mmio_phys_addr = gpa;
1117 vcpu->mmio_size = bytes;
1118 vcpu->mmio_is_write = 1;
1119 memcpy(vcpu->mmio_data, val, bytes);
1121 return X86EMUL_CONTINUE;
1124 static int emulator_write_emulated(unsigned long addr,
1127 struct x86_emulate_ctxt *ctxt)
1129 /* Crossing a page boundary? */
1130 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
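/*
 * "-addr & ~PAGE_MASK" below is the distance to the next page
 * boundary.  E.g. (hypothetical, 4K pages) addr = 0x1ffe, bytes = 4:
 * now == 2, so two bytes go to the first page and the remaining two
 * fall through to the second onepage call.
 */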
1133 now = -addr & ~PAGE_MASK;
1134 rc = emulator_write_emulated_onepage(addr, val, now, ctxt);
1135 if (rc != X86EMUL_CONTINUE)
1141 return emulator_write_emulated_onepage(addr, val, bytes, ctxt);
1144 static int emulator_cmpxchg_emulated(unsigned long addr,
1148 struct x86_emulate_ctxt *ctxt)
1150 static int reported;
1154 printk(KERN_WARNING "kvm: emulating exchange as write\n");
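/*
 * Editor's note: emulating cmpxchg as a plain write discards the
 * compare half of the instruction, so atomicity against other vcpus
 * is not preserved; the one-time warning above flags this
 * simplification.
 */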
1156 return emulator_write_emulated(addr, new, bytes, ctxt);
1159 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
1161 return kvm_arch_ops->get_segment_base(vcpu, seg);
1164 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
1166 return X86EMUL_CONTINUE;
1169 int emulate_clts(struct kvm_vcpu *vcpu)
1173 cr0 = vcpu->cr0 & ~X86_CR0_TS;
1174 kvm_arch_ops->set_cr0(vcpu, cr0);
1175 return X86EMUL_CONTINUE;
1178 int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
1180 struct kvm_vcpu *vcpu = ctxt->vcpu;
1184 *dest = kvm_arch_ops->get_dr(vcpu, dr);
1185 return X86EMUL_CONTINUE;
1187 printk(KERN_DEBUG "%s: unexpected dr %u\n",
1189 return X86EMUL_UNHANDLEABLE;
1193 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
1195 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
1198 kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
1200 /* FIXME: better handling */
1201 return X86EMUL_UNHANDLEABLE;
1203 return X86EMUL_CONTINUE;
1206 static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
1208 static int reported;
1210 unsigned long rip = ctxt->vcpu->rip;
1211 unsigned long rip_linear;
1213 rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);
1218 emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);
1220 printk(KERN_ERR "emulation failed but !mmio_needed?"
1221 " rip %lx %02x %02x %02x %02x\n",
1222 rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
1226 struct x86_emulate_ops emulate_ops = {
1227 .read_std = emulator_read_std,
1228 .write_std = emulator_write_std,
1229 .read_emulated = emulator_read_emulated,
1230 .write_emulated = emulator_write_emulated,
1231 .cmpxchg_emulated = emulator_cmpxchg_emulated,
1234 int emulate_instruction(struct kvm_vcpu *vcpu,
1235 struct kvm_run *run,
1239 struct x86_emulate_ctxt emulate_ctxt;
1243 vcpu->mmio_fault_cr2 = cr2;
1244 kvm_arch_ops->cache_regs(vcpu);
1246 kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
1248 emulate_ctxt.vcpu = vcpu;
1249 emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
1250 emulate_ctxt.cr2 = cr2;
1251 emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
1252 ? X86EMUL_MODE_REAL : cs_l
1253 ? X86EMUL_MODE_PROT64 : cs_db
1254 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
1256 if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
1257 emulate_ctxt.cs_base = 0;
1258 emulate_ctxt.ds_base = 0;
1259 emulate_ctxt.es_base = 0;
1260 emulate_ctxt.ss_base = 0;
1262 emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
1263 emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
1264 emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
1265 emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
1268 emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
1269 emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);
1271 vcpu->mmio_is_write = 0;
1272 r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);
1274 if ((r || vcpu->mmio_is_write) && run) {
1275 run->mmio.phys_addr = vcpu->mmio_phys_addr;
1276 memcpy(run->mmio.data, vcpu->mmio_data, 8);
1277 run->mmio.len = vcpu->mmio_size;
1278 run->mmio.is_write = vcpu->mmio_is_write;
1282 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
1283 return EMULATE_DONE;
1284 if (!vcpu->mmio_needed) {
1285 report_emulation_failure(&emulate_ctxt);
1286 return EMULATE_FAIL;
1288 return EMULATE_DO_MMIO;
1291 kvm_arch_ops->decache_regs(vcpu);
1292 kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);
1294 if (vcpu->mmio_is_write) {
1295 vcpu->mmio_needed = 0;
1296 return EMULATE_DO_MMIO;
1299 return EMULATE_DONE;
1301 EXPORT_SYMBOL_GPL(emulate_instruction);
1303 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
1305 if (vcpu->irq_summary)
1308 vcpu->run->exit_reason = KVM_EXIT_HLT;
1309 ++vcpu->stat.halt_exits;
1312 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
1314 int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
1316 unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
1318 kvm_arch_ops->cache_regs(vcpu);
1320 #ifdef CONFIG_X86_64
1321 if (is_long_mode(vcpu)) {
1322 nr = vcpu->regs[VCPU_REGS_RAX];
1323 a0 = vcpu->regs[VCPU_REGS_RDI];
1324 a1 = vcpu->regs[VCPU_REGS_RSI];
1325 a2 = vcpu->regs[VCPU_REGS_RDX];
1326 a3 = vcpu->regs[VCPU_REGS_RCX];
1327 a4 = vcpu->regs[VCPU_REGS_R8];
1328 a5 = vcpu->regs[VCPU_REGS_R9];
1332 nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
1333 a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
1334 a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
1335 a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
1336 a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
1337 a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
1338 a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
1342 run->hypercall.args[0] = a0;
1343 run->hypercall.args[1] = a1;
1344 run->hypercall.args[2] = a2;
1345 run->hypercall.args[3] = a3;
1346 run->hypercall.args[4] = a4;
1347 run->hypercall.args[5] = a5;
1348 run->hypercall.ret = ret;
1349 run->hypercall.longmode = is_long_mode(vcpu);
1350 kvm_arch_ops->decache_regs(vcpu);
1353 vcpu->regs[VCPU_REGS_RAX] = ret;
1354 kvm_arch_ops->decache_regs(vcpu);
1357 EXPORT_SYMBOL_GPL(kvm_hypercall);
1359 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
1361 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
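/*
 * E.g. (hypothetical) curr_cr = 0xffffffff00000010, new_val =
 * 0x80000011: the result is 0xffffffff80000011 -- a 32-bit mov-to-cr
 * replaces only the low half and preserves the upper 32 bits.
 */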
1364 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
1366 struct descriptor_table dt = { limit, base };
1368 kvm_arch_ops->set_gdt(vcpu, &dt);
1371 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
1373 struct descriptor_table dt = { limit, base };
1375 kvm_arch_ops->set_idt(vcpu, &dt);
1378 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
1379 unsigned long *rflags)
1382 *rflags = kvm_arch_ops->get_rflags(vcpu);
1385 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
1387 kvm_arch_ops->decache_cr4_guest_bits(vcpu);
1398 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
1403 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
1404 unsigned long *rflags)
1408 set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
1409 *rflags = kvm_arch_ops->get_rflags(vcpu);
1418 set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
1421 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
1426 * Register the para guest with the host:
1428 static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
1430 struct kvm_vcpu_para_state *para_state;
1431 hpa_t para_state_hpa, hypercall_hpa;
1432 struct page *para_state_page;
1433 unsigned char *hypercall;
1434 gpa_t hypercall_gpa;
1436 printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
1437 printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);
1440 * Needs to be page aligned:
1442 if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
1445 para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
1446 printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
1447 if (is_error_hpa(para_state_hpa))
1450 mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
1451 para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
1452 para_state = kmap_atomic(para_state_page, KM_USER0);
1454 printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
1455 printk(KERN_DEBUG ".... size: %d\n", para_state->size);
1457 para_state->host_version = KVM_PARA_API_VERSION;
1459 * We cannot support guests that try to register themselves
1460 * with a newer API version than the host supports:
1462 if (para_state->guest_version > KVM_PARA_API_VERSION) {
1463 para_state->ret = -KVM_EINVAL;
1464 goto err_kunmap_skip;
1467 hypercall_gpa = para_state->hypercall_gpa;
1468 hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
1469 printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
1470 if (is_error_hpa(hypercall_hpa)) {
1471 para_state->ret = -KVM_EINVAL;
1472 goto err_kunmap_skip;
1475 printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
1476 vcpu->para_state_page = para_state_page;
1477 vcpu->para_state_gpa = para_state_gpa;
1478 vcpu->hypercall_gpa = hypercall_gpa;
1480 mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
1481 hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
1482 KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
1483 kvm_arch_ops->patch_hypercall(vcpu, hypercall);
1484 kunmap_atomic(hypercall, KM_USER1);
1486 para_state->ret = 0;
1488 kunmap_atomic(para_state, KM_USER0);
1494 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1499 case 0xc0010010: /* SYSCFG */
1500 case 0xc0010015: /* HWCR */
1501 case MSR_IA32_PLATFORM_ID:
1502 case MSR_IA32_P5_MC_ADDR:
1503 case MSR_IA32_P5_MC_TYPE:
1504 case MSR_IA32_MC0_CTL:
1505 case MSR_IA32_MCG_STATUS:
1506 case MSR_IA32_MCG_CAP:
1507 case MSR_IA32_MC0_MISC:
1508 case MSR_IA32_MC0_MISC+4:
1509 case MSR_IA32_MC0_MISC+8:
1510 case MSR_IA32_MC0_MISC+12:
1511 case MSR_IA32_MC0_MISC+16:
1512 case MSR_IA32_UCODE_REV:
1513 case MSR_IA32_PERF_STATUS:
1514 case MSR_IA32_EBL_CR_POWERON:
1515 /* MTRR registers */
1517 case 0x200 ... 0x2ff:
1520 case 0xcd: /* fsb frequency */
1523 case MSR_IA32_APICBASE:
1524 data = vcpu->apic_base;
1526 case MSR_IA32_MISC_ENABLE:
1527 data = vcpu->ia32_misc_enable_msr;
1529 #ifdef CONFIG_X86_64
1531 data = vcpu->shadow_efer;
1535 printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
1541 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1544 * Reads an msr value (of 'msr_index') into 'pdata'.
1545 * Returns 0 on success, non-0 otherwise.
1546 * Assumes vcpu_load() was already called.
1548 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1550 return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
1553 #ifdef CONFIG_X86_64
1555 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
1557 if (efer & EFER_RESERVED_BITS) {
1558 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
1565 && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
1566 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
1571 kvm_arch_ops->set_efer(vcpu, efer);
1574 efer |= vcpu->shadow_efer & EFER_LMA;
1576 vcpu->shadow_efer = efer;
1581 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1584 #ifdef CONFIG_X86_64
1586 set_efer(vcpu, data);
1589 case MSR_IA32_MC0_STATUS:
1590 printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
1591 __FUNCTION__, data);
1593 case MSR_IA32_MCG_STATUS:
1594 printk(KERN_WARNING "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
1595 __FUNCTION__, data);
1597 case MSR_IA32_UCODE_REV:
1598 case MSR_IA32_UCODE_WRITE:
1599 case 0x200 ... 0x2ff: /* MTRRs */
1601 case MSR_IA32_APICBASE:
1602 vcpu->apic_base = data;
1604 case MSR_IA32_MISC_ENABLE:
1605 vcpu->ia32_misc_enable_msr = data;
1608 * This is the 'probe whether the host is KVM' logic:
1610 case MSR_KVM_API_MAGIC:
1611 return vcpu_register_para(vcpu, data);
1614 printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
1619 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1622 * Writes msr value into the appropriate "register".
1623 * Returns 0 on success, non-0 otherwise.
1624 * Assumes vcpu_load() was already called.
1626 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1628 return kvm_arch_ops->set_msr(vcpu, msr_index, data);
1631 void kvm_resched(struct kvm_vcpu *vcpu)
1633 if (!need_resched())
1639 EXPORT_SYMBOL_GPL(kvm_resched);
1641 void load_msrs(struct vmx_msr_entry *e, int n)
1645 for (i = 0; i < n; ++i)
1646 wrmsrl(e[i].index, e[i].data);
1648 EXPORT_SYMBOL_GPL(load_msrs);
1650 void save_msrs(struct vmx_msr_entry *e, int n)
1654 for (i = 0; i < n; ++i)
1655 rdmsrl(e[i].index, e[i].data);
1657 EXPORT_SYMBOL_GPL(save_msrs);
1659 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
1663 struct kvm_cpuid_entry *e, *best;
1665 kvm_arch_ops->cache_regs(vcpu);
1666 function = vcpu->regs[VCPU_REGS_RAX];
1667 vcpu->regs[VCPU_REGS_RAX] = 0;
1668 vcpu->regs[VCPU_REGS_RBX] = 0;
1669 vcpu->regs[VCPU_REGS_RCX] = 0;
1670 vcpu->regs[VCPU_REGS_RDX] = 0;
1672 for (i = 0; i < vcpu->cpuid_nent; ++i) {
1673 e = &vcpu->cpuid_entries[i];
1674 if (e->function == function) {
1679 * Both basic or both extended?
1681 if (((e->function ^ function) & 0x80000000) == 0)
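/*
 * Bit 31 separates basic (0x0000xxxx) from extended (0x8000xxxx)
 * leaves; the XOR is zero only when entry and query lie in the same
 * range, so the highest in-range entry becomes the fallback for
 * out-of-range queries.
 */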
1682 if (!best || e->function > best->function)
1686 vcpu->regs[VCPU_REGS_RAX] = best->eax;
1687 vcpu->regs[VCPU_REGS_RBX] = best->ebx;
1688 vcpu->regs[VCPU_REGS_RCX] = best->ecx;
1689 vcpu->regs[VCPU_REGS_RDX] = best->edx;
1691 kvm_arch_ops->decache_regs(vcpu);
1692 kvm_arch_ops->skip_emulated_instruction(vcpu);
1694 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
1696 static int pio_copy_data(struct kvm_vcpu *vcpu)
1698 void *p = vcpu->pio_data;
1701 int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
1703 kvm_arch_ops->vcpu_put(vcpu);
1704 q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
1707 kvm_arch_ops->vcpu_load(vcpu);
1708 free_pio_guest_pages(vcpu);
1711 q += vcpu->pio.guest_page_offset;
1712 bytes = vcpu->pio.size * vcpu->pio.cur_count;
1714 memcpy(q, p, bytes);
1716 memcpy(p, q, bytes);
1717 q -= vcpu->pio.guest_page_offset;
1719 kvm_arch_ops->vcpu_load(vcpu);
1720 free_pio_guest_pages(vcpu);
1724 static int complete_pio(struct kvm_vcpu *vcpu)
1726 struct kvm_pio_request *io = &vcpu->pio;
1730 kvm_arch_ops->cache_regs(vcpu);
1734 memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
1738 r = pio_copy_data(vcpu);
1740 kvm_arch_ops->cache_regs(vcpu);
1747 delta *= io->cur_count;
1749 * The size of the register should really depend on
1750 * current address size.
1752 vcpu->regs[VCPU_REGS_RCX] -= delta;
1758 vcpu->regs[VCPU_REGS_RDI] += delta;
1760 vcpu->regs[VCPU_REGS_RSI] += delta;
1763 kvm_arch_ops->decache_regs(vcpu);
1765 io->count -= io->cur_count;
1769 kvm_arch_ops->skip_emulated_instruction(vcpu);
1773 static void kernel_pio(struct kvm_io_device *pio_dev,
1774 struct kvm_vcpu *vcpu,
1777 /* TODO: String I/O for in kernel device */
1780 kvm_iodevice_read(pio_dev, vcpu->pio.port,
1784 kvm_iodevice_write(pio_dev, vcpu->pio.port,
1789 static void pio_string_write(struct kvm_io_device *pio_dev,
1790 struct kvm_vcpu *vcpu)
1792 struct kvm_pio_request *io = &vcpu->pio;
1793 void *pd = vcpu->pio_data;
1796 for (i = 0; i < io->cur_count; i++) {
1797 kvm_iodevice_write(pio_dev, io->port,
1804 int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1805 int size, unsigned long count, int string, int down,
1806 gva_t address, int rep, unsigned port)
1808 unsigned now, in_page;
1812 struct kvm_io_device *pio_dev;
1814 vcpu->run->exit_reason = KVM_EXIT_IO;
1815 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1816 vcpu->run->io.size = size;
1817 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1818 vcpu->run->io.count = count;
1819 vcpu->run->io.port = port;
1820 vcpu->pio.count = count;
1821 vcpu->pio.cur_count = count;
1822 vcpu->pio.size = size;
1824 vcpu->pio.port = port;
1825 vcpu->pio.string = string;
1826 vcpu->pio.down = down;
1827 vcpu->pio.guest_page_offset = offset_in_page(address);
1828 vcpu->pio.rep = rep;
1830 pio_dev = vcpu_find_pio_dev(vcpu, port);
1832 kvm_arch_ops->cache_regs(vcpu);
1833 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
1834 kvm_arch_ops->decache_regs(vcpu);
1836 kernel_pio(pio_dev, vcpu, vcpu->pio_data);
1844 kvm_arch_ops->skip_emulated_instruction(vcpu);
1848 now = min(count, PAGE_SIZE / size);
1851 in_page = PAGE_SIZE - offset_in_page(address);
1853 in_page = offset_in_page(address) + size;
1854 now = min(count, (unsigned long)in_page / size);
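/*
 * E.g. (hypothetical, 4K pages, forward direction): an outsw of
 * 2-byte units starting at page offset 0xffe gives in_page == 2, so
 * now == 1 and a single unit is transferred before the buffer would
 * cross into the next page.
 */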
1857 * String I/O straddles page boundary. Pin two guest pages
1858 * so that we satisfy atomicity constraints. Do just one
1859 * transaction to avoid complexity.
1866 * String I/O in reverse. Yuck. Kill the guest, fix later.
1868 printk(KERN_ERR "kvm: guest string pio down\n");
1872 vcpu->run->io.count = now;
1873 vcpu->pio.cur_count = now;
1875 for (i = 0; i < nr_pages; ++i) {
1876 spin_lock(&vcpu->kvm->lock);
1877 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
1880 vcpu->pio.guest_pages[i] = page;
1881 spin_unlock(&vcpu->kvm->lock);
1884 free_pio_guest_pages(vcpu);
1889 if (!vcpu->pio.in) {
1890 /* string PIO write */
1891 ret = pio_copy_data(vcpu);
1892 if (ret >= 0 && pio_dev) {
1893 pio_string_write(pio_dev, vcpu);
1895 if (vcpu->pio.count == 0)
1899 printk(KERN_ERR "no string pio read support yet, "
1900 "port %x size %d count %ld\n",
1905 EXPORT_SYMBOL_GPL(kvm_setup_pio);
1907 static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1914 if (vcpu->sigset_active)
1915 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1917 /* re-sync apic's tpr */
1918 vcpu->cr8 = kvm_run->cr8;
1920 if (vcpu->pio.cur_count) {
1921 r = complete_pio(vcpu);
1926 if (vcpu->mmio_needed) {
1927 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
1928 vcpu->mmio_read_completed = 1;
1929 vcpu->mmio_needed = 0;
1930 r = emulate_instruction(vcpu, kvm_run,
1931 vcpu->mmio_fault_cr2, 0);
1932 if (r == EMULATE_DO_MMIO) {
1934 * Read-modify-write. Back to userspace.
1936 kvm_run->exit_reason = KVM_EXIT_MMIO;
1942 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
1943 kvm_arch_ops->cache_regs(vcpu);
1944 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
1945 kvm_arch_ops->decache_regs(vcpu);
1948 r = kvm_arch_ops->run(vcpu, kvm_run);
1951 if (vcpu->sigset_active)
1952 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1958 static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
1959 struct kvm_regs *regs)
1963 kvm_arch_ops->cache_regs(vcpu);
1965 regs->rax = vcpu->regs[VCPU_REGS_RAX];
1966 regs->rbx = vcpu->regs[VCPU_REGS_RBX];
1967 regs->rcx = vcpu->regs[VCPU_REGS_RCX];
1968 regs->rdx = vcpu->regs[VCPU_REGS_RDX];
1969 regs->rsi = vcpu->regs[VCPU_REGS_RSI];
1970 regs->rdi = vcpu->regs[VCPU_REGS_RDI];
1971 regs->rsp = vcpu->regs[VCPU_REGS_RSP];
1972 regs->rbp = vcpu->regs[VCPU_REGS_RBP];
1973 #ifdef CONFIG_X86_64
1974 regs->r8 = vcpu->regs[VCPU_REGS_R8];
1975 regs->r9 = vcpu->regs[VCPU_REGS_R9];
1976 regs->r10 = vcpu->regs[VCPU_REGS_R10];
1977 regs->r11 = vcpu->regs[VCPU_REGS_R11];
1978 regs->r12 = vcpu->regs[VCPU_REGS_R12];
1979 regs->r13 = vcpu->regs[VCPU_REGS_R13];
1980 regs->r14 = vcpu->regs[VCPU_REGS_R14];
1981 regs->r15 = vcpu->regs[VCPU_REGS_R15];
1984 regs->rip = vcpu->rip;
1985 regs->rflags = kvm_arch_ops->get_rflags(vcpu);
1988 * Don't leak debug flags in case they were set for guest debugging
1990 if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
1991 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1998 static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
1999 struct kvm_regs *regs)
2003 vcpu->regs[VCPU_REGS_RAX] = regs->rax;
2004 vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
2005 vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
2006 vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
2007 vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
2008 vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
2009 vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
2010 vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
2011 #ifdef CONFIG_X86_64
2012 vcpu->regs[VCPU_REGS_R8] = regs->r8;
2013 vcpu->regs[VCPU_REGS_R9] = regs->r9;
2014 vcpu->regs[VCPU_REGS_R10] = regs->r10;
2015 vcpu->regs[VCPU_REGS_R11] = regs->r11;
2016 vcpu->regs[VCPU_REGS_R12] = regs->r12;
2017 vcpu->regs[VCPU_REGS_R13] = regs->r13;
2018 vcpu->regs[VCPU_REGS_R14] = regs->r14;
2019 vcpu->regs[VCPU_REGS_R15] = regs->r15;
2022 vcpu->rip = regs->rip;
2023 kvm_arch_ops->set_rflags(vcpu, regs->rflags);
2025 kvm_arch_ops->decache_regs(vcpu);
2032 static void get_segment(struct kvm_vcpu *vcpu,
2033 struct kvm_segment *var, int seg)
2035 return kvm_arch_ops->get_segment(vcpu, var, seg);
2038 static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2039 struct kvm_sregs *sregs)
2041 struct descriptor_table dt;
2045 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2046 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2047 get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2048 get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2049 get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2050 get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2052 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2053 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2055 kvm_arch_ops->get_idt(vcpu, &dt);
2056 sregs->idt.limit = dt.limit;
2057 sregs->idt.base = dt.base;
2058 kvm_arch_ops->get_gdt(vcpu, &dt);
2059 sregs->gdt.limit = dt.limit;
2060 sregs->gdt.base = dt.base;
2062 kvm_arch_ops->decache_cr4_guest_bits(vcpu);
2063 sregs->cr0 = vcpu->cr0;
2064 sregs->cr2 = vcpu->cr2;
2065 sregs->cr3 = vcpu->cr3;
2066 sregs->cr4 = vcpu->cr4;
2067 sregs->cr8 = vcpu->cr8;
2068 sregs->efer = vcpu->shadow_efer;
2069 sregs->apic_base = vcpu->apic_base;
2071 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
2072 sizeof sregs->interrupt_bitmap);
2079 static void set_segment(struct kvm_vcpu *vcpu,
2080 struct kvm_segment *var, int seg)
2082 return kvm_arch_ops->set_segment(vcpu, var, seg);
2085 static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2086 struct kvm_sregs *sregs)
2088 int mmu_reset_needed = 0;
2090 struct descriptor_table dt;
2094 dt.limit = sregs->idt.limit;
2095 dt.base = sregs->idt.base;
2096 kvm_arch_ops->set_idt(vcpu, &dt);
2097 dt.limit = sregs->gdt.limit;
2098 dt.base = sregs->gdt.base;
2099 kvm_arch_ops->set_gdt(vcpu, &dt);
2101 vcpu->cr2 = sregs->cr2;
2102 mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
2103 vcpu->cr3 = sregs->cr3;
2105 vcpu->cr8 = sregs->cr8;
2107 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
2108 #ifdef CONFIG_X86_64
2109 kvm_arch_ops->set_efer(vcpu, sregs->efer);
2111 vcpu->apic_base = sregs->apic_base;
2113 kvm_arch_ops->decache_cr4_guest_bits(vcpu);
2115 mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
2116 kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
2118 mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
2119 kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
2120 if (!is_long_mode(vcpu) && is_pae(vcpu))
2121 load_pdptrs(vcpu, vcpu->cr3);
2123 if (mmu_reset_needed)
2124 kvm_mmu_reset_context(vcpu);
2126 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
2127 sizeof vcpu->irq_pending);
2128 vcpu->irq_summary = 0;
2129 for (i = 0; i < NR_IRQ_WORDS; ++i)
2130 if (vcpu->irq_pending[i])
2131 __set_bit(i, &vcpu->irq_summary);
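/*
 * irq_pending is a two-level bitmap: one bit per interrupt vector in
 * irq_pending[], plus one summary bit per word, so "any interrupt
 * pending?" is a single test of irq_summary.  The loop above rebuilds
 * the summary after userspace replaced the bitmap wholesale.
 */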
2133 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2134 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2135 set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2136 set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2137 set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2138 set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2140 set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2141 set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2149 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
2150 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
2152 * This list is modified at module load time to reflect the
2153 * capabilities of the host cpu.
2155 static u32 msrs_to_save[] = {
2156 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
2158 #ifdef CONFIG_X86_64
2159 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
2161 MSR_IA32_TIME_STAMP_COUNTER,
2164 static unsigned num_msrs_to_save;
2166 static u32 emulated_msrs[] = {
2167 MSR_IA32_MISC_ENABLE,
2170 static __init void kvm_init_msr_list(void)
2175 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2176 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2179 msrs_to_save[j] = msrs_to_save[i];
2182 num_msrs_to_save = j;
2186 * Adapt set_msr() to msr_io()'s calling convention
2188 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2190 return kvm_set_msr(vcpu, index, *data);
2194 * Read or write a bunch of msrs. All parameters are kernel addresses.
2196 * @return number of msrs set successfully.
2198 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2199 struct kvm_msr_entry *entries,
2200 int (*do_msr)(struct kvm_vcpu *vcpu,
2201 unsigned index, u64 *data))
2207 for (i = 0; i < msrs->nmsrs; ++i)
2208 if (do_msr(vcpu, entries[i].index, &entries[i].data))
2217 * Read or write a bunch of msrs. Parameters are user addresses.
2219 * @return number of msrs set successfully.
2221 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2222 int (*do_msr)(struct kvm_vcpu *vcpu,
2223 unsigned index, u64 *data),
2226 struct kvm_msrs msrs;
2227 struct kvm_msr_entry *entries;
2232 if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2236 if (msrs.nmsrs >= MAX_IO_MSRS)
2240 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2241 entries = vmalloc(size);
2246 if (copy_from_user(entries, user_msrs->entries, size))
2249 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2254 if (writeback && copy_to_user(user_msrs->entries, entries, size))
2266 * Translate a guest virtual address to a guest physical address.
2268 static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2269 struct kvm_translation *tr)
2271 unsigned long vaddr = tr->linear_address;
2275 spin_lock(&vcpu->kvm->lock);
2276 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
2277 tr->physical_address = gpa;
2278 tr->valid = gpa != UNMAPPED_GVA;
2281 spin_unlock(&vcpu->kvm->lock);
2287 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2288 struct kvm_interrupt *irq)
2290 if (irq->irq < 0 || irq->irq >= 256)
2294 set_bit(irq->irq, vcpu->irq_pending);
2295 set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
2302 static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
2303 struct kvm_debug_guest *dbg)
2309 r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
2316 static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
2317 unsigned long address,
2320 struct kvm_vcpu *vcpu = vma->vm_file->private_data;
2321 unsigned long pgoff;
2324 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2326 page = virt_to_page(vcpu->run);
2327 else if (pgoff == KVM_PIO_PAGE_OFFSET)
2328 page = virt_to_page(vcpu->pio_data);
2330 return NOPAGE_SIGBUS;
2333 *type = VM_FAULT_MINOR;
2338 static struct vm_operations_struct kvm_vcpu_vm_ops = {
2339 .nopage = kvm_vcpu_nopage,
2342 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
2344 vma->vm_ops = &kvm_vcpu_vm_ops;
2348 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
2350 struct kvm_vcpu *vcpu = filp->private_data;
2352 fput(vcpu->kvm->filp);
2356 static struct file_operations kvm_vcpu_fops = {
2357 .release = kvm_vcpu_release,
2358 .unlocked_ioctl = kvm_vcpu_ioctl,
2359 .compat_ioctl = kvm_vcpu_ioctl,
2360 .mmap = kvm_vcpu_mmap,
2364 * Allocates an inode for the vcpu.
2366 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2369 struct inode *inode;
2372 r = anon_inode_getfd(&fd, &inode, &file,
2373 "kvm-vcpu", &kvm_vcpu_fops, vcpu);
2376 atomic_inc(&vcpu->kvm->filp->f_count);
2381 * Creates some virtual cpus. Good luck creating more than one.
2383 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
2386 struct kvm_vcpu *vcpu;
2393 vcpu = &kvm->vcpus[n];
2396 mutex_lock(&vcpu->mutex);
2399 mutex_unlock(&vcpu->mutex);
2403 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2407 vcpu->run = page_address(page);
2409 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2413 vcpu->pio_data = page_address(page);
2415 vcpu->host_fx_image = (char *)ALIGN((hva_t)vcpu->fx_buf,
2417 vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
2420 r = kvm_arch_ops->vcpu_create(vcpu);
2422 goto out_free_vcpus;
2424 r = kvm_mmu_create(vcpu);
2426 goto out_free_vcpus;
2428 kvm_arch_ops->vcpu_load(vcpu);
2429 r = kvm_mmu_setup(vcpu);
2431 r = kvm_arch_ops->vcpu_setup(vcpu);
2435 goto out_free_vcpus;
2437 r = create_vcpu_fd(vcpu);
2439 goto out_free_vcpus;
2441 spin_lock(&kvm_lock);
2442 if (n >= kvm->nvcpus)
2443 kvm->nvcpus = n + 1;
2444 spin_unlock(&kvm_lock);
2449 kvm_free_vcpu(vcpu);
2451 free_page((unsigned long)vcpu->run);
2454 mutex_unlock(&vcpu->mutex);
2459 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
2463 struct kvm_cpuid_entry *e, *entry;
2465 rdmsrl(MSR_EFER, efer);
2467 for (i = 0; i < vcpu->cpuid_nent; ++i) {
2468 e = &vcpu->cpuid_entries[i];
2469 if (e->function == 0x80000001) {
2474 if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
2475 entry->edx &= ~(1 << 20);
2476 printk(KERN_INFO "kvm: guest NX capability removed\n");
2480 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2481 struct kvm_cpuid *cpuid,
2482 struct kvm_cpuid_entry __user *entries)
2487 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2490 if (copy_from_user(&vcpu->cpuid_entries, entries,
2491 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
2493 vcpu->cpuid_nent = cpuid->nent;
2494 cpuid_fix_nx_cap(vcpu);
2501 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
2504 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2505 vcpu->sigset_active = 1;
2506 vcpu->sigset = *sigset;
2508 vcpu->sigset_active = 0;
2513 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
2514 * we have asm/x86/processor.h
2525 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
2526 #ifdef CONFIG_X86_64
2527 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
2529 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
2533 static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2535 struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
2539 memcpy(fpu->fpr, fxsave->st_space, 128);
2540 fpu->fcw = fxsave->cwd;
2541 fpu->fsw = fxsave->swd;
2542 fpu->ftwx = fxsave->twd;
2543 fpu->last_opcode = fxsave->fop;
2544 fpu->last_ip = fxsave->rip;
2545 fpu->last_dp = fxsave->rdp;
2546 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
2553 static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2555 struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
2559 memcpy(fxsave->st_space, fpu->fpr, 128);
2560 fxsave->cwd = fpu->fcw;
2561 fxsave->swd = fpu->fsw;
2562 fxsave->twd = fpu->ftwx;
2563 fxsave->fop = fpu->last_opcode;
2564 fxsave->rip = fpu->last_ip;
2565 fxsave->rdp = fpu->last_dp;
2566 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
2573 static long kvm_vcpu_ioctl(struct file *filp,
2574 unsigned int ioctl, unsigned long arg)
2576 struct kvm_vcpu *vcpu = filp->private_data;
2577 void __user *argp = (void __user *)arg;
2585 r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
2587 case KVM_GET_REGS: {
2588 struct kvm_regs kvm_regs;
2590 memset(&kvm_regs, 0, sizeof kvm_regs);
2591 r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
2595 if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
2600 case KVM_SET_REGS: {
2601 struct kvm_regs kvm_regs;
2604 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
2606 r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
2612 case KVM_GET_SREGS: {
2613 struct kvm_sregs kvm_sregs;
2615 memset(&kvm_sregs, 0, sizeof kvm_sregs);
2616 r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
2620 if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
2625 case KVM_SET_SREGS: {
2626 struct kvm_sregs kvm_sregs;
2629 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
2631 r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
2637 case KVM_TRANSLATE: {
2638 struct kvm_translation tr;
2641 if (copy_from_user(&tr, argp, sizeof tr))
2643 r = kvm_vcpu_ioctl_translate(vcpu, &tr);
2647 if (copy_to_user(argp, &tr, sizeof tr))
2652 case KVM_INTERRUPT: {
2653 struct kvm_interrupt irq;
2656 if (copy_from_user(&irq, argp, sizeof irq))
2658 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2664 case KVM_DEBUG_GUEST: {
2665 struct kvm_debug_guest dbg;
2668 if (copy_from_user(&dbg, argp, sizeof dbg))
2670 r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
2677 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2680 r = msr_io(vcpu, argp, do_set_msr, 0);
2682 case KVM_SET_CPUID: {
2683 struct kvm_cpuid __user *cpuid_arg = argp;
2684 struct kvm_cpuid cpuid;
2687 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2689 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2694 case KVM_SET_SIGNAL_MASK: {
2695 struct kvm_signal_mask __user *sigmask_arg = argp;
2696 struct kvm_signal_mask kvm_sigmask;
2697 sigset_t sigset, *p;
2702 if (copy_from_user(&kvm_sigmask, argp,
2703 sizeof kvm_sigmask))
2706 if (kvm_sigmask.len != sizeof sigset)
2709 if (copy_from_user(&sigset, sigmask_arg->sigset,
2714 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
2720 memset(&fpu, 0, sizeof fpu);
2721 r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
2725 if (copy_to_user(argp, &fpu, sizeof fpu))
2734 if (copy_from_user(&fpu, argp, sizeof fpu))
2736 r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
2749 static long kvm_vm_ioctl(struct file *filp,
2750 unsigned int ioctl, unsigned long arg)
2752 struct kvm *kvm = filp->private_data;
2753 void __user *argp = (void __user *)arg;
2757 case KVM_CREATE_VCPU:
2758 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
2762 case KVM_SET_MEMORY_REGION: {
2763 struct kvm_memory_region kvm_mem;
2766 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
2768 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
2773 case KVM_GET_DIRTY_LOG: {
2774 struct kvm_dirty_log log;
2777 if (copy_from_user(&log, argp, sizeof log))
2779 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2784 case KVM_SET_MEMORY_ALIAS: {
2785 struct kvm_memory_alias alias;
2788 if (copy_from_user(&alias, argp, sizeof alias))
2790 r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
2802 static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
2803 unsigned long address,
2806 struct kvm *kvm = vma->vm_file->private_data;
2807 unsigned long pgoff;
2810 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2811 page = gfn_to_page(kvm, pgoff);
2813 return NOPAGE_SIGBUS;
2816 *type = VM_FAULT_MINOR;
2821 static struct vm_operations_struct kvm_vm_vm_ops = {
2822 .nopage = kvm_vm_nopage,
2825 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
2827 vma->vm_ops = &kvm_vm_vm_ops;
2831 static struct file_operations kvm_vm_fops = {
2832 .release = kvm_vm_release,
2833 .unlocked_ioctl = kvm_vm_ioctl,
2834 .compat_ioctl = kvm_vm_ioctl,
2835 .mmap = kvm_vm_mmap,
2838 static int kvm_dev_ioctl_create_vm(void)
2841 struct inode *inode;
2845 kvm = kvm_create_vm();
2847 return PTR_ERR(kvm);
2848 r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
2850 kvm_destroy_vm(kvm);
2859 static long kvm_dev_ioctl(struct file *filp,
2860 unsigned int ioctl, unsigned long arg)
2862 void __user *argp = (void __user *)arg;
2866 case KVM_GET_API_VERSION:
2870 r = KVM_API_VERSION;
2876 r = kvm_dev_ioctl_create_vm();
2878 case KVM_GET_MSR_INDEX_LIST: {
2879 struct kvm_msr_list __user *user_msr_list = argp;
2880 struct kvm_msr_list msr_list;
2884 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2887 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
2888 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2891 if (n < num_msrs_to_save)
2894 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2895 num_msrs_to_save * sizeof(u32)))
2897 if (copy_to_user(user_msr_list->indices
2898 + num_msrs_to_save * sizeof(u32),
2900 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
2905 case KVM_CHECK_EXTENSION:
2907 * No extensions defined at present.
2911 case KVM_GET_VCPU_MMAP_SIZE:
2924 static struct file_operations kvm_chardev_ops = {
2925 .open = kvm_dev_open,
2926 .release = kvm_dev_release,
2927 .unlocked_ioctl = kvm_dev_ioctl,
2928 .compat_ioctl = kvm_dev_ioctl,
2931 static struct miscdevice kvm_dev = {
2938 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
2941 static void decache_vcpus_on_cpu(int cpu)
2944 struct kvm_vcpu *vcpu;
2947 spin_lock(&kvm_lock);
2948 list_for_each_entry(vm, &vm_list, vm_list)
2949 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2950 vcpu = &vm->vcpus[i];
2952 * If the vcpu is locked, then it is running on some
2953 * other cpu and therefore it is not cached on the
2956 * If it's not locked, check the last cpu it executed
2959 if (mutex_trylock(&vcpu->mutex)) {
2960 if (vcpu->cpu == cpu) {
2961 kvm_arch_ops->vcpu_decache(vcpu);
2964 mutex_unlock(&vcpu->mutex);
2967 spin_unlock(&kvm_lock);
2970 static void hardware_enable(void *junk)
2972 int cpu = raw_smp_processor_id();
2974 if (cpu_isset(cpu, cpus_hardware_enabled))
2976 cpu_set(cpu, cpus_hardware_enabled);
2977 kvm_arch_ops->hardware_enable(NULL);
2980 static void hardware_disable(void *junk)
2982 int cpu = raw_smp_processor_id();
2984 if (!cpu_isset(cpu, cpus_hardware_enabled))
2986 cpu_clear(cpu, cpus_hardware_enabled);
2987 decache_vcpus_on_cpu(cpu);
2988 kvm_arch_ops->hardware_disable(NULL);
2991 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2998 case CPU_DYING_FROZEN:
2999 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
3001 hardware_disable(NULL);
3003 case CPU_UP_CANCELED:
3004 case CPU_UP_CANCELED_FROZEN:
3005 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
3007 smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
3010 case CPU_ONLINE_FROZEN:
3011 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
3013 smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
3019 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
3022 if (val == SYS_RESTART) {
3024 * Some (well, at least mine) BIOSes hang on reboot if
3027 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
3028 on_each_cpu(hardware_disable, NULL, 0, 1);
3033 static struct notifier_block kvm_reboot_notifier = {
3034 .notifier_call = kvm_reboot,
3038 void kvm_io_bus_init(struct kvm_io_bus *bus)
3040 memset(bus, 0, sizeof(*bus));
3043 void kvm_io_bus_destroy(struct kvm_io_bus *bus)
3047 for (i = 0; i < bus->dev_count; i++) {
3048 struct kvm_io_device *pos = bus->devs[i];
3050 kvm_iodevice_destructor(pos);
3054 struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
3058 for (i = 0; i < bus->dev_count; i++) {
3059 struct kvm_io_device *pos = bus->devs[i];
3061 if (pos->in_range(pos, addr))
3068 void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
3070 BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
3072 bus->devs[bus->dev_count++] = dev;
3075 static struct notifier_block kvm_cpu_notifier = {
3076 .notifier_call = kvm_cpu_hotplug,
3077 .priority = 20, /* must be > scheduler priority */
3080 static u64 stat_get(void *_offset)
3082 unsigned offset = (long)_offset;
3085 struct kvm_vcpu *vcpu;
3088 spin_lock(&kvm_lock);
3089 list_for_each_entry(kvm, &vm_list, vm_list)
3090 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
3091 vcpu = &kvm->vcpus[i];
3092 total += *(u32 *)((void *)vcpu + offset);
3094 spin_unlock(&kvm_lock);
3098 static void stat_set(void *offset, u64 val)
3102 DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, stat_set, "%llu\n");
3104 static __init void kvm_init_debug(void)
3106 struct kvm_stats_debugfs_item *p;
3108 debugfs_dir = debugfs_create_dir("kvm", NULL);
3109 for (p = debugfs_entries; p->name; ++p)
3110 p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
3111 (void *)(long)p->offset,
3115 static void kvm_exit_debug(void)
3117 struct kvm_stats_debugfs_item *p;
3119 for (p = debugfs_entries; p->name; ++p)
3120 debugfs_remove(p->dentry);
3121 debugfs_remove(debugfs_dir);
3124 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
3126 hardware_disable(NULL);
3130 static int kvm_resume(struct sys_device *dev)
3132 hardware_enable(NULL);
3136 static struct sysdev_class kvm_sysdev_class = {
3137 set_kset_name("kvm"),
3138 .suspend = kvm_suspend,
3139 .resume = kvm_resume,
3142 static struct sys_device kvm_sysdev = {
3144 .cls = &kvm_sysdev_class,
3147 hpa_t bad_page_address;
3149 int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
3154 printk(KERN_ERR "kvm: already loaded the other module\n");
3158 if (!ops->cpu_has_kvm_support()) {
3159 printk(KERN_ERR "kvm: no hardware support\n");
3162 if (ops->disabled_by_bios()) {
3163 printk(KERN_ERR "kvm: disabled by bios\n");
3169 r = kvm_arch_ops->hardware_setup();
3173 on_each_cpu(hardware_enable, NULL, 0, 1);
3174 r = register_cpu_notifier(&kvm_cpu_notifier);
3177 register_reboot_notifier(&kvm_reboot_notifier);
3179 r = sysdev_class_register(&kvm_sysdev_class);
3183 r = sysdev_register(&kvm_sysdev);
3187 kvm_chardev_ops.owner = module;
3189 r = misc_register(&kvm_dev);
3191 printk(KERN_ERR "kvm: misc device register failed\n");
3198 sysdev_unregister(&kvm_sysdev);
3200 sysdev_class_unregister(&kvm_sysdev_class);
3202 unregister_reboot_notifier(&kvm_reboot_notifier);
3203 unregister_cpu_notifier(&kvm_cpu_notifier);
3205 on_each_cpu(hardware_disable, NULL, 0, 1);
3206 kvm_arch_ops->hardware_unsetup();
3208 kvm_arch_ops = NULL;
3212 void kvm_exit_arch(void)
3214 misc_deregister(&kvm_dev);
3215 sysdev_unregister(&kvm_sysdev);
3216 sysdev_class_unregister(&kvm_sysdev_class);
3217 unregister_reboot_notifier(&kvm_reboot_notifier);
3218 unregister_cpu_notifier(&kvm_cpu_notifier);
3219 on_each_cpu(hardware_disable, NULL, 0, 1);
3220 kvm_arch_ops->hardware_unsetup();
3221 kvm_arch_ops = NULL;
3224 static __init int kvm_init(void)
3226 static struct page *bad_page;
3229 r = kvm_mmu_module_init();
3235 kvm_init_msr_list();
3237 if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
3242 bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
3243 memset(__va(bad_page_address), 0, PAGE_SIZE);
3249 kvm_mmu_module_exit();
3254 static __exit void kvm_exit(void)
3257 __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
3258 kvm_mmu_module_exit();
3261 module_init(kvm_init)
3262 module_exit(kvm_exit)
3264 EXPORT_SYMBOL_GPL(kvm_init_arch);
3265 EXPORT_SYMBOL_GPL(kvm_exit_arch);