*/
#include "kvm.h"
+#include "x86_emulate.h"
+#include "irq.h"
#include "vmx.h"
#include "segment_descriptor.h"
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <linux/profile.h>
#include <linux/sched.h>
#include <asm/io.h>
struct vcpu_vmx {
struct kvm_vcpu vcpu;
int launched;
+ u8 fail;
struct kvm_msr_entry *guest_msrs;
struct kvm_msr_entry *host_msrs;
int nmsrs;
struct {
int loaded;
u16 fs_sel, gs_sel, ldt_sel;
- int fs_gs_ldt_reload_needed;
+ int gs_ldt_reload_needed;
+ int fs_reload_needed;
}host_state;
};
return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
}
-static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
+static inline int msr_efer_need_save_restore(struct vcpu_vmx *vmx)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
int efer_offset = vmx->msr_offset_efer;
return msr_efer_save_restore_bits(vmx->host_msrs[efer_offset]) !=
msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
-static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
+static inline int cpu_has_vmx_tpr_shadow(void)
+{
+ return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
+}
+
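+/* Use the TPR shadow only when the CPU supports it and the local APIC is emulated in the kernel. */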
+static inline int vm_need_tpr_shadow(struct kvm *kvm)
+{
+ return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
+}
+
+static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
int i;
for (i = 0; i < vmx->nmsrs; ++i)
return -1;
}
-static struct kvm_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
int i;
- i = __find_msr_index(vcpu, msr);
+ i = __find_msr_index(vmx, msr);
if (i >= 0)
return &vmx->guest_msrs[i];
return NULL;
static void __vcpu_clear(void *arg)
{
- struct kvm_vcpu *vcpu = arg;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vcpu_vmx *vmx = arg;
int cpu = raw_smp_processor_id();
- if (vcpu->cpu == cpu)
+ if (vmx->vcpu.cpu == cpu)
vmcs_clear(vmx->vmcs);
if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
- rdtscll(vcpu->host_tsc);
+ rdtscll(vmx->vcpu.host_tsc);
}
-static void vcpu_clear(struct kvm_vcpu *vcpu)
+static void vcpu_clear(struct vcpu_vmx *vmx)
{
- if (vcpu->cpu != raw_smp_processor_id() && vcpu->cpu != -1)
- smp_call_function_single(vcpu->cpu, __vcpu_clear, vcpu, 0, 1);
+ if (vmx->vcpu.cpu != raw_smp_processor_id() && vmx->vcpu.cpu != -1)
+ smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear,
+ vmx, 0, 1);
else
- __vcpu_clear(vcpu);
- to_vmx(vcpu)->launched = 0;
+ __vcpu_clear(vmx);
+ vmx->launched = 0;
}
static unsigned long vmcs_readl(unsigned long field)
#endif
}
-static void load_transition_efer(struct kvm_vcpu *vcpu)
+static void load_transition_efer(struct vcpu_vmx *vmx)
{
u64 trans_efer;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
int efer_offset = vmx->msr_offset_efer;
trans_efer = vmx->host_msrs[efer_offset].data;
trans_efer &= ~EFER_SAVE_RESTORE_BITS;
trans_efer |= msr_efer_save_restore_bits(vmx->guest_msrs[efer_offset]);
wrmsrl(MSR_EFER, trans_efer);
- vcpu->stat.efer_reload++;
+ vmx->vcpu.stat.efer_reload++;
}
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
* allow segment selectors with cpl > 0 or ti == 1.
*/
vmx->host_state.ldt_sel = read_ldt();
- vmx->host_state.fs_gs_ldt_reload_needed = vmx->host_state.ldt_sel;
+ vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
vmx->host_state.fs_sel = read_fs();
- if (!(vmx->host_state.fs_sel & 7))
+ if (!(vmx->host_state.fs_sel & 7)) {
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
- else {
+ vmx->host_state.fs_reload_needed = 0;
+ } else {
vmcs_write16(HOST_FS_SELECTOR, 0);
- vmx->host_state.fs_gs_ldt_reload_needed = 1;
+ vmx->host_state.fs_reload_needed = 1;
}
vmx->host_state.gs_sel = read_gs();
if (!(vmx->host_state.gs_sel & 7))
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
else {
vmcs_write16(HOST_GS_SELECTOR, 0);
- vmx->host_state.fs_gs_ldt_reload_needed = 1;
+ vmx->host_state.gs_ldt_reload_needed = 1;
}
#ifdef CONFIG_X86_64
#endif
#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
+ if (is_long_mode(&vmx->vcpu)) {
save_msrs(vmx->host_msrs +
vmx->msr_offset_kernel_gs_base, 1);
}
#endif
load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
- if (msr_efer_need_save_restore(vcpu))
- load_transition_efer(vcpu);
+ if (msr_efer_need_save_restore(vmx))
+ load_transition_efer(vmx);
}
-static void vmx_load_host_state(struct kvm_vcpu *vcpu)
+static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long flags;
if (!vmx->host_state.loaded)
return;
vmx->host_state.loaded = 0;
- if (vmx->host_state.fs_gs_ldt_reload_needed) {
- load_ldt(vmx->host_state.ldt_sel);
+ if (vmx->host_state.fs_reload_needed)
load_fs(vmx->host_state.fs_sel);
+ if (vmx->host_state.gs_ldt_reload_needed) {
+ load_ldt(vmx->host_state.ldt_sel);
/*
* If we have to reload gs, we must take care to
* preserve our gs base.
wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
local_irq_restore(flags);
-
- reload_tss();
}
+ reload_tss();
save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
load_msrs(vmx->host_msrs, vmx->save_nmsrs);
- if (msr_efer_need_save_restore(vcpu))
+ if (msr_efer_need_save_restore(vmx))
load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
}
u64 phys_addr = __pa(vmx->vmcs);
u64 tsc_this, delta;
- if (vcpu->cpu != cpu)
- vcpu_clear(vcpu);
+ if (vcpu->cpu != cpu) {
+ vcpu_clear(vmx);
+ kvm_migrate_apic_timer(vcpu);
+ }
if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
u8 error;
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
- vmx_load_host_state(vcpu);
+ vmx_load_host_state(to_vmx(vcpu));
kvm_put_guest_fpu(vcpu);
}
static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
{
- vcpu_clear(vcpu);
+ vcpu_clear(to_vmx(vcpu));
}
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
+ if (vcpu->rmode.active)
+ rflags |= IOPL_MASK | X86_EFLAGS_VM;
vmcs_writel(GUEST_RFLAGS, rflags);
}
/*
* Swap MSR entry in host/guest MSR entry array.
*/
-void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
+#ifdef CONFIG_X86_64
+static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_msr_entry tmp;
tmp = vmx->guest_msrs[to];
vmx->host_msrs[to] = vmx->host_msrs[from];
vmx->host_msrs[from] = tmp;
}
+#endif
/*
* Set up the vmcs to automatically save and restore system
* msrs. Don't touch the 64-bit msrs if the guest is in legacy
* mode, as fiddling with msrs is very expensive.
*/
-static void setup_msrs(struct kvm_vcpu *vcpu)
+static void setup_msrs(struct vcpu_vmx *vmx)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
int save_nmsrs;
save_nmsrs = 0;
#ifdef CONFIG_X86_64
- if (is_long_mode(vcpu)) {
+ if (is_long_mode(&vmx->vcpu)) {
int index;
- index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
+ index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
if (index >= 0)
- move_msr_up(vcpu, index, save_nmsrs++);
- index = __find_msr_index(vcpu, MSR_LSTAR);
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_LSTAR);
if (index >= 0)
- move_msr_up(vcpu, index, save_nmsrs++);
- index = __find_msr_index(vcpu, MSR_CSTAR);
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_CSTAR);
if (index >= 0)
- move_msr_up(vcpu, index, save_nmsrs++);
- index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
if (index >= 0)
- move_msr_up(vcpu, index, save_nmsrs++);
+ move_msr_up(vmx, index, save_nmsrs++);
/*
* MSR_K6_STAR is only needed on long mode guests, and only
* if efer.sce is enabled.
*/
- index = __find_msr_index(vcpu, MSR_K6_STAR);
- if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
- move_msr_up(vcpu, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_K6_STAR);
+ if ((index >= 0) && (vmx->vcpu.shadow_efer & EFER_SCE))
+ move_msr_up(vmx, index, save_nmsrs++);
}
#endif
vmx->save_nmsrs = save_nmsrs;
#ifdef CONFIG_X86_64
vmx->msr_offset_kernel_gs_base =
- __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
+ __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
#endif
- vmx->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
+ vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
}
/*
data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
default:
- msr = find_msr_entry(vcpu, msr_index);
+ msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
data = msr->data;
break;
case MSR_EFER:
ret = kvm_set_msr_common(vcpu, msr_index, data);
if (vmx->host_state.loaded)
- load_transition_efer(vcpu);
+ load_transition_efer(vmx);
break;
case MSR_FS_BASE:
vmcs_writel(GUEST_FS_BASE, data);
guest_write_tsc(data);
break;
default:
- msr = find_msr_entry(vcpu, msr_index);
+ msr = find_msr_entry(vmx, msr_index);
if (msr) {
msr->data = data;
if (vmx->host_state.loaded)
return 0;
}
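+/*
+ * Return the vector of the interrupt that was being delivered when the last
+ * exit occurred, or -1 if no event was pending.
+ */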
+static int vmx_get_irq(struct kvm_vcpu *vcpu)
+{
+ u32 idtv_info_field;
+
+ idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+ if (idtv_info_field & INTR_INFO_VALID_MASK) {
+ if (is_external_interrupt(idtv_info_field))
+ return idtv_info_field & VECTORING_INFO_VECTOR_MASK;
+ else
+ printk("pending exception: not handled yet\n");
+ }
+ return -1;
+}
+
static __init int cpu_has_kvm_support(void)
{
unsigned long ecx = cpuid_ecx(1);
/* Ensure minimum (required) set of control bits are supported. */
if (ctl_min & ~ctl)
- return -1;
+ return -EIO;
*result = ctl;
return 0;
}
-static __init int setup_vmcs_config(void)
+static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
u32 vmx_msr_low, vmx_msr_high;
u32 min, opt;
opt = 0;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
&_pin_based_exec_control) < 0)
- return -1;
+ return -EIO;
min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
CPU_BASED_USE_IO_BITMAPS |
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING;
+#ifdef CONFIG_X86_64
+ opt = CPU_BASED_TPR_SHADOW;
+#else
opt = 0;
+#endif
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
&_cpu_based_exec_control) < 0)
- return -1;
+ return -EIO;
+#ifdef CONFIG_X86_64
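+ /* With a TPR shadow, CR8 loads and stores need not cause VM exits. */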
+ if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
+ _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
+ ~CPU_BASED_CR8_STORE_EXITING;
+#endif
min = 0;
#ifdef CONFIG_X86_64
opt = 0;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
&_vmexit_control) < 0)
- return -1;
+ return -EIO;
min = opt = 0;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
&_vmentry_control) < 0)
- return -1;
+ return -EIO;
rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
- return -1;
+ return -EIO;
#ifdef CONFIG_X86_64
/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
if (vmx_msr_high & (1u<<16))
- return -1;
+ return -EIO;
#endif
/* Require Write-Back (WB) memory type for VMCS accesses. */
if (((vmx_msr_high >> 18) & 15) != 6)
- return -1;
+ return -EIO;
- vmcs_config.size = vmx_msr_high & 0x1fff;
- vmcs_config.order = get_order(vmcs_config.size);
- vmcs_config.revision_id = vmx_msr_low;
+ vmcs_conf->size = vmx_msr_high & 0x1fff;
+ vmcs_conf->order = get_order(vmcs_config.size);
+ vmcs_conf->revision_id = vmx_msr_low;
- vmcs_config.pin_based_exec_ctrl = _pin_based_exec_control;
- vmcs_config.cpu_based_exec_ctrl = _cpu_based_exec_control;
- vmcs_config.vmexit_ctrl = _vmexit_control;
- vmcs_config.vmentry_ctrl = _vmentry_control;
+ vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
+ vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
+ vmcs_conf->vmexit_ctrl = _vmexit_control;
+ vmcs_conf->vmentry_ctrl = _vmentry_control;
return 0;
}
free_vmcs(per_cpu(vmxarea, cpu));
}
-extern struct vmcs *alloc_vmcs_cpu(int cpu);
-
static __init int alloc_kvm_area(void)
{
int cpu;
static __init int hardware_setup(void)
{
- if (setup_vmcs_config() < 0)
- return -1;
+ if (setup_vmcs_config(&vmcs_config) < 0)
+ return -EIO;
return alloc_kvm_area();
}
vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}
-static int rmode_tss_base(struct kvm* kvm)
+static gva_t rmode_tss_base(struct kvm *kvm)
{
gfn_t base_gfn = kvm->memslots[0].base_gfn + kvm->memslots[0].npages - 3;
return base_gfn << PAGE_SHIFT;
fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
+ kvm_mmu_reset_context(vcpu);
init_rmode_tss(vcpu->kvm);
}
vcpu->shadow_efer |= EFER_LMA;
- find_msr_entry(vcpu, MSR_EFER)->data |= EFER_LMA | EFER_LME;
+ find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS)
- | VM_ENTRY_CONTROLS_IA32E_MASK);
+ | VM_ENTRY_IA32E_MODE);
}
static void exit_lmode(struct kvm_vcpu *vcpu)
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS)
- & ~VM_ENTRY_CONTROLS_IA32E_MASK);
+ & ~VM_ENTRY_IA32E_MODE);
}
#endif
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- struct kvm_msr_entry *msr = find_msr_entry(vcpu, MSR_EFER);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
vcpu->shadow_efer = efer;
if (efer & EFER_LMA) {
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS) |
- VM_ENTRY_CONTROLS_IA32E_MASK);
+ VM_ENTRY_IA32E_MODE);
msr->data = efer;
} else {
vmcs_write32(VM_ENTRY_CONTROLS,
vmcs_read32(VM_ENTRY_CONTROLS) &
- ~VM_ENTRY_CONTROLS_IA32E_MASK);
+ ~VM_ENTRY_IA32E_MODE);
msr->data = efer & ~EFER_LME;
}
- setup_msrs(vcpu);
+ setup_msrs(vmx);
}
#endif
/*
* Sets up the vmcs for emulated real mode.
*/
-static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
+static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 host_sysenter_cs;
u32 junk;
unsigned long a;
int i;
int ret = 0;
unsigned long kvm_vmx_return;
+ u64 msr;
+ u32 exec_control;
- if (!init_rmode_tss(vcpu->kvm)) {
+ if (!init_rmode_tss(vmx->vcpu.kvm)) {
ret = -ENOMEM;
goto out;
}
- memset(vcpu->regs, 0, sizeof(vcpu->regs));
- vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
- vcpu->cr8 = 0;
- vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
- if (vcpu->vcpu_id == 0)
- vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
+ vmx->vcpu.rmode.active = 0;
- fx_init(vcpu);
+ vmx->vcpu.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+ set_cr8(&vmx->vcpu, 0);
+ msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+ if (vmx->vcpu.vcpu_id == 0)
+ msr |= MSR_IA32_APICBASE_BSP;
+ kvm_set_apic_base(&vmx->vcpu, msr);
+
+ fx_init(&vmx->vcpu);
/*
* GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
* insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
*/
- vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
- vmcs_writel(GUEST_CS_BASE, 0x000f0000);
+ if (vmx->vcpu.vcpu_id == 0) {
+ vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
+ vmcs_writel(GUEST_CS_BASE, 0x000f0000);
+ } else {
+ vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.sipi_vector << 8);
+ vmcs_writel(GUEST_CS_BASE, vmx->vcpu.sipi_vector << 12);
+ }
vmcs_write32(GUEST_CS_LIMIT, 0xffff);
vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
vmcs_writel(GUEST_SYSENTER_EIP, 0);
vmcs_writel(GUEST_RFLAGS, 0x02);
- vmcs_writel(GUEST_RIP, 0xfff0);
+ if (vmx->vcpu.vcpu_id == 0)
+ vmcs_writel(GUEST_RIP, 0xfff0);
+ else
+ vmcs_writel(GUEST_RIP, 0);
vmcs_writel(GUEST_RSP, 0);
//todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
/* Control */
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
vmcs_config.pin_based_exec_ctrl);
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
- vmcs_config.cpu_based_exec_ctrl);
+
+ exec_control = vmcs_config.cpu_based_exec_ctrl;
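+ /* Without a usable TPR shadow (no hardware support or no in-kernel APIC), fall back to intercepting CR8. */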
+ if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_STORE_EXITING |
+ CPU_BASED_CR8_LOAD_EXITING;
+#endif
+ }
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
++vmx->nmsrs;
}
- setup_msrs(vcpu);
+ setup_msrs(vmx);
vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
#ifdef CONFIG_X86_64
- vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
- vmcs_writel(TPR_THRESHOLD, 0);
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
+ if (vm_need_tpr_shadow(vmx->vcpu.kvm))
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+ page_to_phys(vmx->vcpu.apic->regs_page));
+ vmcs_write32(TPR_THRESHOLD, 0);
#endif
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
- vcpu->cr0 = 0x60000010;
- vmx_set_cr0(vcpu, vcpu->cr0); // enter rmode
- vmx_set_cr4(vcpu, 0);
+ vmx->vcpu.cr0 = 0x60000010;
+ vmx_set_cr0(&vmx->vcpu, vmx->vcpu.cr0); // enter rmode
+ vmx_set_cr4(&vmx->vcpu, 0);
#ifdef CONFIG_X86_64
- vmx_set_efer(vcpu, 0);
+ vmx_set_efer(&vmx->vcpu, 0);
#endif
- vmx_fpu_activate(vcpu);
- update_exception_bitmap(vcpu);
+ vmx_fpu_activate(&vmx->vcpu);
+ update_exception_bitmap(&vmx->vcpu);
return 0;
return ret;
}
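+/*
+ * Reset the vcpu by re-running the emulated real mode VMCS setup; non-boot
+ * vcpus start executing at their SIPI vector.
+ */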
+static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ vmx_vcpu_setup(vmx);
+}
+
static void inject_rmode_irq(struct kvm_vcpu *vcpu, int irq)
{
u16 ent[2];
return;
}
- if (kvm_read_guest(vcpu, irq * sizeof(ent), sizeof(ent), &ent) !=
- sizeof(ent)) {
+ if (emulator_read_std(irq * sizeof(ent), &ent, sizeof(ent), vcpu) !=
+ X86EMUL_CONTINUE) {
vcpu_printf(vcpu, "%s: read guest err\n", __FUNCTION__);
return;
}
ip = vmcs_readl(GUEST_RIP);
- if (kvm_write_guest(vcpu, ss_base + sp - 2, 2, &flags) != 2 ||
- kvm_write_guest(vcpu, ss_base + sp - 4, 2, &cs) != 2 ||
- kvm_write_guest(vcpu, ss_base + sp - 6, 2, &ip) != 2) {
+ if (emulator_write_emulated(ss_base + sp - 2, &flags, 2, vcpu) != X86EMUL_CONTINUE ||
+ emulator_write_emulated(ss_base + sp - 4, &cs, 2, vcpu) != X86EMUL_CONTINUE ||
+ emulator_write_emulated(ss_base + sp - 6, &ip, 2, vcpu) != X86EMUL_CONTINUE) {
vcpu_printf(vcpu, "%s: write guest err\n", __FUNCTION__);
return;
}
vmcs_writel(GUEST_RSP, (vmcs_readl(GUEST_RSP) & ~0xffff) | (sp - 6));
}
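+/*
+ * Inject an external interrupt: through the real-mode stack trampoline when
+ * the guest runs in emulated real mode, otherwise via the VM-entry
+ * interruption-information field.
+ */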
+static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
+{
+ if (vcpu->rmode.active) {
+ inject_rmode_irq(vcpu, irq);
+ return;
+ }
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
+}
+
static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
int word_index = __ffs(vcpu->irq_summary);
clear_bit(bit_index, &vcpu->irq_pending[word_index]);
if (!vcpu->irq_pending[word_index])
clear_bit(word_index, &vcpu->irq_summary);
-
- if (vcpu->rmode.active) {
- inject_rmode_irq(vcpu, irq);
- return;
- }
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
- irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
+ vmx_inject_irq(vcpu, irq);
}
"intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
}
- if (is_external_interrupt(vect_info)) {
+ if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
set_bit(irq, vcpu->irq_pending);
set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}
- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
- asm ("int $2");
- return 1;
- }
+ if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+ return 1; /* already handled by vmx_vcpu_run() */
if (is_no_device(intr_info)) {
vmx_fpu_activate(vcpu);
++vcpu->stat.mmio_exits;
return 0;
case EMULATE_FAIL:
- vcpu_printf(vcpu, "%s: emulate fail\n", __FUNCTION__);
+ kvm_report_emulation_failure(vcpu, "pagetable");
break;
default:
BUG();
return 0;
}
-static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
-{
- u64 inst;
- gva_t rip;
- int countr_size;
- int i, n;
-
- if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_VM)) {
- countr_size = 2;
- } else {
- u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
-
- countr_size = (cs_ar & AR_L_MASK) ? 8:
- (cs_ar & AR_DB_MASK) ? 4: 2;
- }
-
- rip = vmcs_readl(GUEST_RIP);
- if (countr_size != 8)
- rip += vmcs_readl(GUEST_CS_BASE);
-
- n = kvm_read_guest(vcpu, rip, sizeof(inst), &inst);
-
- for (i = 0; i < n; i++) {
- switch (((u8*)&inst)[i]) {
- case 0xf0:
- case 0xf2:
- case 0xf3:
- case 0x2e:
- case 0x36:
- case 0x3e:
- case 0x26:
- case 0x64:
- case 0x65:
- case 0x66:
- break;
- case 0x67:
- countr_size = (countr_size == 2) ? 4: (countr_size >> 1);
- default:
- goto done;
- }
- }
- return 0;
-done:
- countr_size *= 8;
- *count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
- //printk("cx: %lx\n", vcpu->regs[VCPU_REGS_RCX]);
- return 1;
-}
-
static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- u64 exit_qualification;
+ unsigned long exit_qualification;
int size, down, in, string, rep;
unsigned port;
- unsigned long count;
- gva_t address;
++vcpu->stat.io_exits;
- exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
- in = (exit_qualification & 8) != 0;
- size = (exit_qualification & 7) + 1;
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
string = (exit_qualification & 16) != 0;
+
+ if (string) {
+ if (emulate_instruction(vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
+ return 0;
+ return 1;
+ }
+
+ size = (exit_qualification & 7) + 1;
+ in = (exit_qualification & 8) != 0;
down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
- count = 1;
rep = (exit_qualification & 32) != 0;
port = exit_qualification >> 16;
- address = 0;
- if (string) {
- if (rep && !get_io_count(vcpu, &count))
- return 1;
- address = vmcs_readl(GUEST_LINEAR_ADDRESS);
- }
- return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
- address, rep, port);
+
+ return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
}
static void
static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- u64 exit_qualification;
+ unsigned long exit_qualification;
int cr;
int reg;
- exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
cr = exit_qualification & 15;
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
vcpu_load_rsp_rip(vcpu);
set_cr8(vcpu, vcpu->regs[reg]);
skip_emulated_instruction(vcpu);
- return 1;
+ kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+ return 0;
};
break;
case 2: /* clts */
return 1;
case 8:
vcpu_load_rsp_rip(vcpu);
- vcpu->regs[reg] = vcpu->cr8;
+ vcpu->regs[reg] = get_cr8(vcpu);
vcpu_put_rsp_rip(vcpu);
skip_emulated_instruction(vcpu);
return 1;
break;
}
kvm_run->exit_reason = 0;
- printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
+ pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
(int)(exit_qualification >> 4) & 3, cr);
return 0;
}
static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- u64 exit_qualification;
+ unsigned long exit_qualification;
unsigned long val;
int dr, reg;
* FIXME: this code assumes the host is debugging the guest.
* need to deal with guest debugging itself too.
*/
- exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
dr = exit_qualification & 7;
reg = (exit_qualification >> 8) & 15;
vcpu_load_rsp_rip(vcpu);
return 1;
}
-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
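+/*
+ * TPR fell below the threshold set by update_tpr_threshold(); the pending
+ * APIC interrupt will be injected on the next entry, so just resume the guest.
+ */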
+static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
{
- kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
- kvm_run->cr8 = vcpu->cr8;
- kvm_run->apic_base = vcpu->apic_base;
- kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
- vcpu->irq_summary == 0);
+ return 1;
}
static int handle_interrupt_window(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
+ u32 cpu_based_vm_exec_control;
+
+ /* clear pending irq */
+ cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+ cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
/*
* If user space is waiting to inject interrupts, exit as soon as
* possible
[EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
[EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_VMCALL] = handle_vmcall,
+ [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
};
static const int kvm_vmx_max_exit_handlers =
{
u32 vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (unlikely(vmx->fail)) {
+ kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ kvm_run->fail_entry.hardware_entry_failure_reason
+ = vmcs_read32(VM_INSTRUCTION_ERROR);
+ return 0;
+ }
if ( (vectoring_info & VECTORING_INFO_VALID_MASK) &&
exit_reason != EXIT_REASON_EXCEPTION_NMI )
return 0;
}
-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
- return (!vcpu->irq_summary &&
- kvm_run->request_interrupt_window &&
- vcpu->interrupt_window_open &&
- (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
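+ /*
+ * Intentionally empty: guest TLB entries do not survive a VM exit/entry
+ * here, so no explicit flush is required.
+ */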
}
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
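+/*
+ * Program TPR_THRESHOLD so the guest exits once its task priority drops far
+ * enough for the highest pending virtual interrupt to be delivered.
+ */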
+static void update_tpr_threshold(struct kvm_vcpu *vcpu)
{
+ int max_irr, tpr;
+
+ if (!vm_need_tpr_shadow(vcpu->kvm))
+ return;
+
+ if (!kvm_lapic_enabled(vcpu) ||
+ ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
+ vmcs_write32(TPR_THRESHOLD, 0);
+ return;
+ }
+
+ tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
+ vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
}
-static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
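+/* Ask for a VM exit as soon as the guest can accept an external interrupt. */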
+static void enable_irq_window(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u8 fail;
- int r;
+ u32 cpu_based_vm_exec_control;
-preempted:
- if (vcpu->guest_debug.enabled)
- kvm_guest_debug_pre(vcpu);
+ cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+ cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+}
-again:
- r = kvm_mmu_reload(vcpu);
- if (unlikely(r))
- goto out;
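+/*
+ * Called before every VM entry: reinject any event that was cut short by the
+ * last exit, then deliver a pending interrupt from the in-kernel APIC/PIC or
+ * arm the interrupt window if the guest cannot take it yet.
+ */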
+static void vmx_intr_assist(struct kvm_vcpu *vcpu)
+{
+ u32 idtv_info_field, intr_info_field;
+ int has_ext_irq, interrupt_window_open;
+ int vector;
- preempt_disable();
+ kvm_inject_pending_timer_irqs(vcpu);
+ update_tpr_threshold(vcpu);
- if (!vcpu->mmio_read_completed)
- do_interrupt_requests(vcpu, kvm_run);
+ has_ext_irq = kvm_cpu_has_interrupt(vcpu);
+ intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
+ idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+ if (intr_info_field & INTR_INFO_VALID_MASK) {
+ if (idtv_info_field & INTR_INFO_VALID_MASK) {
+ /* TODO: fault when IDT_Vectoring */
+ printk(KERN_ERR "Fault when IDT_Vectoring\n");
+ }
+ if (has_ext_irq)
+ enable_irq_window(vcpu);
+ return;
+ }
+ if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
+
+ if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+ vmcs_read32(IDT_VECTORING_ERROR_CODE));
+ if (unlikely(has_ext_irq))
+ enable_irq_window(vcpu);
+ return;
+ }
+ if (!has_ext_irq)
+ return;
+ interrupt_window_open =
+ ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+ (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
+ if (interrupt_window_open) {
+ vector = kvm_cpu_get_interrupt(vcpu);
+ vmx_inject_irq(vcpu, vector);
+ kvm_timer_intr_post(vcpu, vector);
+ } else
+ enable_irq_window(vcpu);
+}
- vmx_save_host_state(vcpu);
- kvm_load_guest_fpu(vcpu);
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 intr_info;
/*
* Loading guest fpu may have cleared host cr0.ts
*/
vmcs_writel(HOST_CR0, read_cr0());
- local_irq_disable();
-
- vcpu->guest_mode = 1;
- if (vcpu->requests)
- if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
- vmx_flush_tlb(vcpu);
-
asm (
/* Store host registers */
#ifdef CONFIG_X86_64
"pop %%ecx; popa \n\t"
#endif
"setbe %0 \n\t"
- : "=q" (fail)
+ : "=q" (vmx->fail)
: "r"(vmx->launched), "d"((unsigned long)HOST_RSP),
"c"(vcpu),
[rax]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RAX])),
[cr2]"i"(offsetof(struct kvm_vcpu, cr2))
: "cc", "memory" );
- vcpu->guest_mode = 0;
- local_irq_enable();
-
- ++vcpu->stat.exits;
-
vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
vmx->launched = 1;
- preempt_enable();
-
- if (unlikely(fail)) {
- kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- kvm_run->fail_entry.hardware_entry_failure_reason
- = vmcs_read32(VM_INSTRUCTION_ERROR);
- r = 0;
- goto out;
- }
- /*
- * Profile KVM exit RIPs:
- */
- if (unlikely(prof_on == KVM_PROFILING))
- profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
- r = kvm_handle_exit(kvm_run, vcpu);
- if (r > 0) {
- /* Give scheduler a change to reschedule. */
- if (signal_pending(current)) {
- r = -EINTR;
- kvm_run->exit_reason = KVM_EXIT_INTR;
- ++vcpu->stat.signal_exits;
- goto out;
- }
-
- if (dm_request_for_irq_injection(vcpu, kvm_run)) {
- r = -EINTR;
- kvm_run->exit_reason = KVM_EXIT_INTR;
- ++vcpu->stat.request_irq_exits;
- goto out;
- }
- if (!need_resched()) {
- ++vcpu->stat.light_exits;
- goto again;
- }
- }
-
-out:
- if (r > 0) {
- kvm_resched(vcpu);
- goto preempted;
- }
+ intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- post_kvm_run_save(vcpu, kvm_run);
- return r;
+ /* We need to handle NMIs before interrupts are enabled */
+ if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+ asm("int $2");
}
static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (vmx->vmcs) {
- on_each_cpu(__vcpu_clear, vcpu, 0, 1);
+ on_each_cpu(__vcpu_clear, vmx, 0, 1);
free_vmcs(vmx->vmcs);
vmx->vmcs = NULL;
}
kfree(vmx->host_msrs);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
- kfree(vmx);
+ kmem_cache_free(kvm_vcpu_cache, vmx);
}
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
- struct vcpu_vmx *vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
+ struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
int cpu;
if (!vmx)
if (err)
goto free_vcpu;
+ if (irqchip_in_kernel(kvm)) {
+ err = kvm_create_lapic(&vmx->vcpu);
+ if (err < 0)
+ goto free_vcpu;
+ }
+
vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!vmx->guest_msrs) {
err = -ENOMEM;
cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
- err = vmx_vcpu_setup(&vmx->vcpu);
+ err = vmx_vcpu_setup(vmx);
vmx_vcpu_put(&vmx->vcpu);
put_cpu();
if (err)
uninit_vcpu:
kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
- kfree(vmx);
+ kmem_cache_free(kvm_vcpu_cache, vmx);
return ERR_PTR(err);
}
-static struct kvm_arch_ops vmx_arch_ops = {
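+/*
+ * Run on each online CPU at load time to verify that it reports the same VMX
+ * capabilities as the boot processor.
+ */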
+static void __init vmx_check_processor_compat(void *rtn)
+{
+ struct vmcs_config vmcs_conf;
+
+ *(int *)rtn = 0;
+ if (setup_vmcs_config(&vmcs_conf) < 0)
+ *(int *)rtn = -EIO;
+ if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
+ printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
+ smp_processor_id());
+ *(int *)rtn = -EIO;
+ }
+}
+
+static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
.hardware_unsetup = hardware_unsetup,
+ .check_processor_compatibility = vmx_check_processor_compat,
.hardware_enable = hardware_enable,
.hardware_disable = hardware_disable,
.vcpu_create = vmx_create_vcpu,
.vcpu_free = vmx_free_vcpu,
+ .vcpu_reset = vmx_vcpu_reset,
+ .prepare_guest_switch = vmx_save_host_state,
.vcpu_load = vmx_vcpu_load,
.vcpu_put = vmx_vcpu_put,
.vcpu_decache = vmx_vcpu_decache,
.set_guest_debug = set_guest_debug,
+ .guest_debug_pre = kvm_guest_debug_pre,
.get_msr = vmx_get_msr,
.set_msr = vmx_set_msr,
.get_segment_base = vmx_get_segment_base,
.inject_gp = vmx_inject_gp,
.run = vmx_vcpu_run,
+ .handle_exit = kvm_handle_exit,
.skip_emulated_instruction = skip_emulated_instruction,
.patch_hypercall = vmx_patch_hypercall,
+ .get_irq = vmx_get_irq,
+ .set_irq = vmx_inject_irq,
+ .inject_pending_irq = vmx_intr_assist,
+ .inject_pending_vectors = do_interrupt_requests,
};
static int __init vmx_init(void)
memset(iova, 0xff, PAGE_SIZE);
kunmap(vmx_io_bitmap_b);
- r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+ r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
if (r)
goto out1;
__free_page(vmx_io_bitmap_b);
__free_page(vmx_io_bitmap_a);
- kvm_exit_arch();
+ kvm_exit_x86();
}
module_init(vmx_init)