return;
vcpu->guest_fpu_loaded = 1;
- fx_save(vcpu->host_fx_image);
- fx_restore(vcpu->guest_fx_image);
+ fx_save(&vcpu->host_fx_image);
+ fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
return;
vcpu->guest_fpu_loaded = 0;
- fx_save(vcpu->guest_fx_image);
- fx_restore(vcpu->host_fx_image);
+ fx_save(&vcpu->guest_fx_image);
+ fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
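Both paths above switch from passing a char pointer to passing the address of an image embedded in struct kvm_vcpu. A minimal sketch of the helper signatures this implies, assuming fx_save()/fx_restore() are thin wrappers around the fxsave/fxrstor instructions (the kernel's exact definitions may differ slightly):

static inline void fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)" : : "r" (image) : "memory");
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)" : : "r" (image) : "memory");
}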
}
vcpu->pio_data = page_address(page);
- vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
- FX_IMAGE_ALIGN);
- vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
-
r = kvm_mmu_create(vcpu);
if (r < 0)
goto fail_free_pio_data;
}
}
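The ALIGN() arithmetic into fx_buf disappears because the images are now plain members of struct kvm_vcpu and the type itself carries the required alignment. A sketch of the assumed field change (the struct definition is not part of this excerpt):

	/* before: hand-aligned images inside a raw buffer */
	char fx_buf[FX_BUF_SIZE];
	char *host_fx_image;
	char *guest_fx_image;

	/* after: standard structures, 16-byte aligned by their type */
	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;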
- vcpu->cr3 = cr3;
mutex_lock(&vcpu->kvm->lock);
/*
 * Does the new cr3 value map to physical memory? (Note, we
 * catch an invalid cr3 even in real-mode, because it would
 * cause trouble later on when we turn on paging anyway.)
 */
if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
inject_gp(vcpu);
- else
+ else {
+ vcpu->cr3 = cr3;
vcpu->mmu.new_cr3(vcpu);
+ }
mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);
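Note the bug this reordering fixes: the old code committed vcpu->cr3 before validating the value, so a cr3 pointing at unbacked memory would inject #GP and still leave the bogus value in vcpu->cr3. Assigning it only in the else branch keeps vcpu->cr3 consistent with what mmu.new_cr3() was actually given.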
void fx_init(struct kvm_vcpu *vcpu)
{
- struct __attribute__ ((__packed__)) fx_image_s {
- u16 control; //fcw
- u16 status; //fsw
- u16 tag; // ftw
- u16 opcode; //fop
- u64 ip; // fpu ip
- u64 operand;// fpu dp
- u32 mxcsr;
- u32 mxcsr_mask;
-
- } *fx_image;
+ unsigned after_mxcsr_mask;
/* Initialize guest FPU by resetting ours and saving into guest's */
preempt_disable();
- fx_save(vcpu->host_fx_image);
+ fx_save(&vcpu->host_fx_image);
fpu_init();
- fx_save(vcpu->guest_fx_image);
- fx_restore(vcpu->host_fx_image);
+ fx_save(&vcpu->guest_fx_image);
+ fx_restore(&vcpu->host_fx_image);
preempt_enable();
- fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
- fx_image->mxcsr = 0x1f80;
- memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
- 0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
+ after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
+ vcpu->guest_fx_image.mxcsr = 0x1f80;
+ memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
+ 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
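The offsetof() trick works because everything from st_space onward in the standard fxsave layout is register state, while the 32 bytes before it (control/status/tag words, instruction and operand pointers, mxcsr and its mask) form the header that the fx_save() above just filled in. For reference, a trimmed sketch of struct i387_fxsave_struct as the i386 headers of this era define it:

struct i387_fxsave_struct {
	unsigned short	cwd;		/* fcw */
	unsigned short	swd;		/* fsw */
	unsigned short	twd;		/* ftw */
	unsigned short	fop;
	long		fip;		/* fpu ip */
	long		fcs;
	long		foo;		/* fpu dp */
	long		fos;
	long		mxcsr;
	long		mxcsr_mask;
	long		st_space[32];	/* offset 32: 8 FP regs, 16 bytes each */
	long		xmm_space[32];	/* 8 XMM regs, 16 bytes each */
	/* ... reserved/padding ... */
} __attribute__((aligned(16)));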
if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
goto out;
- mutex_lock(&kvm->lock);
- kvm_mmu_slot_remove_write_access(kvm, log->slot);
- kvm_flush_remote_tlbs(kvm);
- memset(memslot->dirty_bitmap, 0, n);
- mutex_unlock(&kvm->lock);
+ /* If nothing is dirty, don't bother messing with page tables. */
+ if (any) {
+ mutex_lock(&kvm->lock);
+ kvm_mmu_slot_remove_write_access(kvm, log->slot);
+ kvm_flush_remote_tlbs(kvm);
+ memset(memslot->dirty_bitmap, 0, n);
+ mutex_unlock(&kvm->lock);
+ }
r = 0;
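The any flag is assumed to have been computed earlier in the ioctl, before the bitmap was copied to userspace; something along these lines (a sketch, as that part of the function is not shown here):

	int any = 0;
	unsigned long i;

	/* is any bit set anywhere in the slot's dirty bitmap? */
	for (i = 0; !any && i < n / sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];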
preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
+ /* We do fxsave: this must be aligned. */
+ BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
vcpu_load(vcpu);
r = kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
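fxsave/fxrstor raise #GP on a memory operand that is not 16-byte aligned, so the BUG_ON asserts at vcpu-creation time the invariant the embedded struct layout is supposed to guarantee; masking with 0xF tests exactly the low four address bits.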
static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+ struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
vcpu_load(vcpu);
static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+ struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
vcpu_load(vcpu);
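Only the casts change in the two ioctl helpers: guest_fx_image is now an embedded structure, so its address (rather than a char pointer value) names the fxsave area. The struct fxsave view the ioctls copy through, and hence the layout seen by userspace, is unchanged.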
struct module *module)
{
int r;
+ int cpu;
if (kvm_arch_ops) {
printk(KERN_ERR "kvm: already loaded the other module\n");
if (r < 0)
goto out;
+ for_each_online_cpu(cpu) {
+ smp_call_function_single(cpu,
+ kvm_arch_ops->check_processor_compatibility,
+ &r, 0, 1);
+ if (r < 0)
+ goto out_free_0;
+ }
+
on_each_cpu(hardware_enable, NULL, 0, 1);
r = register_cpu_notifier(&kvm_cpu_notifier);
if (r)
unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
on_each_cpu(hardware_disable, NULL, 0, 1);
+out_free_0:
kvm_arch_ops->hardware_unsetup();
out:
kvm_arch_ops = NULL;
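Since smp_call_function_single() runs a void-returning function on the target cpu, the compatibility hook has to report back through its void * argument, which here points at r. A minimal, hypothetical sketch of an arch hook matching the call above (real vendor checks would probe hardware features):

static void check_processor_compatibility(void *rtn)
{
	/* write the result where the caller's &r points; 0 = compatible */
	*(int *)rtn = 0;
}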