KVM: Use alignment properties of vcpu to simplify FPU ops
author    Rusty Russell <rusty@rustcorp.com.au>
          Mon, 30 Jul 2007 11:13:43 +0000 (21:13 +1000)
committer Avi Kivity <avi@qumranet.com>
          Sat, 13 Oct 2007 08:18:21 +0000 (10:18 +0200)
Now that we use a kmem cache for allocating vcpus, we can get the
16-byte alignment required by the fxsave & fxrstor instructions, and
avoid manually aligning the buffer.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
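
For context, the change relies on the vcpu coming from a slab cache created
with a sufficient alignment argument. Below is a minimal sketch of that
allocation pattern; the cache name and init function are illustrative, and
the cache setup itself is not part of this diff:

    #include <linux/slab.h>

    static struct kmem_cache *vcpu_cache;	/* illustrative, not the real symbol */

    static int __init vcpu_cache_init(void)
    {
    	/*
    	 * Passing __alignof__(struct kvm_vcpu) as the align argument makes
    	 * the slab hand back objects aligned for the embedded
    	 * i387_fxsave_struct (itself declared aligned(16)), so fxsave and
    	 * fxrstor can target it directly.
    	 */
    	vcpu_cache = kmem_cache_create("kvm_vcpu_demo",
    				       sizeof(struct kvm_vcpu),
    				       __alignof__(struct kvm_vcpu),
    				       0, NULL);
    	return vcpu_cache ? 0 : -ENOMEM;
    }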
drivers/kvm/kvm.h
drivers/kvm/kvm_main.c
drivers/kvm/svm.c

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index b362e8f8f832965c75253f1831beb5ffd8399afa..7a34706f42be62708c65e2022f422ca91b194aa8 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
 
-#define FX_IMAGE_SIZE 512
-#define FX_IMAGE_ALIGN 16
-#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
-
 #define DE_VECTOR 0
 #define NM_VECTOR 7
 #define DF_VECTOR 8
@@ -342,9 +338,8 @@ struct kvm_vcpu {
 
        struct kvm_guest_debug guest_debug;
 
-       char fx_buf[FX_BUF_SIZE];
-       char *host_fx_image;
-       char *guest_fx_image;
+       struct i387_fxsave_struct host_fx_image;
+       struct i387_fxsave_struct guest_fx_image;
        int fpu_active;
        int guest_fpu_loaded;
 
@@ -704,12 +699,12 @@ static inline unsigned long read_msr(unsigned long msr)
 }
 #endif
 
-static inline void fx_save(void *image)
+static inline void fx_save(struct i387_fxsave_struct *image)
 {
        asm ("fxsave (%0)":: "r" (image));
 }
 
-static inline void fx_restore(void *image)
+static inline void fx_restore(struct i387_fxsave_struct *image)
 {
        asm ("fxrstor (%0)":: "r" (image));
 }
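
A note on why the typed signature is enough: fxsave and fxrstor take a
512-byte memory operand that the CPU requires to be 16-byte aligned,
raising #GP otherwise. The kernel declares the image type with that
alignment; the following is an abbreviated sketch of the x86_64 shape
(field list simplified from <asm/processor.h>, not a verbatim copy):

    /*
     * Sketch only: the aligned(16) attribute is what lets the typed
     * wrappers above take &vcpu->host_fx_image directly, with the slab
     * cache guaranteeing the allocation honours it.
     */
    struct i387_fxsave_struct_sketch {
    	u16	cwd, swd, twd, fop;	/* x87 control/status/tag/opcode */
    	u64	rip, rdp;		/* last instruction/operand pointers */
    	u32	mxcsr, mxcsr_mask;	/* SSE control/status */
    	u32	st_space[32];		/* 8 x87 registers, 16 bytes each */
    	u32	xmm_space[64];		/* 16 XMM registers, 16 bytes each */
    	u32	padding[24];		/* pad image to 512 bytes */
    } __attribute__((aligned(16)));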
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4166a08ce5007fca98793f255438905189df698c..bfb1b6de05843beb95a80d31f0547b4d5bde89f1 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -154,8 +154,8 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
                return;
 
        vcpu->guest_fpu_loaded = 1;
-       fx_save(vcpu->host_fx_image);
-       fx_restore(vcpu->guest_fx_image);
+       fx_save(&vcpu->host_fx_image);
+       fx_restore(&vcpu->guest_fx_image);
 }
 EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
 
@@ -165,8 +165,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
                return;
 
        vcpu->guest_fpu_loaded = 0;
-       fx_save(vcpu->guest_fx_image);
-       fx_restore(vcpu->host_fx_image);
+       fx_save(&vcpu->guest_fx_image);
+       fx_restore(&vcpu->host_fx_image);
 }
 EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 
@@ -262,10 +262,6 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
        }
        vcpu->pio_data = page_address(page);
 
-       vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
-                                          FX_IMAGE_ALIGN);
-       vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
-
        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;
@@ -615,30 +611,20 @@ EXPORT_SYMBOL_GPL(set_cr8);
 
 void fx_init(struct kvm_vcpu *vcpu)
 {
-       struct __attribute__ ((__packed__)) fx_image_s {
-               u16 control; //fcw
-               u16 status; //fsw
-               u16 tag; // ftw
-               u16 opcode; //fop
-               u64 ip; // fpu ip
-               u64 operand;// fpu dp
-               u32 mxcsr;
-               u32 mxcsr_mask;
-
-       } *fx_image;
+       unsigned after_mxcsr_mask;
 
        /* Initialize guest FPU by resetting ours and saving into guest's */
        preempt_disable();
-       fx_save(vcpu->host_fx_image);
+       fx_save(&vcpu->host_fx_image);
        fpu_init();
-       fx_save(vcpu->guest_fx_image);
-       fx_restore(vcpu->host_fx_image);
+       fx_save(&vcpu->guest_fx_image);
+       fx_restore(&vcpu->host_fx_image);
        preempt_enable();
 
-       fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
-       fx_image->mxcsr = 0x1f80;
-       memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
-              0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
+       after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
+       vcpu->guest_fx_image.mxcsr = 0x1f80;
+       memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
+              0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
 }
 EXPORT_SYMBOL_GPL(fx_init);
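
To make the new fx_init explicit: the fields up to and including
mxcsr_mask form the 32-byte image header, which fpu_init() plus the
fxsave already leave holding reset values; everything from st_space
onward is FP/SSE register state, which the memset clears. A standalone
sketch of the same arithmetic (hypothetical helper, same logic as the
hunk above):

    /*
     * Sketch: clear the guest image's register area while keeping the
     * saved header.  offsetof(..., st_space) is 32 on both the i386 and
     * x86_64 layouts, marking where register state begins.
     */
    static void fx_clear_regs(struct i387_fxsave_struct *fx)
    {
    	unsigned header = offsetof(struct i387_fxsave_struct, st_space);

    	fx->mxcsr = 0x1f80;	/* MXCSR power-on/reset default */
    	memset((char *)fx + header, 0, sizeof(*fx) - header);
    }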
 
@@ -2356,6 +2342,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 
        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 
+       /* We do fxsave: this must be aligned. */
+       BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
        vcpu_load(vcpu);
        r = kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
@@ -2468,7 +2457,7 @@ struct fxsave {
 
 static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+       struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
 
        vcpu_load(vcpu);
 
@@ -2488,7 +2477,7 @@ static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 
 static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+       struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
 
        vcpu_load(vcpu);
 
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 8193651dd81560e33e7ed5d11951effda015619a..5277084f3a350b671eb3f4b8c0743519b9a08e87 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -1557,8 +1557,8 @@ again:
        }
 
        if (vcpu->fpu_active) {
-               fx_save(vcpu->host_fx_image);
-               fx_restore(vcpu->guest_fx_image);
+               fx_save(&vcpu->host_fx_image);
+               fx_restore(&vcpu->guest_fx_image);
        }
 
        asm volatile (
@@ -1670,8 +1670,8 @@ again:
        vcpu->guest_mode = 0;
 
        if (vcpu->fpu_active) {
-               fx_save(vcpu->guest_fx_image);
-               fx_restore(vcpu->host_fx_image);
+               fx_save(&vcpu->guest_fx_image);
+               fx_restore(&vcpu->host_fx_image);
        }
 
        if ((svm->vmcb->save.dr7 & 0xff))