diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index e17b433152cb006bf29f33d90fb0dcc10e2ad579..c0f372f1d761312bc648d9deccd8e74609fbc513 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -198,21 +198,15 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
 
 static void ack_flush(void *_completed)
 {
-       atomic_t *completed = _completed;
-
-       atomic_inc(completed);
 }
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-       int i, cpu, needed;
+       int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;
-       atomic_t completed;
 
-       atomic_set(&completed, 0);
        cpus_clear(cpus);
-       needed = 0;
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
@@ -221,23 +215,9 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
-                       if (!cpu_isset(cpu, cpus)) {
-                               cpu_set(cpu, cpus);
-                               ++needed;
-                       }
-       }
-
-       /*
-        * We really want smp_call_function_mask() here.  But that's not
-        * available, so ipi all cpus in parallel and wait for them
-        * to complete.
-        */
-       for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
-               smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
-       while (atomic_read(&completed) != needed) {
-               cpu_relax();
-               barrier();
+                       cpu_set(cpu, cpus);
        }
+       smp_call_function_mask(cpus, ack_flush, NULL, 1);
 }
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
@@ -1208,8 +1188,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-       vcpu->cr0 &= ~X86_CR0_TS;
-       kvm_x86_ops->set_cr0(vcpu, vcpu->cr0);
+       kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
        return X86EMUL_CONTINUE;
 }
 
@@ -1240,25 +1219,25 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
        return X86EMUL_CONTINUE;
 }
 
-static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
+void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 {
        static int reported;
        u8 opcodes[4];
-       unsigned long rip = ctxt->vcpu->rip;
+       unsigned long rip = vcpu->rip;
        unsigned long rip_linear;
 
-       rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);
+       rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
 
        if (reported)
                return;
 
-       emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt->vcpu);
+       emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
 
-       printk(KERN_ERR "emulation failed but !mmio_needed?"
-              " rip %lx %02x %02x %02x %02x\n",
-              rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
+       printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
+              context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
        reported = 1;
 }
+EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
 
 struct x86_emulate_ops emulate_ops = {
        .read_std            = emulator_read_std,
@@ -1323,7 +1302,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
                        return EMULATE_DONE;
                if (!vcpu->mmio_needed) {
-                       report_emulation_failure(&emulate_ctxt);
+                       kvm_report_emulation_failure(vcpu, "mmio");
                        return EMULATE_FAIL;
                }
                return EMULATE_DO_MMIO;
@@ -1815,8 +1794,6 @@ static int complete_pio(struct kvm_vcpu *vcpu)
        io->count -= io->cur_count;
        io->cur_count = 0;
 
-       if (!io->count)
-               kvm_x86_ops->skip_emulated_instruction(vcpu);
        return 0;
 }
 
@@ -1876,6 +1853,8 @@ int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
        kvm_x86_ops->decache_regs(vcpu);
 
+       kvm_x86_ops->skip_emulated_instruction(vcpu);
+
        pio_dev = vcpu_find_pio_dev(vcpu, port);
        if (pio_dev) {
                kernel_pio(pio_dev, vcpu, vcpu->pio_data);
@@ -1938,6 +1917,9 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->run->io.count = now;
        vcpu->pio.cur_count = now;
 
+       if (vcpu->pio.cur_count == vcpu->pio.count)
+               kvm_x86_ops->skip_emulated_instruction(vcpu);
+
        for (i = 0; i < nr_pages; ++i) {
                mutex_lock(&vcpu->kvm->lock);
                page = gva_to_page(vcpu, address + i * PAGE_SIZE);
@@ -2043,6 +2025,7 @@ again:
                kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
 
        vcpu->guest_mode = 1;
+       kvm_guest_enter();
 
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
@@ -2055,6 +2038,16 @@ again:
 
        ++vcpu->stat.exits;
 
+       /*
+        * We must have an instruction between local_irq_enable() and
+        * kvm_guest_exit(), so the timer interrupt isn't delayed by
+        * the interrupt shadow.  The stat.exits increment will do nicely.
+        * But we need to prevent reordering, hence this barrier():
+        */
+       barrier();
+
+       kvm_guest_exit();
+
        preempt_enable();
 
        /*
@@ -3458,7 +3451,7 @@ static int kvm_resume(struct sys_device *dev)
 }
 
 static struct sysdev_class kvm_sysdev_class = {
-       set_kset_name("kvm"),
+       .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
 };