[PATCH] KVM: cpu hotplug support
author     Avi Kivity <avi@qumranet.com>
           Mon, 12 Feb 2007 08:54:47 +0000 (00:54 -0800)
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>
           Mon, 12 Feb 2007 17:48:41 +0000 (09:48 -0800)
On hotplug, we execute the hardware extension enable sequence.  On unplug, we
decache any vcpus that last ran on the exiting cpu, and execute the hardware
extension disable sequence.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
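
The mechanism is the standard CPU hotplug notifier chain of the 2.6.20-era kernel: a module registers a notifier_block and is called back with CPU_UP_PREPARE before a cpu comes online and with CPU_DEAD / CPU_UP_CANCELED when it goes away. As a rough, stand-alone sketch of that notifier API (the names example_cpu_callback and example_cpu_notifier are hypothetical and not part of this patch):

/* Minimal sketch of the 2.6.20-era CPU hotplug notifier API.
 * Illustrative module only; identifiers here are hypothetical. */
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nb,
                                unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                printk(KERN_INFO "cpu %d is coming up\n", cpu);
                break;
        case CPU_DEAD:
        case CPU_UP_CANCELED:
                printk(KERN_INFO "cpu %d is gone\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
        .notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
        return register_cpu_notifier(&example_cpu_notifier);
}

static void __exit example_exit(void)
{
        unregister_cpu_notifier(&example_cpu_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

In the patch itself, the callback additionally uses smp_call_function_single() to run kvm_arch_ops->hardware_enable or hardware_disable on the affected cpu, and decaches any vcpus that last ran there, as the kvm_main.c diff below shows.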
drivers/kvm/kvm.h
drivers/kvm/kvm_main.c
drivers/kvm/svm.c
drivers/kvm/vmx.c

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index c48cebf8511dfebf677ddc1055befd73c4fd33d8..04574a9d4430b9deeb7458e0a037a1b76f1e6b23 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -341,6 +341,7 @@ struct kvm_arch_ops {
 
        struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
+       void (*vcpu_decache)(struct kvm_vcpu *vcpu);
 
        int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                               struct kvm_debug_guest *dbg);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index a6cd1c1fe29ab92a6da89416946b2108b15c1ee9..291d298868fca7947b746bf9f87e95611df1f081 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -34,6 +34,7 @@
 #include <linux/highmem.h>
 #include <linux/file.h>
 #include <asm/desc.h>
+#include <linux/cpu.h>
 
 #include "x86_emulate.h"
 #include "segment_descriptor.h"
@@ -2039,6 +2040,64 @@ static struct notifier_block kvm_reboot_notifier = {
        .priority = 0,
 };
 
+/*
+ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+ * cached on it.
+ */
+static void decache_vcpus_on_cpu(int cpu)
+{
+       struct kvm *vm;
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       spin_lock(&kvm_lock);
+       list_for_each_entry(vm, &vm_list, vm_list)
+               for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+                       vcpu = &vm->vcpus[i];
+                       /*
+                        * If the vcpu is locked, then it is running on some
+                        * other cpu and therefore it is not cached on the
+                        * cpu in question.
+                        *
+                        * If it's not locked, check the last cpu it executed
+                        * on.
+                        */
+                       if (mutex_trylock(&vcpu->mutex)) {
+                               if (vcpu->cpu == cpu) {
+                                       kvm_arch_ops->vcpu_decache(vcpu);
+                                       vcpu->cpu = -1;
+                               }
+                               mutex_unlock(&vcpu->mutex);
+                       }
+               }
+       spin_unlock(&kvm_lock);
+}
+
+static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
+                          void *v)
+{
+       int cpu = (long)v;
+
+       switch (val) {
+       case CPU_DEAD:
+       case CPU_UP_CANCELED:
+               decache_vcpus_on_cpu(cpu);
+               smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
+                                        NULL, 0, 1);
+               break;
+       case CPU_UP_PREPARE:
+               smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
+                                        NULL, 0, 1);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_cpu_notifier = {
+       .notifier_call = kvm_cpu_hotplug,
+       .priority = 20, /* must be > scheduler priority */
+};
+
 static __init void kvm_init_debug(void)
 {
        struct kvm_stats_debugfs_item *p;
@@ -2085,6 +2144,9 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
            return r;
 
        on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
+       r = register_cpu_notifier(&kvm_cpu_notifier);
+       if (r)
+               goto out_free_1;
        register_reboot_notifier(&kvm_reboot_notifier);
 
        kvm_chardev_ops.owner = module;
@@ -2099,6 +2161,8 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
 
 out_free:
        unregister_reboot_notifier(&kvm_reboot_notifier);
+       unregister_cpu_notifier(&kvm_cpu_notifier);
+out_free_1:
        on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
        kvm_arch_ops->hardware_unsetup();
        return r;
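
The key idea in decache_vcpus_on_cpu() above is the trylock test: the per-vcpu mutex is held for as long as a vcpu is in use, so a successful mutex_trylock() proves the vcpu is idle and its vcpu->cpu field can be inspected and cleared safely, while a failed trylock means the vcpu is running on some other cpu and therefore cannot have state cached on the departing one. A rough user-space analogue of that pattern, with hypothetical names and pthreads standing in for kernel mutexes (compile with -pthread), looks like this:

/* User-space analogue of the "decache only if idle" trylock pattern.
 * Hypothetical example, not kernel code. */
#include <pthread.h>
#include <stdio.h>

struct vcpu_like {
        pthread_mutex_t lock;   /* held while the object is "running" */
        int last_cpu;           /* cpu the object last executed on    */
};

/* Flush state cached on 'cpu', skipping objects that are currently busy. */
static void decache_idle_objects(struct vcpu_like *objs, int n, int cpu)
{
        int i;

        for (i = 0; i < n; i++) {
                /* trylock succeeds => nobody is running this object now */
                if (pthread_mutex_trylock(&objs[i].lock) == 0) {
                        if (objs[i].last_cpu == cpu) {
                                printf("decache object %d from cpu %d\n",
                                       i, cpu);
                                objs[i].last_cpu = -1;
                        }
                        pthread_mutex_unlock(&objs[i].lock);
                }
                /* trylock failed => the object is running elsewhere, so
                 * nothing of it can be cached on the departing cpu. */
        }
}

int main(void)
{
        struct vcpu_like v[2] = {
                { PTHREAD_MUTEX_INITIALIZER, 0 },
                { PTHREAD_MUTEX_INITIALIZER, 1 },
        };

        decache_idle_objects(v, 2, 1);
        return 0;
}

Clearing the last-cpu field mirrors vcpu->cpu = -1 in the patch, which forces the vcpu's hardware state to be fully reloaded on whichever cpu it runs on next.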
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 4fa50bd0dceb82ee71c3a8f374caa008f1e74534..83da4ea150a335c3543315a7adef7855b1f047ce 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -609,6 +609,10 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
        put_cpu();
 }
 
+static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
        vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
@@ -1677,6 +1681,7 @@ static struct kvm_arch_ops svm_arch_ops = {
 
        .vcpu_load = svm_vcpu_load,
        .vcpu_put = svm_vcpu_put,
+       .vcpu_decache = svm_vcpu_decache,
 
        .set_guest_debug = svm_guest_debug,
        .get_msr = svm_get_msr,
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 28da0cae64a7d9578cc6b23a734268a4299935c1..1e640b899175df4f178d5828e1d2025ed9551e57 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -250,6 +250,11 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
        put_cpu();
 }
 
+static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
+{
+       vcpu_clear(vcpu);
+}
+
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
        return vmcs_readl(GUEST_RFLAGS);
@@ -509,7 +514,7 @@ static __init int vmx_disabled_by_bios(void)
        return (msr & 5) == 1; /* locked but not enabled */
 }
 
-static __init void hardware_enable(void *garbage)
+static void hardware_enable(void *garbage)
 {
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
@@ -2023,6 +2028,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
 
        .vcpu_load = vmx_vcpu_load,
        .vcpu_put = vmx_vcpu_put,
+       .vcpu_decache = vmx_vcpu_decache,
 
        .set_guest_debug = set_guest_debug,
        .get_msr = vmx_get_msr,