/*
 * kvm_flush_remote_tlbs - request a TLB flush on every vcpu of @kvm.
 *
 * Sets KVM_REQ_TLB_FLUSH on each populated vcpu slot, then IPIs the
 * physical cpus that are currently running one of those vcpus so they
 * notice the request.  get_cpu() disables preemption so that the local
 * cpu id ("me") cannot go stale while the target mask is being built;
 * put_cpu() on the way out re-enables preemption on every path.
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();		/* pin this cpu: raw_smp_processor_id() would
				 * be unstable under preemption */
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)	/* vcpu slots may be unpopulated */
			continue;
		/* bit already set: a flush request (and IPI) is pending */
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		/* -1 means "not running"; skip ourselves, we flush locally */
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		goto out;	/* still must put_cpu() */
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}
/*
 * kvm_reload_remote_mmus - request an MMU reload on every vcpu of @kvm.
 *
 * Mirrors kvm_flush_remote_tlbs(): sets KVM_REQ_MMU_RELOAD on each
 * populated vcpu slot and IPIs the cpus currently running those vcpus.
 * get_cpu()/put_cpu() bracket the mask construction so the local cpu id
 * stays valid (preemption disabled) on all exit paths.
 */
void kvm_reload_remote_mmus(struct kvm *kvm)
{
	int i, cpu, me;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	me = get_cpu();		/* disable preemption; "me" must not change */
	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)	/* vcpu slots may be unpopulated */
			continue;
		/* bit already set: a reload request is already pending */
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		/* -1 means "not running"; no IPI needed for ourselves */
		if (cpu != -1 && cpu != me)
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		goto out;	/* still must put_cpu() */
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
	put_cpu();
}
if (mem->slot >= kvm->nmemslots)
kvm->nmemslots = mem->slot + 1;
+ if (!npages)
+ kvm_arch_flush_shadow(kvm);
+
*memslot = new;
r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
*/
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
- int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu);
+ int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
if (fd < 0)
kvm_put_kvm(vcpu->kvm);
return fd;
kvm = kvm_create_vm();
if (IS_ERR(kvm))
return PTR_ERR(kvm);
- fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm);
+ fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
if (fd < 0)
kvm_put_kvm(kvm);