KVM: x86 emulator: simplify rip relative decoding
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 00acf1301a151663beb43151a569f2022d6e9dde..d1db5aa5c7f42dfd38d4426ee36c7a92c946fdc1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -72,6 +72,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio_exits", VCPU_STAT(mmio_exits) },
        { "signal_exits", VCPU_STAT(signal_exits) },
        { "irq_window", VCPU_STAT(irq_window_exits) },
+       { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
@@ -173,6 +174,12 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
        kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
 }
 
+void kvm_inject_nmi(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.nmi_pending = 1;
+}
+EXPORT_SYMBOL_GPL(kvm_inject_nmi);
+
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
        WARN_ON(vcpu->arch.exception.pending);
@@ -492,8 +499,8 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 {
        static int version;
-       struct kvm_wall_clock wc;
-       struct timespec wc_ts;
+       struct pvclock_wall_clock wc;
+       struct timespec now, sys, boot;
 
        if (!wall_clock)
                return;
@@ -502,10 +509,19 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
 
-       wc_ts = current_kernel_time();
-       wc.wc_sec = wc_ts.tv_sec;
-       wc.wc_nsec = wc_ts.tv_nsec;
-       wc.wc_version = version;
+       /*
+        * The guest calculates current wall clock time by adding
+        * system time (updated by kvm_write_guest_time below) to the
+        * wall clock specified here.  Guest system time equals host
+        * system time for us, thus we must fill in host boot time here.
+        */
+       now = current_kernel_time();
+       ktime_get_ts(&sys);
+       boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
+
+       wc.sec = boot.tv_sec;
+       wc.nsec = boot.tv_nsec;
+       wc.version = version;
 
        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
 
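The comment above is the substance of this hunk: the guest adds its pvclock system time (host monotonic time) to the wall-clock value written here, so KVM must store the host's boot timestamp rather than the current wall time the old code wrote. A minimal userspace sketch of the same arithmetic, using POSIX clocks in place of the in-kernel current_kernel_time()/ktime_get_ts() helpers (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative only: derive the boot timestamp the same way the hunk
 * does, boot = wall_now - monotonic_now, with POSIX clocks standing in
 * for current_kernel_time()/ktime_get_ts().  Like ktime_get_ts(),
 * CLOCK_MONOTONIC does not advance across suspend. */
int main(void)
{
	struct timespec now, sys;
	int64_t boot_ns;

	clock_gettime(CLOCK_REALTIME, &now);	/* wall clock */
	clock_gettime(CLOCK_MONOTONIC, &sys);	/* time since boot */

	boot_ns = (now.tv_sec * 1000000000LL + now.tv_nsec)
		- (sys.tv_sec * 1000000000LL + sys.tv_nsec);

	printf("host boot time: %lld.%09lld\n",
	       (long long)(boot_ns / 1000000000LL),
	       (long long)(boot_ns % 1000000000LL));
	return 0;
}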
@@ -513,6 +529,45 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
 }
 
+static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
+{
+       uint32_t quotient, remainder;
+
+       /* Don't try to replace this with do_div(); this one calculates
+        * "(dividend << 32) / divisor", not "dividend / divisor". */
+       __asm__ ( "divl %4"
+                 : "=a" (quotient), "=d" (remainder)
+                 : "0" (0), "1" (dividend), "r" (divisor) );
+       return quotient;
+}
+
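div_frac() returns a 32.32 fixed-point fraction: divl divides the 64-bit value (dividend << 32) by divisor. The call site below guarantees dividend < divisor, so the quotient always fits in 32 bits and divl cannot fault. A portable sketch of the same computation, assuming plain 64-bit arithmetic is acceptable:

#include <stdint.h>
#include <stdio.h>

/* Portable equivalent of div_frac(): the low 32 bits of
 * ((uint64_t)dividend << 32) / divisor.  Multiplying a value by the
 * result and shifting right by 32 approximates value * dividend / divisor. */
static uint32_t div_frac_portable(uint32_t dividend, uint32_t divisor)
{
	return (uint32_t)(((uint64_t)dividend << 32) / divisor);
}

int main(void)
{
	/* 1e9 / 2e9 as a 0.32 fixed-point fraction: 0x80000000 == 1/2 */
	printf("0x%08x\n", div_frac_portable(1000000000u, 2000000000u));
	return 0;
}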
+static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
+{
+       uint64_t nsecs = 1000000000LL;
+       int32_t  shift = 0;
+       uint64_t tps64;
+       uint32_t tps32;
+
+       tps64 = tsc_khz * 1000LL;
+       while (tps64 > nsecs*2) {
+               tps64 >>= 1;
+               shift--;
+       }
+
+       tps32 = (uint32_t)tps64;
+       while (tps32 <= (uint32_t)nsecs) {
+               tps32 <<= 1;
+               shift++;
+       }
+
+       hv_clock->tsc_shift = shift;
+       hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
+
+       pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
+                __func__, tsc_khz, hv_clock->tsc_shift,
+                hv_clock->tsc_to_system_mul);
+}
+
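kvm_set_time_scale() normalizes ticks-per-second into the interval (1e9, 2e9], so tsc_to_system_mul = (1e9 << 32) / tps32 always lands in [2^31, 2^32), with tsc_shift recording the power-of-two adjustment. Worked example: tsc_khz = 2000000 (a 2 GHz TSC) gives tps64 = 2e9, shift = 0, and mul = 2^31 = 0x80000000, i.e. nanoseconds = cycles / 2. A sketch of how a pvclock guest consumes the pair (names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* How a guest consumes (tsc_shift, tsc_to_system_mul):
 * ns = (delta shifted by tsc_shift) * mul >> 32.  The 64x32 multiply
 * here can overflow for very large deltas; real guest code uses a
 * wider product. */
static uint64_t pvclock_scale(uint64_t delta, int8_t shift, uint32_t mul)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	return (delta * mul) >> 32;
}

int main(void)
{
	/* values kvm_set_time_scale() produces for a 2 GHz TSC:
	 * shift = 0, mul = 0x80000000, so 3000 cycles -> 1500 ns */
	printf("%llu\n",
	       (unsigned long long)pvclock_scale(3000, 0, 0x80000000u));
	return 0;
}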
 static void kvm_write_guest_time(struct kvm_vcpu *v)
 {
        struct timespec ts;
@@ -523,6 +578,11 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
        if (!vcpu->time_page)
                return;
 
+       if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) {
+               kvm_set_time_scale(tsc_khz, &vcpu->hv_clock);
+               vcpu->hv_clock_tsc_khz = tsc_khz;
+       }
+
        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
@@ -537,20 +597,52 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
        /*
         * The interface expects us to write an even number signaling that the
         * update is finished. Since the guest won't see the intermediate
-        * state, we just write "2" at the end
+        * state, we just increase by 2 at the end.
         */
-       vcpu->hv_clock.version = 2;
+       vcpu->hv_clock.version += 2;
 
        shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
 
        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-               sizeof(vcpu->hv_clock));
+              sizeof(vcpu->hv_clock));
 
        kunmap_atomic(shared_kaddr, KM_USER0);
 
        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
 }
 
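The version field behaves like a seqlock: the guest retries a read while the version is odd (update in flight) or changes across the read. Incrementing by 2 instead of storing a constant 2 is what lets the guest detect that a whole update completed between its two version reads. A guest-side sketch of that protocol, with an illustrative struct subset and a compiler barrier standing in for the real read barriers:

#include <stdint.h>

/* Illustrative subset of the shared page; not the exact pvclock layout. */
struct time_info_sketch {
	uint32_t version;
	uint64_t tsc_timestamp;
	uint64_t system_time;
	uint32_t tsc_to_system_mul;
	int8_t   tsc_shift;
};

#define barrier() __asm__ __volatile__("" ::: "memory")

/* Retry while an update is in flight (odd version) or one completed
 * mid-read (version changed).  Storing a constant 2, as the old code
 * did, would make the second condition useless. */
static struct time_info_sketch
read_time_info(volatile struct time_info_sketch *src)
{
	struct time_info_sketch snap;
	uint32_t version;

	do {
		version = src->version;
		barrier();
		snap.tsc_timestamp     = src->tsc_timestamp;
		snap.system_time       = src->system_time;
		snap.tsc_to_system_mul = src->tsc_to_system_mul;
		snap.tsc_shift         = src->tsc_shift;
		barrier();
	} while ((version & 1) || version != src->version);

	snap.version = version;
	return snap;
}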
+static bool msr_mtrr_valid(unsigned msr)
+{
+       switch (msr) {
+       case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
+       case MSR_MTRRfix64K_00000:
+       case MSR_MTRRfix16K_80000:
+       case MSR_MTRRfix16K_A0000:
+       case MSR_MTRRfix4K_C0000:
+       case MSR_MTRRfix4K_C8000:
+       case MSR_MTRRfix4K_D0000:
+       case MSR_MTRRfix4K_D8000:
+       case MSR_MTRRfix4K_E0000:
+       case MSR_MTRRfix4K_E8000:
+       case MSR_MTRRfix4K_F0000:
+       case MSR_MTRRfix4K_F8000:
+       case MSR_MTRRdefType:
+       case MSR_IA32_CR_PAT:
+               return true;
+       case 0x2f8:
+               return true;
+       }
+       return false;
+}
+
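The first case label uses GCC's case-range extension: variable-range MTRRs are PHYSBASEn/PHYSMASKn pairs starting at MSR 0x200, so KVM_NR_VAR_MTRR ranges occupy 2 * KVM_NR_VAR_MTRR consecutive MSRs. The accessors below index a per-vcpu array by (msr - 0x200), which also covers the fixed-range and default-type MSRs scattered up to 0x2ff. A layout sketch (KVM_NR_VAR_MTRR is 8 in this kernel):

#include <stdio.h>

#define KVM_NR_VAR_MTRR 8	/* the value this kernel uses */

/* Variable-range MTRRs are base/mask pairs from MSR 0x200 upward, so 8
 * ranges span 0x200..0x20f; the backing array is indexed by msr - 0x200. */
int main(void)
{
	unsigned msr;

	for (msr = 0x200; msr < 0x200 + 2 * KVM_NR_VAR_MTRR; msr++)
		printf("MSR 0x%03x = %s%u -> mtrr[%u]\n", msr,
		       (msr & 1) ? "PHYSMASK" : "PHYSBASE",
		       (msr - 0x200) / 2, msr - 0x200);
	return 0;
}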
+static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+       if (!msr_mtrr_valid(msr))
+               return 1;
+
+       vcpu->arch.mtrr[msr - 0x200] = data;
+       return 0;
+}
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
@@ -572,8 +664,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
-       case 0x200 ... 0x2ff: /* MTRRs */
                break;
+       case 0x200 ... 0x2ff:
+               return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                kvm_set_apic_base(vcpu, data);
                break;
@@ -599,10 +692,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
 
-               vcpu->arch.hv_clock.tsc_to_system_mul =
-                                       clocksource_khz2mult(tsc_khz, 22);
-               vcpu->arch.hv_clock.tsc_shift = 22;
-
                down_read(&current->mm->mmap_sem);
                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
@@ -635,6 +724,15 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
        return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
 }
 
+static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+       if (!msr_mtrr_valid(msr))
+               return 1;
+
+       *pdata = vcpu->arch.mtrr[msr - 0x200];
+       return 0;
+}
+
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
        u64 data;
@@ -656,11 +754,13 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_MC0_MISC+16:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_EBL_CR_POWERON:
-               /* MTRR registers */
-       case 0xfe:
-       case 0x200 ... 0x2ff:
                data = 0;
                break;
+       case MSR_MTRRcap:
+               data = 0x500 | KVM_NR_VAR_MTRR;
+               break;
+       case 0x200 ... 0x2ff:
+               return get_msr_mtrr(vcpu, msr, pdata);
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
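The MTRRcap value above, 0x500 | KVM_NR_VAR_MTRR, follows the architectural layout: bits 7:0 report VCNT (the variable-range count), bit 8 advertises fixed-range MTRRs, and bit 10 advertises the write-combining memory type; 0x500 sets bits 8 and 10. A quick decode sketch:

#include <stdint.h>
#include <stdio.h>

/* Architectural MTRRcap layout: bits 7:0 VCNT, bit 8 fixed-range
 * support, bit 10 write-combining support.  0x500 | 8 therefore
 * advertises 8 variable ranges, fixed ranges, and WC. */
int main(void)
{
	uint64_t cap = 0x500 | 8;	/* KVM_NR_VAR_MTRR == 8 */

	printf("VCNT=%llu FIX=%llu WC=%llu\n",
	       (unsigned long long)(cap & 0xff),
	       (unsigned long long)((cap >> 8) & 1),
	       (unsigned long long)((cap >> 10) & 1));
	return 0;
}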
@@ -768,41 +868,6 @@ out:
        return r;
 }
 
-/*
- * Make sure that a cpu that is being hot-unplugged does not have any vcpus
- * cached on it.
- */
-void decache_vcpus_on_cpu(int cpu)
-{
-       struct kvm *vm;
-       struct kvm_vcpu *vcpu;
-       int i;
-
-       spin_lock(&kvm_lock);
-       list_for_each_entry(vm, &vm_list, vm_list)
-               for (i = 0; i < KVM_MAX_VCPUS; ++i) {
-                       vcpu = vm->vcpus[i];
-                       if (!vcpu)
-                               continue;
-                       /*
-                        * If the vcpu is locked, then it is running on some
-                        * other cpu and therefore it is not cached on the
-                        * cpu in question.
-                        *
-                        * If it's not locked, check the last cpu it executed
-                        * on.
-                        */
-                       if (mutex_trylock(&vcpu->mutex)) {
-                               if (vcpu->cpu == cpu) {
-                                       kvm_x86_ops->vcpu_decache(vcpu);
-                                       vcpu->cpu = -1;
-                               }
-                               mutex_unlock(&vcpu->mutex);
-                       }
-               }
-       spin_unlock(&kvm_lock);
-}
-
 int kvm_dev_ioctl_check_extension(long ext)
 {
        int r;
@@ -820,6 +885,9 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_MP_STATE:
                r = 1;
                break;
+       case KVM_CAP_COALESCED_MMIO:
+               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+               break;
        case KVM_CAP_VAPIC:
                r = !kvm_x86_ops->cpu_has_accelerated_tpr();
                break;
@@ -1732,13 +1800,14 @@ static void kvm_init_msr_list(void)
  * Only the apic needs an MMIO device hook, so shortcut now.
  */
 static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
-                                               gpa_t addr)
+                                               gpa_t addr, int len,
+                                               int is_write)
 {
        struct kvm_io_device *dev;
 
        if (vcpu->arch.apic) {
                dev = &vcpu->arch.apic->dev;
-               if (dev->in_range(dev, addr))
+               if (dev->in_range(dev, addr, len, is_write))
                        return dev;
        }
        return NULL;
@@ -1746,13 +1815,15 @@ static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
 
 
 static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
-                                               gpa_t addr)
+                                               gpa_t addr, int len,
+                                               int is_write)
 {
        struct kvm_io_device *dev;
 
-       dev = vcpu_find_pervcpu_dev(vcpu, addr);
+       dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
        if (dev == NULL)
-               dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+               dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
+                                         is_write);
        return dev;
 }
 
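These hunks track a widened kvm_io_device in_range() contract: a device now claims an access by address, length, and direction rather than address alone (the coalesced-MMIO support added in this series needs the extra information, e.g. to claim only writes). A standalone sketch of such a predicate; the struct and names are illustrative, not the exact iodev.h API:

#include <stdio.h>

/* Illustrative stand-in for the widened callback.  A device claims an
 * access only if the whole [addr, addr+len) window falls in its range
 * and the direction fits. */
struct mmio_dev_sketch {
	unsigned long base, size;
	int write_only;		/* e.g. a coalesced-MMIO style device */
};

static int dev_in_range(const struct mmio_dev_sketch *dev,
			unsigned long addr, int len, int is_write)
{
	if (addr < dev->base || addr + len > dev->base + dev->size)
		return 0;
	return dev->write_only ? is_write : 1;
}

int main(void)
{
	struct mmio_dev_sketch dev = { 0xfee00000, 0x1000, 0 };

	printf("%d %d\n",
	       dev_in_range(&dev, 0xfee00030, 4, 0),	/* 1: inside */
	       dev_in_range(&dev, 0xfee00ffe, 4, 1));	/* 0: crosses end */
	return 0;
}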
@@ -1820,7 +1891,7 @@ mmio:
         * Is this MMIO handled locally?
         */
        mutex_lock(&vcpu->kvm->lock);
-       mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+       mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
        if (mmio_dev) {
                kvm_iodevice_read(mmio_dev, gpa, bytes, val);
                mutex_unlock(&vcpu->kvm->lock);
@@ -1875,7 +1946,7 @@ mmio:
         * Is this MMIO handled locally?
         */
        mutex_lock(&vcpu->kvm->lock);
-       mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
+       mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
        if (mmio_dev) {
                kvm_iodevice_write(mmio_dev, gpa, bytes, val);
                mutex_unlock(&vcpu->kvm->lock);
@@ -1971,6 +2042,7 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
+       KVMTRACE_0D(CLTS, vcpu, handler);
        kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
        return X86EMUL_CONTINUE;
 }
@@ -2004,21 +2076,19 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 {
-       static int reported;
        u8 opcodes[4];
        unsigned long rip = vcpu->arch.rip;
        unsigned long rip_linear;
 
-       rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
-
-       if (reported)
+       if (!printk_ratelimit())
                return;
 
+       rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
+
        emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
 
        printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
               context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
-       reported = 1;
 }
 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
 
@@ -2251,9 +2321,10 @@ static void pio_string_write(struct kvm_io_device *pio_dev,
 }
 
 static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
-                                              gpa_t addr)
+                                              gpa_t addr, int len,
+                                              int is_write)
 {
-       return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
+       return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
 }
 
 int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
@@ -2282,11 +2353,10 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 
        kvm_x86_ops->cache_regs(vcpu);
        memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
-       kvm_x86_ops->decache_regs(vcpu);
 
        kvm_x86_ops->skip_emulated_instruction(vcpu);
 
-       pio_dev = vcpu_find_pio_dev(vcpu, port);
+       pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
        if (pio_dev) {
                kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
                complete_pio(vcpu);
@@ -2368,7 +2438,9 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                }
        }
 
-       pio_dev = vcpu_find_pio_dev(vcpu, port);
+       pio_dev = vcpu_find_pio_dev(vcpu, port,
+                                   vcpu->arch.pio.cur_count,
+                                   !vcpu->arch.pio.in);
        if (!vcpu->arch.pio.in) {
                /* string PIO write */
                ret = pio_copy_data(vcpu);
@@ -2551,27 +2623,41 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
 
 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 {
+       unsigned long value;
+
        kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        switch (cr) {
        case 0:
-               return vcpu->arch.cr0;
+               value = vcpu->arch.cr0;
+               break;
        case 2:
-               return vcpu->arch.cr2;
+               value = vcpu->arch.cr2;
+               break;
        case 3:
-               return vcpu->arch.cr3;
+               value = vcpu->arch.cr3;
+               break;
        case 4:
-               return vcpu->arch.cr4;
+               value = vcpu->arch.cr4;
+               break;
        case 8:
-               return kvm_get_cr8(vcpu);
+               value = kvm_get_cr8(vcpu);
+               break;
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
                return 0;
        }
+       KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
+                   (u32)((u64)value >> 32), handler);
+
+       return value;
 }
 
 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
                     unsigned long *rflags)
 {
+       KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
+                   (u32)((u64)val >> 32), handler);
+
        switch (cr) {
        case 0:
                kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
@@ -2759,6 +2845,8 @@ again:
        if (vcpu->requests) {
                if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
                        __kvm_migrate_timers(vcpu);
+               if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+                       kvm_x86_ops->tlb_flush(vcpu);
                if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
                                       &vcpu->requests)) {
                        kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -2772,6 +2860,7 @@ again:
                }
        }
 
+       clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
        kvm_inject_pending_timer_irqs(vcpu);
 
        preempt_disable();
@@ -2781,21 +2870,13 @@ again:
 
        local_irq_disable();
 
-       if (need_resched()) {
+       if (vcpu->requests || need_resched()) {
                local_irq_enable();
                preempt_enable();
                r = 1;
                goto out;
        }
 
-       if (vcpu->requests)
-               if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
-                       local_irq_enable();
-                       preempt_enable();
-                       r = 1;
-                       goto out;
-               }
-
        if (signal_pending(current)) {
                local_irq_enable();
                preempt_enable();
@@ -2825,9 +2906,6 @@ again:
 
        kvm_guest_enter();
 
-       if (vcpu->requests)
-               if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
-                       kvm_x86_ops->tlb_flush(vcpu);
 
        KVMTRACE_0D(VMENTRY, vcpu, entryexit);
        kvm_x86_ops->run(vcpu, kvm_run);
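The reordering above is about the race window before guest entry: all request bits, including the TLB flush that used to be checked just before VMENTRY, are now serviced in the request loop at the top, and after local_irq_disable() a single vcpu->requests || need_resched() test bails back out. Any request raised by another CPU between the loop and entry is therefore caught before the guest runs. A compact sketch of the pattern, with C11 atomics standing in for the kernel's bitops:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative ordering from the hunks above: service requests,
 * "disable interrupts", then re-check.  Any bit set in the window
 * forces a retry instead of entering the guest with, say, a stale TLB. */
enum { REQ_TLB_FLUSH, REQ_MMU_RELOAD };

static bool try_enter_guest(atomic_ulong *requests)
{
	unsigned long req = atomic_exchange(requests, 0);

	/* ... handle each bit of req (TLB flush, MMU reload, ...) ... */
	(void)req;

	/* local_irq_disable() happens here in the real code */
	if (atomic_load(requests) != 0)
		return false;	/* raced with a new request: bail out */

	/* VM entry with interrupts off */
	return true;
}

int main(void)
{
	atomic_ulong requests = 0;

	atomic_fetch_or(&requests, 1UL << REQ_TLB_FLUSH);
	return !try_enter_guest(&requests);
}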
@@ -3029,8 +3107,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        return 0;
 }
 
-static void get_segment(struct kvm_vcpu *vcpu,
-                       struct kvm_segment *var, int seg)
+void kvm_get_segment(struct kvm_vcpu *vcpu,
+                    struct kvm_segment *var, int seg)
 {
        kvm_x86_ops->get_segment(vcpu, var, seg);
 }
@@ -3039,7 +3117,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 {
        struct kvm_segment cs;
 
-       get_segment(vcpu, &cs, VCPU_SREG_CS);
+       kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
        *db = cs.db;
        *l = cs.l;
 }
@@ -3053,15 +3131,15 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
        vcpu_load(vcpu);
 
-       get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
-       get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
-       get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
-       get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
-       get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
-       get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
+       kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
+       kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
+       kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
+       kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
+       kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
+       kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
 
-       get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
-       get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
+       kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
+       kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
        kvm_x86_ops->get_idt(vcpu, &dt);
        sregs->idt.limit = dt.limit;
@@ -3113,7 +3191,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-static void set_segment(struct kvm_vcpu *vcpu,
+static void kvm_set_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
 {
        kvm_x86_ops->set_segment(vcpu, var, seg);
@@ -3150,7 +3228,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
        if (selector & 1 << 2) {
                struct kvm_segment kvm_seg;
 
-               get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
+               kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
 
                if (kvm_seg.unusable)
                        dtable->limit = 0;
@@ -3256,7 +3334,7 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
 {
        struct kvm_segment kvm_seg;
 
-       get_segment(vcpu, &kvm_seg, seg);
+       kvm_get_segment(vcpu, &kvm_seg, seg);
        return kvm_seg.selector;
 }
 
@@ -3272,8 +3350,8 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-                                  int type_bits, int seg)
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+                               int type_bits, int seg)
 {
        struct kvm_segment kvm_seg;
 
@@ -3286,7 +3364,7 @@ static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                if (!kvm_seg.s)
                        kvm_seg.unusable = 1;
 
-       set_segment(vcpu, &kvm_seg, seg);
+       kvm_set_segment(vcpu, &kvm_seg, seg);
        return 0;
 }
 
@@ -3332,25 +3410,25 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
        vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
        vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
 
-       if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
+       if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
                return 1;
 
-       if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+       if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
                return 1;
 
-       if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+       if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
                return 1;
 
-       if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
                return 1;
 
-       if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
                return 1;
 
-       if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
+       if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
                return 1;
 
-       if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
+       if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
                return 1;
        return 0;
 }
@@ -3391,24 +3469,24 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
        vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
        vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
 
-       if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
+       if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
                return 1;
 
-       if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+       if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
                return 1;
 
-       if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+       if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
                return 1;
 
-       if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
                return 1;
 
-       if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
                return 1;
        return 0;
 }
 
-int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
+static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
                       struct desc_struct *cseg_desc,
                       struct desc_struct *nseg_desc)
 {
@@ -3431,7 +3509,7 @@ out:
        return ret;
 }
 
-int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
+static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                       struct desc_struct *cseg_desc,
                       struct desc_struct *nseg_desc)
 {
@@ -3461,7 +3539,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        struct desc_struct nseg_desc;
        int ret = 0;
 
-       get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+       kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
 
        if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
                goto out;
@@ -3520,7 +3598,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
        seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
        tr_seg.type = 11;
-       set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+       kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
 out:
        kvm_x86_ops->decache_regs(vcpu);
        return ret;
@@ -3587,15 +3665,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                }
        }
 
-       set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
-       set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
-       set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
-       set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
-       set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
-       set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
+       kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
+       kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
+       kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
+       kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
+       kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
+       kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
 
-       set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
-       set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
+       kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
+       kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
        vcpu_put(vcpu);
 
@@ -4003,6 +4081,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
         * So need not to call smp_call_function_single() in that case.
         */
        if (vcpu->guest_mode && vcpu->cpu != cpu)
-               smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+               smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
        put_cpu();
 }
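The dropped argument tracks the generic-IPI rework that removed the unused nonatomic/retry parameter from smp_call_function_single(). For reference, the resulting signature:

/* Post-rework prototype (the nonatomic/retry argument is gone):
 *
 *   int smp_call_function_single(int cpu, void (*func)(void *info),
 *                                void *info, int wait);
 *
 * wait == 0 above: the kick IPI is fire-and-forget; nothing here needs
 * to block until the remote handler has run. */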