/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 *
 */
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom,func) \
        (((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
        (((VROMLONGFUNC *)(rom->func)) (arg))
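/*
 * Note: the ROM entries are plain function addresses stored in the
 * vrom_header, so, for example, the relocation lookup used throughout
 * this file is simply:
 *
 *	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_IRET);
 *
 * The regparm attributes above must match the VMI ROM calling
 * convention (arguments in registers, not on the stack).
 */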
static struct vrom_header *vmi_rom;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
static int disable_noidle;
static int disable_vmi_timer;
/* Cached VMI operations */
static struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
	void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
	void (*set_kernel_stack)(u32 selector, u32 sp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, void *, u32, u32);
	void (*_flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*halt)(void);
	void (*set_lazy_mode)(int mode);
} vmi_ops;

/* Cached VMI timer operations */
struct vmi_timer_ops vmi_timer_ops;
/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9
#define MNEM_RET  0xc3

#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5
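/*
 * Patch in a direct 5-byte CALL or JMP: one opcode byte plus a 32-bit
 * displacement.  On x86 the rel32 operand is relative to the address
 * of the *next* instruction, hence the "dest - ip - 5" below.
 */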
static inline void patch_offset(void *insnbuf,
				unsigned long ip, unsigned long dest)
{
	*(unsigned long *)(insnbuf+1) = dest-ip-5;
}
static unsigned patch_internal(int call, unsigned len, void *insnbuf,
			       unsigned long ip)
{
	u64 reloc;
	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
	switch(rel->type) {
		case VMI_RELOCATION_CALL_REL:
			BUG_ON(len < 5);
			*(char *)insnbuf = MNEM_CALL;
			patch_offset(insnbuf, ip, (unsigned long)rel->eip);
			return 5;

		case VMI_RELOCATION_JUMP_REL:
			BUG_ON(len < 5);
			*(char *)insnbuf = MNEM_JMP;
			patch_offset(insnbuf, ip, (unsigned long)rel->eip);
			return 5;

		case VMI_RELOCATION_NOP:
			/* obliterate the whole thing */
			return 0;

		case VMI_RELOCATION_NONE:
			/* leave native code in place */
			break;

		default:
			BUG();
	}
	return len;
}
/*
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
			  unsigned long ip, unsigned len)
{
	switch (type) {
		case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
			return patch_internal(VMI_CALL_DisableInterrupts, len,
					      insns, ip);
		case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
			return patch_internal(VMI_CALL_EnableInterrupts, len,
					      insns, ip);
		case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
			return patch_internal(VMI_CALL_SetInterruptMask, len,
					      insns, ip);
		case PARAVIRT_PATCH(pv_irq_ops.save_fl):
			return patch_internal(VMI_CALL_GetInterruptMask, len,
					      insns, ip);
		case PARAVIRT_PATCH(pv_cpu_ops.iret):
			return patch_internal(VMI_CALL_IRET, len, insns, ip);
		case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret):
			return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
		default:
			break;
	}
	return len;
}
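/*
 * The net effect (sketch): each annotated paravirt call site in the
 * kernel text starts out as an indirect call through the paravirt-ops
 * table and is rewritten at boot into a direct CALL or JMP into the
 * VMI ROM (or dropped entirely for NOP relocations), with the caller
 * (apply_paravirt) nop-padding the remainder of the site.
 */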
/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
                      unsigned int *cx, unsigned int *dx)
{
	int override = 0;
	if (*ax == 1)
		override = 1;
	asm volatile ("call *%6"
		      : "=a" (*ax),
			"=b" (*bx),
			"=c" (*cx),
			"=d" (*dx)
		      : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
	if (override) {
		if (disable_pse)
			*dx &= ~X86_FEATURE_PSE;
		if (disable_pge)
			*dx &= ~X86_FEATURE_PGE;
		if (disable_sep)
			*dx &= ~X86_FEATURE_SEP;
		if (disable_tsc)
			*dx &= ~X86_FEATURE_TSC;
		if (disable_mtrr)
			*dx &= ~X86_FEATURE_MTRR;
	}
}
static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
		write_gdt_entry(gdt, nr, new, 0);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}
static void vmi_set_ldt(const void *addr, unsigned entries)
{
	unsigned cpu = smp_processor_id();
	struct desc_struct desc;

	pack_descriptor(&desc, (unsigned long)addr,
			entries * sizeof(struct desc_struct) - 1,
			DESC_LDT, 0);
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}
static void vmi_set_tr(void)
{
	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}

static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	u32 *idt_entry = (u32 *)g;
	vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]);
}

static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	u32 *gdt_entry = (u32 *)desc;
	vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]);
}
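/*
 * Note: a 32-bit gate or segment descriptor is 8 bytes, so the two
 * u32s handed to the ROM above are simply its low and high words.
 */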
static void vmi_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
}
static void vmi_flush_tlb_user(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}
#ifdef CONFIG_DEBUG_PAGE_TYPE

#ifdef CONFIG_X86_PAE
#define MAX_BOOT_PTS (2048+4+1)
#else
#define MAX_BOOT_PTS (1024+1)
#endif

/*
 * During boot, mem_map is not yet available in paging_init, so stash
 * all the boot page allocations here.
 */
static struct {
	u32 pfn;
	int type;
} boot_page_allocations[MAX_BOOT_PTS];
static int num_boot_page_allocations;
static int boot_allocations_applied;
void vmi_apply_boot_page_allocations(void)
{
	int i;

	for (i = 0; i < num_boot_page_allocations; i++) {
		struct page *page = pfn_to_page(boot_page_allocations[i].pfn);
		page->type = boot_page_allocations[i].type &
				~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	}
	boot_allocations_applied = 1;
}
static void record_page_type(u32 pfn, int type)
{
	BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS);
	boot_page_allocations[num_boot_page_allocations].pfn = pfn;
	boot_page_allocations[num_boot_page_allocations].type = type;
	num_boot_page_allocations++;
}
static void check_zeroed_page(u32 pfn, int type, struct page *page)
{
	u32 *ptr;
	int i;
	int limit = PAGE_SIZE / sizeof(int);

	if (page_address(page))
		ptr = (u32 *)page_address(page);
	else
		ptr = (u32 *)__va(pfn << PAGE_SHIFT);

	/*
	 * When cloning the root in non-PAE mode, only the userspace
	 * pdes need to be zeroed.
	 */
	if (type & VMI_PAGE_CLONE)
		limit = USER_PTRS_PER_PGD;
	for (i = 0; i < limit; i++)
		BUG_ON(ptr[i]);
}
/*
 * We stash the page type into struct page so we can verify the page
 * types are used properly.
 */
static void vmi_set_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - don't track */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		if (type != VMI_PAGE_NORMAL)
			BUG_ON(page->type);
		else
			BUG_ON(page->type == VMI_PAGE_NORMAL);
		page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
		if (type & VMI_PAGE_ZEROED)
			check_zeroed_page(pfn, type, page);
	} else {
		record_page_type(pfn, type);
	}
}
static void vmi_check_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - skip checks */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		BUG_ON((page->type ^ type) & VMI_PAGE_PAE);
		BUG_ON(type == VMI_PAGE_NORMAL && page->type);
		BUG_ON((type & page->type) == 0);
	}
}
#else
#define vmi_set_page_type(p,t) do { } while (0)
#define vmi_check_page_type(p,t) do { } while (0)
#endif
#ifdef CONFIG_HIGHPTE
static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
{
	void *va = kmap_atomic(page, type);

	/*
	 * Internally, the VMI ROM must map virtual addresses to physical
	 * addresses for processing MMU updates.  By the time MMU updates
	 * are issued, this information is typically already lost.
	 * Fortunately, the VMI provides a cache of mapping slots for active
	 * page tables.
	 *
	 * We use slot zero for the linear mapping of physical memory, and
	 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
	 *
	 *  args:                 SLOT                 VA    COUNT PFN
	 */
	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));

	return va;
}
#endif
static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
{
	vmi_set_page_type(pfn, VMI_PAGE_L1);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pd(u32 pfn)
{
	/*
	 * This call comes in very early, before mem_map is setup.
	 * It is called only for swapper_pg_dir, which already has
	 * data on it.
	 */
	vmi_set_page_type(pfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
{
	vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
	vmi_check_page_type(clonepfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}

static void vmi_release_pt(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L1);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

static void vmi_release_pd(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L2);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}
/*
 * Helper macros for MMU update flags.  We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush).  We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs.  We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||    \
                                       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)                           \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)                     \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
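/*
 * For example, a PTE update at user address 'addr' in the current
 * address space composes to
 *
 *	VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (addr & VMI_PAGE_VA_MASK)
 *
 * (plus VMI_PAGE_DEFER for the deferrable variant), while an update
 * to a foreign address space degenerates to just the level flag.
 */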
static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}
static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
	/* XXX because of set_pmd_pte, this can be called on PT or PD layers */
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD);
	vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}
static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
	const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
#else
	const pte_t pte = { pmdval.pud.pgd.pgd };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD);
#endif
	vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}
#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	/*
	 * XXX This is called from set_pmd_pte, but at both PT
	 * and PD layers so the VMI_PAGE_PT flag is wrong.  But
	 * it is only called for large page mapping changes,
	 * the Xen backend doesn't support large pages, and the
	 * ESX backend doesn't depend on the flag.
	 */
	set_64bit((unsigned long long *)ptep, pte_val(pteval));
	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}
static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
}
static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
	const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 };
	vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}
static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif	/* CONFIG_X86_PAE */
#ifdef CONFIG_SMP
static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	struct vmi_ap_state ap;

	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.ldtr = 0;

	ap.cs = __KERNEL_CS;
	ap.eip = (unsigned long) start_eip;
	ap.ss = __KERNEL_DS;
	ap.esp = (unsigned long) start_esp;

	ap.ds = __USER_DS;
	ap.es = __USER_DS;
	ap.fs = __KERNEL_PERCPU;
	ap.gs = 0;

	ap.eflags = 0;

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	if (cpu_has_nx) {
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif
static void vmi_enter_lazy_cpu(void)
{
	paravirt_enter_lazy_cpu();
	vmi_ops.set_lazy_mode(2);
}

static void vmi_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();
	vmi_ops.set_lazy_mode(1);
}

static void vmi_leave_lazy(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	vmi_ops.set_lazy_mode(0);
}
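/*
 * Lazy modes let the hypervisor batch state updates until the next
 * explicit flush.  The mode arguments above (0 = off, 1 = MMU updates,
 * 2 = CPU state) are assumed here to match the VMI SetLazyMode ABI;
 * see the VMI spec constants rather than relying on these literals.
 */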
static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
				rom->api_version_maj,
				rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run anyway, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
		manufacturer, product,
		rom->api_version_maj, rom->api_version_min,
		pci->rom_version_maj, pci->rom_version_min);

	/* Don't allow BSD/MIT here for now because we don't want to end up
	   with any binary only shim layers */
	if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
		printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
			license);
		return 0;
	}

	return 1;
}
/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;
		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}
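/*
 * The scan walks the legacy option ROM window (0xC0000-0xDFFFF) in
 * 2 KB steps, the alignment at which PC option ROM images may begin,
 * relying on check_vmi_rom() to reject anything that is not a valid,
 * GPL-licensed VMI ROM.
 */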
/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_ops.set_linear_mapping)
		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
}
/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return NULL;
}
/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type == VMI_RELOCATION_CALL_REL)		\
		opname = (void *)rel->eip;			\
	else if (rel->type == VMI_RELOCATION_NOP)		\
		opname = (void *)vmi_nop;			\
	else if (rel->type != VMI_RELOCATION_NONE)		\
		printk(KERN_WARNING "VMI: Unknown relocation "	\
				    "type %d for " #vmicall"\n",\
			rel->type);				\
} while (0)
/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub.  Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)		\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
		opname = wrapper;				\
		vmi_ops.cache = (void *)rel->eip;		\
	}							\
} while (0)
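/*
 * Both macros expect 'reloc' and 'rel' locals in the enclosing scope
 * (see activate_vmi below).  As a sketch, para_fill(pv_cpu_ops.clts,
 * CLTS) expands to roughly:
 *
 *	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CLTS);
 *	if (rel->type == VMI_RELOCATION_CALL_REL)
 *		pv_cpu_ops.clts = (void *)rel->eip;
 *	else if (rel->type == VMI_RELOCATION_NOP)
 *		pv_cpu_ops.clts = (void *)vmi_nop;
 *	...
 */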
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!");
		return 0;
	}
	savesegment(cs, kernel_cs);

	pv_info.paravirt_enabled = 1;
	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
	pv_info.name = "vmi";

	pv_init_ops.patch = vmi_patch;
	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions
	 *  CPUID paravirt-op uses pointers, not the native ISA
	 *  halt has no VMI equivalent; all VMI halts are "safe"
	 *  no MSR support yet - just trap and emulate.  VMI uses the
	 *    same ABI as the native ISA, but Linux wants exceptions
	 *    from bogus MSR read / write handled
	 *  rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special it gets wrapped like a present */
	para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);
	para_fill(pv_cpu_ops.clts, CLTS);
	para_fill(pv_cpu_ops.get_debugreg, GetDR);
	para_fill(pv_cpu_ops.set_debugreg, SetDR);
	para_fill(pv_cpu_ops.read_cr0, GetCR0);
	para_fill(pv_mmu_ops.read_cr2, GetCR2);
	para_fill(pv_mmu_ops.read_cr3, GetCR3);
	para_fill(pv_cpu_ops.read_cr4, GetCR4);
	para_fill(pv_cpu_ops.write_cr0, SetCR0);
	para_fill(pv_mmu_ops.write_cr2, SetCR2);
	para_fill(pv_mmu_ops.write_cr3, SetCR3);
	para_fill(pv_cpu_ops.write_cr4, SetCR4);
	para_fill(pv_irq_ops.save_fl, GetInterruptMask);
	para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
	para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
	para_fill(pv_irq_ops.irq_enable, EnableInterrupts);

	para_fill(pv_cpu_ops.wbinvd, WBINVD);
	para_fill(pv_cpu_ops.read_tsc, RDTSC);

	/* The following we emulate with trap and emulate for now */
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	/* paravirt_ops.rdpmc = vmi_rdpmc */
	/* TR interface doesn't pass TR value, wrap */
	para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);

	/* LDT is special, too */
	para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);

	para_fill(pv_cpu_ops.load_gdt, SetGDT);
	para_fill(pv_cpu_ops.load_idt, SetIDT);
	para_fill(pv_cpu_ops.store_gdt, GetGDT);
	para_fill(pv_cpu_ops.store_idt, GetIDT);
	para_fill(pv_cpu_ops.store_tr, GetTR);
	pv_cpu_ops.load_tls = vmi_load_tls;
	para_fill(pv_cpu_ops.write_ldt_entry, WriteLDTEntry);
	para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
		  write_gdt_entry, WriteGDTEntry);
	para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
		  write_idt_entry, WriteIDTEntry);
	para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
	para_fill(pv_cpu_ops.io_delay, IODelay);
	para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
		  set_lazy_mode, SetLazyMode);

	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
		  set_lazy_mode, SetLazyMode);

	/* user and kernel flush are just handled with different flags to FlushTLB */
	para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
	para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
	para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);
	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif
	if (vmi_ops.set_pte) {
		pv_mmu_ops.set_pte = vmi_set_pte;
		pv_mmu_ops.set_pte_at = vmi_set_pte_at;
		pv_mmu_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
		pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
		pv_mmu_ops.set_pte_present = vmi_set_pte_present;
		pv_mmu_ops.set_pud = vmi_set_pud;
		pv_mmu_ops.pte_clear = vmi_pte_clear;
		pv_mmu_ops.pmd_clear = vmi_pmd_clear;
#endif
	}

	if (vmi_ops.update_pte) {
		pv_mmu_ops.pte_update = vmi_update_pte;
		pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
	}
	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	if (vmi_ops.allocate_page) {
		pv_mmu_ops.alloc_pt = vmi_allocate_pt;
		pv_mmu_ops.alloc_pd = vmi_allocate_pd;
		pv_mmu_ops.alloc_pd_clone = vmi_allocate_pd_clone;
	}

	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
	if (vmi_ops.release_page) {
		pv_mmu_ops.release_pt = vmi_release_pt;
		pv_mmu_ops.release_pd = vmi_release_pd;
	}

	/* Set linear is needed in all cases */
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
#ifdef CONFIG_HIGHPTE
	if (vmi_ops.set_linear_mapping)
		pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
#endif
	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	pv_cpu_ops.irq_enable_syscall_ret = (void *)0xfeedbab0;
	pv_cpu_ops.iret = (void *)0xbadbab0;

#ifdef CONFIG_SMP
	para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	para_fill(pv_apic_ops.apic_read, APICRead);
	para_fill(pv_apic_ops.apic_write, APICWrite);
	para_fill(pv_apic_ops.apic_write_atomic, APICWrite);
#endif
	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			vmi_get_function(VMI_CALL_CancelAlarm);
		pv_time_ops.time_init = vmi_time_init;
		pv_time_ops.get_wallclock = vmi_get_wallclock;
		pv_time_ops.set_wallclock = vmi_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
		pv_apic_ops.setup_boot_clock = vmi_time_bsp_init;
		pv_apic_ops.setup_secondary_clock = vmi_time_ap_init;
#endif
		pv_time_ops.sched_clock = vmi_sched_clock;
		pv_time_ops.get_cpu_khz = vmi_cpu_khz;

		/* We have true wallclock functions; disable CMOS clock sync */
		no_sync_cmos_clock = 1;
	} else {
		disable_noidle = 1;
		disable_vmi_timer = 1;
	}
	para_fill(pv_irq_ops.safe_halt, Halt);

	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__parainstructions, __parainstructions_end);

	vmi_bringup();

	return 1;
}

#undef para_fill
void __init vmi_init(void)
{
	unsigned long flags;

	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, bail */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

	local_irq_save(flags);
	activate_vmi();

#ifdef CONFIG_X86_IO_APIC
	/* This is virtual hardware; timer routing is wired correctly */
	no_timer_check = 1;
#endif
	local_irq_restore(flags & X86_EFLAGS_IF);
}
static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_pge")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
		disable_mtrr = 1;
	} else if (!strcmp(arg, "disable_timer")) {
		disable_vmi_timer = 1;
		disable_noidle = 1;
	} else if (!strcmp(arg, "disable_noidle"))
		disable_noidle = 1;
	return 0;
}

early_param("vmi", parse_vmi);
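/*
 * Usage example (kernel command line):
 *
 *	vmi=disable_tsc
 *
 * Note that early_param handlers receive only the value after "vmi=",
 * so combining several of the options above would take repeated vmi=
 * parameters rather than a comma-separated list.
 */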