arch/i386/xen/enlighten.c
/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <xen/interface/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/features.h>
#include <xen/page.h>

#include <asm/paravirt.h>
#include <asm/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>

#include "xen-ops.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU(unsigned long, xen_cr3);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

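/*
 * Point the per-cpu xen_vcpu pointer at this CPU's vcpu_info slot in the
 * shared info page; the interrupt-flag and cr2 code below reads it directly.
 */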
static void xen_vcpu_setup(int cpu)
{
        per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}

static void __init xen_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               paravirt_ops.name);
        printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
}

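/*
 * cpuid is issued under Xen's forced-emulation prefix so the hypervisor
 * traps and emulates it; in addition, edx of leaf 1 is masked here so the
 * kernel doesn't try to use the local APIC, ACPI or thermal monitoring.
 */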
static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx)
{
        unsigned maskedx = ~0;

        /*
         * Mask out inconvenient features, to try and disable as many
         * unsupported kernel subsystems as possible.
         */
        if (*eax == 1)
                maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
                            (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
                            (1 << X86_FEATURE_ACC));   /* thermal monitoring */

        asm(XEN_EMULATE_PREFIX "cpuid"
                : "=a" (*eax),
                  "=b" (*ebx),
                  "=c" (*ecx),
                  "=d" (*edx)
                : "0" (*eax), "2" (*ecx));
        *edx &= maskedx;
}

static void xen_set_debugreg(int reg, unsigned long val)
{
        HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
        return HYPERVISOR_get_debugreg(reg);
}

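/*
 * Interrupt-flag emulation: a paravirtualized guest cannot use cli/sti or
 * pushf/popf directly, so evtchn_upcall_mask in the per-vcpu shared info
 * stands in for EFLAGS.IF (with inverted sense: mask == 1 means events,
 * i.e. "interrupts", are disabled).
 */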
static unsigned long xen_save_fl(void)
{
        struct vcpu_info *vcpu;
        unsigned long flags;

        preempt_disable();
        vcpu = x86_read_percpu(xen_vcpu);
        /* flag has opposite sense of mask */
        flags = !vcpu->evtchn_upcall_mask;
        preempt_enable();

        /* convert to IF type flag
           -0 -> 0x00000000
           -1 -> 0xffffffff
        */
        return (-flags) & X86_EFLAGS_IF;
}

static void xen_restore_fl(unsigned long flags)
{
        struct vcpu_info *vcpu;

        preempt_disable();

        /* convert from IF type flag */
        flags = !(flags & X86_EFLAGS_IF);
        vcpu = x86_read_percpu(xen_vcpu);
        vcpu->evtchn_upcall_mask = flags;

        if (flags == 0) {
                /* Unmask then check (avoid races).  We're only protecting
                   against updates by this CPU, so there's no need for
                   anything stronger. */
                barrier();

                if (unlikely(vcpu->evtchn_upcall_pending))
                        force_evtchn_callback();
                preempt_enable();
        } else
                preempt_enable_no_resched();
}

static void xen_irq_disable(void)
{
        struct vcpu_info *vcpu;
        preempt_disable();
        vcpu = x86_read_percpu(xen_vcpu);
        vcpu->evtchn_upcall_mask = 1;
        preempt_enable_no_resched();
}

static void xen_irq_enable(void)
{
        struct vcpu_info *vcpu;

        preempt_disable();
        vcpu = x86_read_percpu(xen_vcpu);
        vcpu->evtchn_upcall_mask = 0;

        /* Unmask then check (avoid races).  We're only protecting
           against updates by this CPU, so there's no need for
           anything stronger. */
        barrier();

        if (unlikely(vcpu->evtchn_upcall_pending))
                force_evtchn_callback();
        preempt_enable();
}

static void xen_safe_halt(void)
{
        /* Blocking includes an implicit local_irq_enable(). */
        if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0)
                BUG();
}

static void xen_halt(void)
{
        if (irqs_disabled())
                HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        else
                xen_safe_halt();
}

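/*
 * Lazy-mode handling: while in a lazy CPU or MMU section, operations are
 * queued as multicalls and flushed in a single hypercall when the section
 * ends.  PARAVIRT_LAZY_FLUSH just pushes out any pending batch without
 * changing state.
 */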
static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
{
        switch (mode) {
        case PARAVIRT_LAZY_NONE:
                BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
                break;

        case PARAVIRT_LAZY_MMU:
        case PARAVIRT_LAZY_CPU:
                BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE);
                break;

        case PARAVIRT_LAZY_FLUSH:
                /* flush if necessary, but don't change state */
                if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE)
                        xen_mc_flush();
                return;
        }

        xen_mc_flush();
        x86_write_percpu(xen_lazy_mode, mode);
}

static unsigned long xen_store_tr(void)
{
        return 0;
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
        unsigned long linear_addr = (unsigned long)addr;
        struct mmuext_op *op;
        struct multicall_space mcs = xen_mc_entry(sizeof(*op));

        op = mcs.args;
        op->cmd = MMUEXT_SET_LDT;
        if (linear_addr) {
                /* the LDT may be vmalloc'd, so use arbitrary_virt_to_machine */
                xmaddr_t maddr;
                maddr = arbitrary_virt_to_machine((unsigned long)addr);
                linear_addr = (unsigned long)maddr.maddr;
        }
        op->arg1.linear_addr = linear_addr;
        op->arg2.nr_ents = entries;

        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

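/*
 * Hand a new GDT to Xen: each frame backing it is converted to a machine
 * frame number and made read-only first, since Xen will not accept
 * descriptor tables that the guest can still write to.
 */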
static void xen_load_gdt(const struct Xgt_desc_struct *dtr)
{
        unsigned long *frames;
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
        unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        int f;
        struct multicall_space mcs;

        /* A GDT can be up to 64k in size, which corresponds to 8192
           8-byte entries, or 16 4k pages.. */

        BUG_ON(size > 65536);
        BUG_ON(va & ~PAGE_MASK);

        mcs = xen_mc_entry(sizeof(*frames) * pages);
        frames = mcs.args;

        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
                frames[f] = virt_to_mfn(va);
                make_lowmem_page_readonly((void *)va);
        }

        MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void load_TLS_descriptor(struct thread_struct *t,
                                unsigned int cpu, unsigned int i)
{
        struct desc_struct *gdt = get_cpu_gdt_table(cpu);
        xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
        struct multicall_space mc = __xen_mc_entry(0);

        MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
        xen_mc_batch();

        load_TLS_descriptor(t, cpu, 0);
        load_TLS_descriptor(t, cpu, 1);
        load_TLS_descriptor(t, cpu, 2);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

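/*
 * Descriptor-table pages are read-only once Xen is using them, so
 * individual entries have to be updated with the update_descriptor
 * hypercall rather than with plain stores.
 */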
static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
                                u32 low, u32 high)
{
        unsigned long lp = (unsigned long)&dt[entrynum];
        xmaddr_t mach_lp = virt_to_machine(lp);
        u64 entry = (u64)high << 32 | low;

        xen_mc_flush();
        if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
                BUG();
}

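/*
 * Convert an IDT gate descriptor into Xen's trap_info format.  Only trap
 * gates (type 0xf) and interrupt gates (type 0xe) are passed on; flag
 * bit 2 tells Xen to disable event delivery on entry, mirroring the way
 * an interrupt gate clears IF.
 */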
static int cvt_gate_to_trap(int vector, u32 low, u32 high,
                            struct trap_info *info)
{
        u8 type, dpl;

        type = (high >> 8) & 0x1f;
        dpl = (high >> 13) & 3;

        if (type != 0xf && type != 0xe)
                return 0;

        info->vector = vector;
        info->address = (high & 0xffff0000) | (low & 0x0000ffff);
        info->cs = low >> 16;
        info->flags = dpl;
        /* interrupt gates clear IF */
        if (type == 0xe)
                info->flags |= 4;

        return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
                                u32 low, u32 high)
{
        int cpu = smp_processor_id();
        unsigned long p = (unsigned long)&dt[entrynum];
        unsigned long start = per_cpu(idt_desc, cpu).address;
        unsigned long end = start + per_cpu(idt_desc, cpu).size + 1;

        xen_mc_flush();

        write_dt_entry(dt, entrynum, low, high);

        if (p >= start && (p + 8) <= end) {
                struct trap_info info[2];

                info[1].address = 0;

                if (cvt_gate_to_trap(entrynum, low, high, &info[0]))
                        if (HYPERVISOR_set_trap_table(info))
                                BUG();
        }
}

/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct Xgt_desc_struct *desc)
{
        static DEFINE_SPINLOCK(lock);
        static struct trap_info traps[257];

        int cpu = smp_processor_id();
        unsigned in, out, count;

        per_cpu(idt_desc, cpu) = *desc;

        count = (desc->size+1) / 8;
        BUG_ON(count > 256);

        spin_lock(&lock);
        for (in = out = 0; in < count; in++) {
                const u32 *entry = (u32 *)(desc->address + in * 8);

                if (cvt_gate_to_trap(in, entry[0], entry[1], &traps[out]))
                        out++;
        }
        traps[out].address = 0;

        xen_mc_flush();
        if (HYPERVISOR_set_trap_table(traps))
                BUG();

        spin_unlock(&lock);
}

/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
                                u32 low, u32 high)
{
        switch ((high >> 8) & 0xff) {
        case DESCTYPE_LDT:
        case DESCTYPE_TSS:
                /* ignore */
                break;

        default: {
                xmaddr_t maddr = virt_to_machine(&dt[entry]);
                u64 desc = (u64)high << 32 | low;

                xen_mc_flush();
                if (HYPERVISOR_update_descriptor(maddr.maddr, desc))
                        BUG();
        }

        }
}

static void xen_load_esp0(struct tss_struct *tss,
                          struct thread_struct *thread)
{
        struct multicall_space mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

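/*
 * The iopl mask is in EFLAGS format, so bits 12-13 carry the requested
 * IOPL; it is applied via a physdev hypercall since the guest kernel
 * does not run in ring 0 and cannot change EFLAGS.IOPL itself.
 */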
static void xen_set_iopl_mask(unsigned mask)
{
        struct physdev_set_iopl set_iopl;

        /* Force the change at ring 0. */
        set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
        HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
static unsigned long xen_apic_read(unsigned long reg)
{
        return 0;
}
#endif

static void xen_flush_tlb(void)
{
        struct mmuext_op op;

        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
                BUG();
}

static void xen_flush_tlb_single(unsigned long addr)
{
        struct mmuext_op op;

        op.cmd = MMUEXT_INVLPG_LOCAL;
        op.arg1.linear_addr = addr & PAGE_MASK;
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
                BUG();
}

static unsigned long xen_read_cr2(void)
{
        return x86_read_percpu(xen_vcpu)->arch.cr2;
}

static void xen_write_cr4(unsigned long cr4)
{
        /* never allow TSC to be disabled */
        native_write_cr4(cr4 & ~X86_CR4_TSD);
}

/*
 * Page-directory addresses above 4GB do not fit into architectural %cr3.
 * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
 * must use the following accessor macros to pack/unpack valid MFNs.
 *
 * Note that Xen is using the fact that the pagetable base is always
 * page-aligned, and putting the 12 MSB of the address into the 12 LSB
 * of cr3.
 */
#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
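/*
 * Worked example: a pagetable base at machine frame 0x123456 (an address
 * above 4GB) packs as (0x123456 << 12) | (0x123456 >> 20) = 0x23456001,
 * and xen_cr3_to_pfn(0x23456001) recovers 0x123456.
 */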

static unsigned long xen_read_cr3(void)
{
        return x86_read_percpu(xen_cr3);
}

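/*
 * A cr3 write becomes an MMUEXT_NEW_BASEPTR multicall carrying the machine
 * frame of the new pagetable; the per-cpu xen_cr3 shadow is what
 * xen_read_cr3() returns, so reads never need a hypercall.  Rewriting the
 * current value degenerates to a TLB flush, matching native cr3-reload
 * semantics.
 */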
static void xen_write_cr3(unsigned long cr3)
{
        if (cr3 == x86_read_percpu(xen_cr3)) {
                /* just a simple tlb flush */
                xen_flush_tlb();
                return;
        }

        x86_write_percpu(xen_cr3, cr3);

        {
                struct mmuext_op *op;
                struct multicall_space mcs = xen_mc_entry(sizeof(*op));
                unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));

                op = mcs.args;
                op->cmd = MMUEXT_NEW_BASEPTR;
                op->arg1.mfn = mfn;

                MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

                xen_mc_issue(PARAVIRT_LAZY_CPU);
        }
}

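/*
 * Pagetable page lifecycle: pages about to be used as pagetables are made
 * read-only (Xen only accepts read-only pagetable pages), and are made
 * writable again when they are released.
 */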
static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
{
        /* XXX pfn isn't necessarily a lowmem page */
        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

static void xen_alloc_pd(u32 pfn)
{
        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

static void xen_release_pd(u32 pfn)
{
        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void xen_release_pt(u32 pfn)
{
        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void xen_alloc_pd_clone(u32 pfn, u32 clonepfn,
                               u32 start, u32 count)
{
        xen_alloc_pd(pfn);
}

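/*
 * Called while the kernel is building its own pagetables: seed them from
 * the top level Xen handed us at boot, give PAE its own pmd pages, and
 * switch to the new tables immediately so that later additions can be
 * prepared properly for Xen.
 */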
static __init void xen_pagetable_setup_start(pgd_t *base)
{
        pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;

        init_mm.pgd = base;
        /*
         * copy top-level of Xen-supplied pagetable into place.  For
         * !PAE we can use this as-is, but for PAE it is a stand-in
         * while we copy the pmd pages.
         */
        memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));

        if (PTRS_PER_PMD > 1) {
                int i;
                /*
                 * For PAE, need to allocate new pmds, rather than
                 * share Xen's, since Xen doesn't like pmd's being
                 * shared between address spaces.
                 */
                for (i = 0; i < PTRS_PER_PGD; i++) {
                        if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
                                pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

                                memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
                                       PAGE_SIZE);

                                xen_alloc_pd(PFN_DOWN(__pa(pmd)));

                                set_pgd(&base[i], __pgd(1 + __pa(pmd)));
                        } else
                                pgd_clear(&base[i]);
                }
        }

        /* make sure zero_page is mapped RO so we can use it in pagetables */
        make_lowmem_page_readonly(empty_zero_page);
        make_lowmem_page_readonly(base);
        /*
         * Switch to new pagetable.  This is done before
         * pagetable_init has done anything so that the new pages
         * added to the table can be prepared properly for Xen.
         */
        xen_write_cr3(__pa(base));
}

static __init void xen_pagetable_setup_done(pgd_t *base)
{
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                /*
                 * Create a mapping for the shared info page.
                 * Should be set_fixmap(), but shared_info is a machine
                 * address with no corresponding pseudo-phys address.
                 */
#if 0
                set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
                            PFN_DOWN(xen_start_info->shared_info),
                            PAGE_KERNEL);
#endif

                HYPERVISOR_shared_info =
                        (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);

        } else
                HYPERVISOR_shared_info =
                        (struct shared_info *)__va(xen_start_info->shared_info);

#if 0
        xen_pgd_pin(base);
#endif

        xen_vcpu_setup(smp_processor_id());
}

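/*
 * The paravirt_ops table: operations Xen does not need to intercept keep
 * their native_* implementations, while privileged operations are routed
 * through the xen_* wrappers above.
 */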
static const struct paravirt_ops xen_paravirt_ops __initdata = {
        .paravirt_enabled = 1,
        .shared_kernel_pmd = 0,

        .name = "Xen",
        .banner = xen_banner,

        .patch = paravirt_patch_default,

        .memory_setup = xen_memory_setup,
        .arch_setup = xen_arch_setup,

        .cpuid = xen_cpuid,

        .set_debugreg = xen_set_debugreg,
        .get_debugreg = xen_get_debugreg,

        .clts = native_clts,

        .read_cr0 = native_read_cr0,
        .write_cr0 = native_write_cr0,

        .read_cr2 = xen_read_cr2,
        .write_cr2 = native_write_cr2,

        .read_cr3 = xen_read_cr3,
        .write_cr3 = xen_write_cr3,

        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = xen_write_cr4,

        .save_fl = xen_save_fl,
        .restore_fl = xen_restore_fl,
        .irq_disable = xen_irq_disable,
        .irq_enable = xen_irq_enable,
        .safe_halt = xen_safe_halt,
        .halt = xen_halt,
        .wbinvd = native_wbinvd,

        .read_msr = native_read_msr_safe,
        .write_msr = native_write_msr_safe,
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,

        .iret = (void *)&hypercall_page[__HYPERVISOR_iret],
        .irq_enable_sysexit = NULL,  /* never called */

        .load_tr_desc = paravirt_nop,
        .set_ldt = xen_set_ldt,
        .load_gdt = xen_load_gdt,
        .load_idt = xen_load_idt,
        .load_tls = xen_load_tls,

        .store_gdt = native_store_gdt,
        .store_idt = native_store_idt,
        .store_tr = xen_store_tr,

        .write_ldt_entry = xen_write_ldt_entry,
        .write_gdt_entry = xen_write_gdt_entry,
        .write_idt_entry = xen_write_idt_entry,
        .load_esp0 = xen_load_esp0,

        .set_iopl_mask = xen_set_iopl_mask,
        .io_delay = xen_io_delay,

#ifdef CONFIG_X86_LOCAL_APIC
        .apic_write = paravirt_nop,
        .apic_write_atomic = paravirt_nop,
        .apic_read = xen_apic_read,
        .setup_boot_clock = paravirt_nop,
        .setup_secondary_clock = paravirt_nop,
        .startup_ipi_hook = paravirt_nop,
#endif

        .flush_tlb_user = xen_flush_tlb,
        .flush_tlb_kernel = xen_flush_tlb,
        .flush_tlb_single = xen_flush_tlb_single,

        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,

        .pagetable_setup_start = xen_pagetable_setup_start,
        .pagetable_setup_done = xen_pagetable_setup_done,

        .alloc_pt = xen_alloc_pt,
        .alloc_pd = xen_alloc_pd,
        .alloc_pd_clone = xen_alloc_pd_clone,
        .release_pd = xen_release_pd,
        .release_pt = xen_release_pt,

        .set_lazy_mode = xen_set_lazy_mode,
};

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
        pgd_t *pgd;

        if (!xen_start_info)
                return;

        BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0);

        /* Install Xen paravirt ops */
        paravirt_ops = xen_paravirt_ops;

        xen_setup_features();

        /* Get mfn list */
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;

        pgd = (pgd_t *)xen_start_info->pt_base;

        init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;

        init_mm.pgd = pgd; /* use the Xen pagetables to start */

        /* keep using Xen gdt for now; no urgent need to change it */

        x86_write_percpu(xen_cr3, __pa(pgd));
        xen_vcpu_setup(0);

        paravirt_ops.kernel_rpl = 1;
        if (xen_feature(XENFEAT_supervisor_mode_kernel))
                paravirt_ops.kernel_rpl = 0;

        /* set the limit of our address space */
        reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE);

        /* set up basic CPUID stuff */
        cpu_detect(&new_cpu_data);
        new_cpu_data.hard_math = 1;
        new_cpu_data.x86_capability[0] = cpuid_edx(1);

        /* Poke various useful things into boot_params */
        LOADER_TYPE = (9 << 4) | 0;
        INITRD_START = xen_start_info->mod_start ? __pa(xen_start_info->mod_start) : 0;
        INITRD_SIZE = xen_start_info->mod_len;

        /* Start the world */
        start_kernel();
}