X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Flguest%2Fsegments.c;h=ec6aa3f1c36b349464e946375de109cc606c82a2;hb=9fec6060d9e48ed7db0dac0e16d0f0f0e615b7f6;hp=4d4e5a4586f96aad51720458c51a1d928df6252d;hpb=bff672e630a015d5b54c8bfb16160b7edc39a57c;p=linux-2.6
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index 4d4e5a4586..ec6aa3f1c3 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -12,8 +12,6 @@
 #include "lg.h"
 
 /*H:600
- * We've almost completed the Host; there's just one file to go!
- *
  * Segments & The Global Descriptor Table
  *
  * (That title sounds like a bad Nerdcore group. Not to suggest that there are
@@ -43,22 +41,6 @@
  * begin.
  */
 
-/* Is the descriptor the Guest wants us to put in OK?
- *
- * The flag which Intel says must be zero: must be zero. The descriptor must
- * be present, (this is actually checked earlier but is here for thorougness),
- * and the descriptor type must be 1 (a memory segment). */
-static int desc_ok(const struct desc_struct *gdt)
-{
-	return ((gdt->b & 0x00209000) == 0x00009000);
-}
-
-/* Is the segment present? (Otherwise it can't be used by the Guest). */
-static int segment_present(const struct desc_struct *gdt)
-{
-	return gdt->b & 0x8000;
-}
-
 /* There are several entries we don't let the Guest set. The TSS entry is the
  * "Task State Segment" which controls all kinds of delicate things. The
  * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the
@@ -71,34 +53,12 @@ static int ignored_gdt(unsigned int num)
 	    || num == GDT_ENTRY_DOUBLEFAULT_TSS);
 }
 
-/* If the Guest asks us to remove an entry from the GDT, we have to be careful.
- * If one of the segment registers is pointing at that entry the Switcher will
- * crash when it tries to reload the segment registers for the Guest.
- *
- * It doesn't make much sense for the Guest to try to remove its own code, data
- * or stack segments while they're in use: assume that's a Guest bug. If it's
- * one of the lesser segment registers using the removed entry, we simply set
- * that register to 0 (unusable). */
-static void check_segment_use(struct lguest *lg, unsigned int desc)
-{
-	/* GDT entries are 8 bytes long, so we divide to get the index and
-	 * ignore the bottom bits. */
-	if (lg->regs->gs / 8 == desc)
-		lg->regs->gs = 0;
-	if (lg->regs->fs / 8 == desc)
-		lg->regs->fs = 0;
-	if (lg->regs->es / 8 == desc)
-		lg->regs->es = 0;
-	if (lg->regs->ds / 8 == desc
-	    || lg->regs->cs / 8 == desc
-	    || lg->regs->ss / 8 == desc)
-		kill_guest(lg, "Removed live GDT entry %u", desc);
-}
-
-/*H:610 Once the GDT has been changed, we look through the changed entries and
- * see if they're OK. If not, we'll call kill_guest() and the Guest will never
- * get to use the invalid entries. */
-static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
+/*H:630 Once the Guest gave us new GDT entries, we fix them up a little. We
+ * don't care if they're invalid: the worst that can happen is a General
+ * Protection Fault in the Switcher when it restores a Guest segment register
+ * which tries to use that entry. Then we kill the Guest for causing such a
+ * mess: the message will be "unhandled trap 256". */
+static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
 {
 	unsigned int i;
 
@@ -108,77 +68,72 @@ static void fixup_gdt_table(struct lguest *lg, unsigned start, unsigned end)
 		if (ignored_gdt(i))
 			continue;
 
-		/* We could fault in switch_to_guest if they are using
-		 * a removed segment. */
-		if (!segment_present(&lg->gdt[i])) {
-			check_segment_use(lg, i);
-			continue;
-		}
-
-		if (!desc_ok(&lg->gdt[i]))
-			kill_guest(lg, "Bad GDT descriptor %i", i);
-
 		/* Segment descriptors contain a privilege level: the Guest is
 		 * sometimes careless and leaves this as 0, even though it's
 		 * running at privilege level 1. If so, we fix it here. */
-		if ((lg->gdt[i].b & 0x00006000) == 0)
-			lg->gdt[i].b |= (GUEST_PL << 13);
+		if ((cpu->arch.gdt[i].b & 0x00006000) == 0)
+			cpu->arch.gdt[i].b |= (GUEST_PL << 13);
 
 		/* Each descriptor has an "accessed" bit. If we don't set it
 		 * now, the CPU will try to set it when the Guest first loads
 		 * that entry into a segment register. But the GDT isn't
 		 * writable by the Guest, so bad things can happen. */
-		lg->gdt[i].b |= 0x00000100;
+		cpu->arch.gdt[i].b |= 0x00000100;
 	}
 }
 
-/* This routine is called at boot or modprobe time for each CPU to set up the
- * "constant" GDT entries for Guests running on that CPU. */
+/*H:610 Like the IDT, we never simply use the GDT the Guest gives us. We keep
+ * a GDT for each CPU, and copy across the Guest's entries each time we want to
+ * run the Guest on that CPU.
+ *
+ * This routine is called at boot or modprobe time for each CPU to set up the
+ * constant GDT entries: the ones which are the same no matter what Guest we're
+ * running. */
 void setup_default_gdt_entries(struct lguest_ro_state *state)
 {
 	struct desc_struct *gdt = state->guest_gdt;
 	unsigned long tss = (unsigned long)&state->guest_tss;
 
-	/* The hypervisor segments are full 0-4G segments, privilege level 0 */
+	/* The Switcher segments are full 0-4G segments, privilege level 0 */
 	gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
 	gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
 
-	/* The TSS segment refers to the TSS entry for this CPU, so we cannot
-	 * copy it from the Guest. Forgive the magic flags */
+	/* The TSS segment refers to the TSS entry for this particular CPU.
+	 * Forgive the magic flags: the 0x8900 means the entry is Present, it's
+	 * privilege level 0 Available 386 TSS system segment, and the 0x67
+	 * means Saturn is eclipsed by Mercury in the twelfth house. */
 	gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16);
 	gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000)
 		| ((tss >> 16) & 0x000000FF);
 }
 
-/* This routine is called before the Guest is run for the first time. */
-void setup_guest_gdt(struct lguest *lg)
+/* This routine sets up the initial Guest GDT for booting. All entries start
+ * as 0 (unusable). */
+void setup_guest_gdt(struct lg_cpu *cpu)
 {
 	/* Start with full 0-4G segments... */
-	lg->gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
-	lg->gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
 	/* ...except the Guest is allowed to use them, so set the privilege
 	 * level appropriately in the flags. */
-	lg->gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
-	lg->gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
+	cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
 }
 
-/* Like the IDT, we never simply use the GDT the Guest gives us. We set up the
- * GDTs for each CPU, then we copy across the entries each time we want to run
- * a different Guest on that CPU. */
-
-/* A partial GDT load, for the three "thead-local storage" entries. Otherwise
- * it's just like load_guest_gdt(). So much, in fact, it would probably be
- * neater to have a single hypercall to cover both. */
-void copy_gdt_tls(const struct lguest *lg, struct desc_struct *gdt)
+/*H:650 An optimization of copy_gdt(), for just the three "thead-local storage"
+ * entries. */
+void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
 {
 	unsigned int i;
 
 	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
-		gdt[i] = lg->gdt[i];
+		gdt[i] = cpu->arch.gdt[i];
 }
 
-/* This is the full version */
-void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
+/*H:640 When the Guest is run on a different CPU, or the GDT entries have
+ * changed, copy_gdt() is called to copy the Guest's GDT entries across to this
+ * CPU's GDT. */
+void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
 {
 	unsigned int i;
 
@@ -186,35 +141,42 @@ void copy_gdt(const struct lguest *lg, struct desc_struct *gdt)
 	 * replaced. See ignored_gdt() above. */
 	for (i = 0; i < GDT_ENTRIES; i++)
 		if (!ignored_gdt(i))
-			gdt[i] = lg->gdt[i];
+			gdt[i] = cpu->arch.gdt[i];
 }
 
-/* This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT). */
-void load_guest_gdt(struct lguest *lg, unsigned long table, u32 num)
+/*H:620 This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT).
+ * We copy it from the Guest and tweak the entries. */
+void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num)
 {
 	/* We assume the Guest has the same number of GDT entries as the
 	 * Host, otherwise we'd have to dynamically allocate the Guest GDT. */
-	if (num > ARRAY_SIZE(lg->gdt))
-		kill_guest(lg, "too many gdt entries %i", num);
+	if (num > ARRAY_SIZE(cpu->arch.gdt))
+		kill_guest(cpu, "too many gdt entries %i", num);
 
 	/* We read the whole thing in, then fix it up. */
-	lgread(lg, lg->gdt, table, num * sizeof(lg->gdt[0]));
-	fixup_gdt_table(lg, 0, ARRAY_SIZE(lg->gdt));
+	__lgread(cpu, cpu->arch.gdt, table, num * sizeof(cpu->arch.gdt[0]));
+	fixup_gdt_table(cpu, 0, ARRAY_SIZE(cpu->arch.gdt));
 	/* Mark that the GDT changed so the core knows it has to copy it again,
 	 * even if the Guest is run on the same CPU. */
-	lg->changed |= CHANGED_GDT;
+	cpu->changed |= CHANGED_GDT;
 }
 
-void guest_load_tls(struct lguest *lg, unsigned long gtls)
+/* This is the fast-track version for just changing the three TLS entries.
+ * Remember that this happens on every context switch, so it's worth
+ * optimizing. But wouldn't it be neater to have a single hypercall to cover
+ * both cases? */
+void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls)
 {
-	struct desc_struct *tls = &lg->gdt[GDT_ENTRY_TLS_MIN];
+	struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN];
 
-	lgread(lg, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
-	fixup_gdt_table(lg, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
-	lg->changed |= CHANGED_GDT_TLS;
+	__lgread(cpu, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
+	fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
+	/* Note that just the TLS entries have changed. */
+	cpu->changed |= CHANGED_GDT_TLS;
 }
+/*:*/
 
-/*
+/*H:660
  * With this, we have finished the Host.
  *
  * Five of the seven parts of our task are complete. You have made it through
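
The removed check_segment_use() divides the saved segment registers by 8 before
comparing them with a GDT entry number. That works because a segment register
holds a selector, not a bare index: bits 3-15 are the table index, bit 2 picks
GDT vs. LDT, and bits 0-1 are the requested privilege level. A standalone
sketch of that decomposition (not part of the patch; the helper names are
invented for illustration):

/* Decompose an x86 segment selector the way check_segment_use() implicitly
 * does with its "/ 8". */
#include <stdio.h>
#include <stdint.h>

static unsigned int selector_index(uint16_t sel)
{
	return sel >> 3;		/* same as sel / 8 */
}

static unsigned int selector_rpl(uint16_t sel)
{
	return sel & 0x3;		/* requested privilege level */
}

int main(void)
{
	/* e.g. a selector for GDT entry 13 at privilege level 1 */
	uint16_t sel = (13 << 3) | 1;

	printf("selector 0x%04x -> GDT index %u, RPL %u\n",
	       sel, selector_index(sel), selector_rpl(sel));
	return 0;
}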
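
Both the old and the new fixup_gdt_table() lean on two magic masks: 0x00006000
is the descriptor privilege level (DPL) field in bits 13-14 of the descriptor's
high word, and 0x00000100 is the "accessed" bit. A standalone sketch (not part
of the patch) that applies the same fixups to a mocked-up two-word descriptor,
so the masks can be seen in isolation; the demo_* names are invented for the
example:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct demo_desc {
	uint32_t a;	/* limit 15:0, base 15:0 */
	uint32_t b;	/* base 23:16, type, DPL, present, limit 19:16, flags, base 31:24 */
};

#define DEMO_GUEST_PL 1		/* lguest Guests run at privilege level 1 */

static void demo_fixup(struct demo_desc *d)
{
	/* DPL is bits 13-14 of the high word (mask 0x00006000). If the Guest
	 * left it at 0, raise it to the Guest's level, exactly as the patch
	 * does with GUEST_PL << 13. */
	if ((d->b & 0x00006000) == 0)
		d->b |= (uint32_t)DEMO_GUEST_PL << 13;

	/* Bit 8 of the high word (0x00000100) is the "accessed" bit. Setting
	 * it here stops the CPU from trying to write it into the read-only
	 * GDT the first time the Guest loads the segment. */
	d->b |= 0x00000100;
}

int main(void)
{
	/* A flat 4GB code segment as a careless Guest might supply it:
	 * present, DPL 0, accessed bit clear. */
	struct demo_desc d = { .a = 0x0000FFFF, .b = 0x00CF9A00 };

	demo_fixup(&d);
	/* Prints 00cfbb00 0000ffff: DPL is now 1 and the accessed bit is set. */
	printf("fixed descriptor: %08" PRIx32 " %08" PRIx32 "\n", d.b, d.a);
	return 0;
}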
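
The "magic flags" in setup_default_gdt_entries() are less astrological than the
new comment pretends: 0x67 is the 104-byte hardware TSS minus one (the segment
limit), 0x8900 is the Present bit plus type 0x9 ("available 32-bit TSS"), and
the 32-bit base address is scattered across both descriptor words. A standalone
sketch, not part of the patch, that builds a descriptor with the same
expressions and pulls the base back out to check it; the demo_* names are
invented:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <assert.h>

struct demo_desc {
	uint32_t a, b;
};

/* Assemble a TSS descriptor the same way the patch does. */
static struct demo_desc demo_tss_desc(uint32_t tss)
{
	struct demo_desc d;

	d.a = 0x00000067 | (tss << 16);		/* limit 15:0, base 15:0   */
	d.b = 0x00008900			/* Present, type 0x9 (TSS) */
	      | (tss & 0xFF000000)		/* base 31:24              */
	      | ((tss >> 16) & 0x000000FF);	/* base 23:16              */
	return d;
}

/* Reassemble the scattered base field from the descriptor. */
static uint32_t demo_desc_base(struct demo_desc d)
{
	return (d.a >> 16) | ((d.b & 0xFF) << 16) | (d.b & 0xFF000000);
}

int main(void)
{
	uint32_t tss = 0xC12345F0;	/* a pretend address for guest_tss */
	struct demo_desc d = demo_tss_desc(tss);

	assert(demo_desc_base(d) == tss);
	printf("TSS descriptor: %08" PRIx32 " %08" PRIx32 "\n", d.b, d.a);
	return 0;
}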
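
load_guest_gdt() and guest_load_tls() do not touch the per-CPU GDT directly:
they only record what changed (CHANGED_GDT or CHANGED_GDT_TLS), and the copy
into the running CPU's GDT happens later through copy_gdt() or copy_gdt_tls().
The sketch below models that "copy only what changed" decision. It is an
assumption about how the core consumes these flags rather than code from this
patch, and the demo_* names and flag values are invented:

#include <stdio.h>

#define DEMO_CHANGED_GDT	0x1	/* invented stand-ins for the real flags */
#define DEMO_CHANGED_GDT_TLS	0x2

struct demo_cpu {
	unsigned int changed;
};

static void demo_sync_gdt(struct demo_cpu *cpu)
{
	/* A full LHCALL_LOAD_GDT forces a full copy; a TLS-only update can get
	 * away with copying just the three TLS slots, which matters because
	 * TLS reloads happen on every Guest context switch. */
	if (cpu->changed & DEMO_CHANGED_GDT)
		printf("copy_gdt(): copy everything but the ignored entries\n");
	else if (cpu->changed & DEMO_CHANGED_GDT_TLS)
		printf("copy_gdt_tls(): copy the three TLS entries only\n");
	cpu->changed = 0;
}

int main(void)
{
	struct demo_cpu cpu = { .changed = DEMO_CHANGED_GDT_TLS };

	demo_sync_gdt(&cpu);	/* takes the TLS fast path */
	return 0;
}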