kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}
+static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ for (i = 0; i < 2; ++i)
+ if (vcpu->pio.guest_pages[i]) {
+ __free_page(vcpu->pio.guest_pages[i]);
+ vcpu->pio.guest_pages[i] = NULL;
+ }
+}
+
static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->vmcs)
		return;

	kvm_arch_ops->vcpu_free(vcpu);
free_page((unsigned long)vcpu->run);
vcpu->run = NULL;
+ free_page((unsigned long)vcpu->pio_data);
+ vcpu->pio_data = NULL;
+ free_pio_guest_pages(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
u64 pdpte;
u64 *pdpt;
int ret;
- struct kvm_memory_slot *memslot;
+ struct page *page;
spin_lock(&vcpu->kvm->lock);
- memslot = gfn_to_memslot(vcpu->kvm, pdpt_gfn);
- /* FIXME: !memslot - emulate? 0xff? */
- pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);
+ page = gfn_to_page(vcpu->kvm, pdpt_gfn);
+ /* FIXME: !page - emulate? 0xff? */
+ pdpt = kmap_atomic(page, KM_USER0);
ret = 1;
for (i = 0; i < 4; ++i) {
return r;
}
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+/*
+ * Set a new alias region. Aliases map a portion of physical memory into
+ * another portion. This is useful for memory windows, for example the PC
+ * VGA region.
+ */
+static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
+ struct kvm_memory_alias *alias)
+{
+ int r, n;
+ struct kvm_mem_alias *p;
+
+ r = -EINVAL;
+ /* General sanity checks */
+ if (alias->memory_size & (PAGE_SIZE - 1))
+ goto out;
+ if (alias->guest_phys_addr & (PAGE_SIZE - 1))
+ goto out;
+ if (alias->slot >= KVM_ALIAS_SLOTS)
+ goto out;
+ if (alias->guest_phys_addr + alias->memory_size
+ < alias->guest_phys_addr)
+ goto out;
+ if (alias->target_phys_addr + alias->memory_size
+ < alias->target_phys_addr)
+ goto out;
+
+ spin_lock(&kvm->lock);
+
+ p = &kvm->aliases[alias->slot];
+ p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
+ p->npages = alias->memory_size >> PAGE_SHIFT;
+ p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+
+ for (n = KVM_ALIAS_SLOTS; n > 0; --n)
+ if (kvm->aliases[n - 1].npages)
+ break;
+ kvm->naliases = n;
+
+ spin_unlock(&kvm->lock);
+
+ vcpu_load(&kvm->vcpus[0]);
+ spin_lock(&kvm->lock);
+ kvm_mmu_zap_all(&kvm->vcpus[0]);
+ spin_unlock(&kvm->lock);
+ vcpu_put(&kvm->vcpus[0]);
+
+ return 0;
+
+out:
+ return r;
+}
+
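An illustrative userspace counterpart, not part of the patch: with the fields consumed above (slot, guest_phys_addr, memory_size, target_phys_addr), a monitor could window VGA memory like this; afterwards unalias_gfn() below resolves gfns 0xa0-0xbf to 0x1000-0x101f. The addresses are made up.

	struct kvm_memory_alias alias = {
		.slot             = 0,
		.guest_phys_addr  = 0xa0000,	/* PC VGA window */
		.memory_size      = 0x20000,	/* 128K, page aligned */
		.target_phys_addr = 0x1000000,	/* assumed backing RAM */
	};

	if (ioctl(vm_fd, KVM_SET_MEMORY_ALIAS, &alias) < 0)
		perror("KVM_SET_MEMORY_ALIAS");
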
+static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ int i;
+ struct kvm_mem_alias *alias;
+
+ for (i = 0; i < kvm->naliases; ++i) {
+ alias = &kvm->aliases[i];
+ if (gfn >= alias->base_gfn
+ && gfn < alias->base_gfn + alias->npages)
+ return alias->target_gfn + gfn - alias->base_gfn;
+ }
+ return gfn;
+}
+
+static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
return NULL;
}
-EXPORT_SYMBOL_GPL(gfn_to_memslot);
+
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+{
+ gfn = unalias_gfn(kvm, gfn);
+ return __gfn_to_memslot(kvm, gfn);
+}
+
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+ struct kvm_memory_slot *slot;
+
+ gfn = unalias_gfn(kvm, gfn);
+ slot = __gfn_to_memslot(kvm, gfn);
+ if (!slot)
+ return NULL;
+ return slot->phys_mem[gfn - slot->base_gfn];
+}
+EXPORT_SYMBOL_GPL(gfn_to_page);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
unsigned offset = addr & (PAGE_SIZE-1);
unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
unsigned long pfn;
- struct kvm_memory_slot *memslot;
- void *page;
+ struct page *page;
+ void *page_virt;
if (gpa == UNMAPPED_GVA)
return X86EMUL_PROPAGATE_FAULT;
pfn = gpa >> PAGE_SHIFT;
- memslot = gfn_to_memslot(vcpu->kvm, pfn);
- if (!memslot)
+ page = gfn_to_page(vcpu->kvm, pfn);
+ if (!page)
return X86EMUL_UNHANDLEABLE;
-	page = kmap_atomic(gfn_to_page(memslot, pfn), KM_USER0);
-	memcpy(data, page + offset, tocopy);
-	kunmap_atomic(page, KM_USER0);
+	page_virt = kmap_atomic(page, KM_USER0);
+	memcpy(data, page_virt + offset, tocopy);
+	kunmap_atomic(page_virt, KM_USER0);
bytes -= tocopy;
data += tocopy;
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
unsigned long val, int bytes)
{
- struct kvm_memory_slot *m;
struct page *page;
void *virt;
if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
return 0;
- m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
- if (!m)
+ page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+ if (!page)
return 0;
- page = gfn_to_page(m, gpa >> PAGE_SHIFT);
kvm_mmu_pre_write(vcpu, gpa, bytes);
mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
virt = kmap_atomic(page, KM_USER0);
}
switch (nr) {
default:
- ;
+ run->hypercall.args[0] = a0;
+ run->hypercall.args[1] = a1;
+ run->hypercall.args[2] = a2;
+ run->hypercall.args[3] = a3;
+ run->hypercall.args[4] = a4;
+ run->hypercall.args[5] = a5;
+ run->hypercall.ret = ret;
+ run->hypercall.longmode = is_long_mode(vcpu);
+ kvm_arch_ops->decache_regs(vcpu);
+ return 0;
}
vcpu->regs[VCPU_REGS_RAX] = ret;
kvm_arch_ops->decache_regs(vcpu);
printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
__FUNCTION__, data);
break;
+ case MSR_IA32_MCG_STATUS:
+ printk(KERN_WARNING "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
+ __FUNCTION__, data);
+ break;
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case 0x200 ... 0x2ff: /* MTRRs */
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
-static void complete_pio(struct kvm_vcpu *vcpu)
+static int pio_copy_data(struct kvm_vcpu *vcpu)
{
- struct kvm_io *io = &vcpu->run->io;
+ void *p = vcpu->pio_data;
+ void *q;
+ unsigned bytes;
+ int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
+
+ kvm_arch_ops->vcpu_put(vcpu);
+ q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
+ PAGE_KERNEL);
+ if (!q) {
+ kvm_arch_ops->vcpu_load(vcpu);
+ free_pio_guest_pages(vcpu);
+ return -ENOMEM;
+ }
+ q += vcpu->pio.guest_page_offset;
+ bytes = vcpu->pio.size * vcpu->pio.cur_count;
+ if (vcpu->pio.in)
+ memcpy(q, p, bytes);
+ else
+ memcpy(p, q, bytes);
+ q -= vcpu->pio.guest_page_offset;
+ vunmap(q);
+ kvm_arch_ops->vcpu_load(vcpu);
+ free_pio_guest_pages(vcpu);
+ return 0;
+}
+
+static int complete_pio(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pio_request *io = &vcpu->pio;
long delta;
+ int r;
kvm_arch_ops->cache_regs(vcpu);
if (!io->string) {
- if (io->direction == KVM_EXIT_IO_IN)
- memcpy(&vcpu->regs[VCPU_REGS_RAX], &io->value,
+ if (io->in)
+ memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
io->size);
} else {
+ if (io->in) {
+ r = pio_copy_data(vcpu);
+ if (r) {
+ kvm_arch_ops->cache_regs(vcpu);
+ return r;
+ }
+ }
+
delta = 1;
if (io->rep) {
- delta *= io->count;
+ delta *= io->cur_count;
/*
* The size of the register should really depend on
* current address size.
*/
vcpu->regs[VCPU_REGS_RCX] -= delta;
}
- if (io->string_down)
+ if (io->down)
delta = -delta;
delta *= io->size;
- if (io->direction == KVM_EXIT_IO_IN)
+ if (io->in)
vcpu->regs[VCPU_REGS_RDI] += delta;
else
vcpu->regs[VCPU_REGS_RSI] += delta;
}
- vcpu->pio_pending = 0;
vcpu->run->io_completed = 0;
kvm_arch_ops->decache_regs(vcpu);
- kvm_arch_ops->skip_emulated_instruction(vcpu);
+ io->count -= io->cur_count;
+ io->cur_count = 0;
+
+ if (!io->count)
+ kvm_arch_ops->skip_emulated_instruction(vcpu);
+ return 0;
+}
+
+int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+ int size, unsigned long count, int string, int down,
+ gva_t address, int rep, unsigned port)
+{
+ unsigned now, in_page;
+ int i;
+ int nr_pages = 1;
+ struct page *page;
+
+ vcpu->run->exit_reason = KVM_EXIT_IO;
+ vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
+ vcpu->run->io.size = size;
+ vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
+ vcpu->run->io.count = count;
+ vcpu->run->io.port = port;
+ vcpu->pio.count = count;
+ vcpu->pio.cur_count = count;
+ vcpu->pio.size = size;
+ vcpu->pio.in = in;
+ vcpu->pio.string = string;
+ vcpu->pio.down = down;
+ vcpu->pio.guest_page_offset = offset_in_page(address);
+ vcpu->pio.rep = rep;
+
+ if (!string) {
+ kvm_arch_ops->cache_regs(vcpu);
+ memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
+ kvm_arch_ops->decache_regs(vcpu);
+ return 0;
+ }
+
+ if (!count) {
+ kvm_arch_ops->skip_emulated_instruction(vcpu);
+ return 1;
+ }
+
+ if (!down)
+ in_page = PAGE_SIZE - offset_in_page(address);
+ else
+ in_page = offset_in_page(address) + size;
+ now = min(count, (unsigned long)in_page / size);
+ if (!now) {
+ /*
+ * String I/O straddles page boundary. Pin two guest pages
+ * so that we satisfy atomicity constraints. Do just one
+ * transaction to avoid complexity.
+ */
+ nr_pages = 2;
+ now = 1;
+ }
+ if (down) {
+ /*
+ * String I/O in reverse. Yuck. Kill the guest, fix later.
+ */
+ printk(KERN_ERR "kvm: guest string pio down\n");
+ inject_gp(vcpu);
+ return 1;
+ }
+ vcpu->run->io.count = now;
+ vcpu->pio.cur_count = now;
+
+ for (i = 0; i < nr_pages; ++i) {
+ spin_lock(&vcpu->kvm->lock);
+ page = gva_to_page(vcpu, address + i * PAGE_SIZE);
+ if (page)
+ get_page(page);
+ vcpu->pio.guest_pages[i] = page;
+ spin_unlock(&vcpu->kvm->lock);
+ if (!page) {
+ inject_gp(vcpu);
+ free_pio_guest_pages(vcpu);
+ return 1;
+ }
+ }
+
+ if (!vcpu->pio.in)
+ return pio_copy_data(vcpu);
+ return 0;
}
+EXPORT_SYMBOL_GPL(kvm_setup_pio);
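A worked example of the clamping above (numbers mine): a rep outsb with count == 300 at page offset 0xffe gives in_page == 2, so the first exit moves now == 2 bytes; with size == 4 at the same offset, in_page / size == 0, so the straddle path pins two pages and moves a single element. The same arithmetic as a standalone userspace demo:

	/* Demo of kvm_setup_pio()'s chunk clamping; PAGE_SIZE is
	 * hardcoded because this is not kernel code. */
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static unsigned long pio_chunk(unsigned long count, unsigned size,
				       unsigned long offset, int down)
	{
		unsigned long in_page = down ? offset + size
					     : PAGE_SIZE - offset;
		unsigned long now = count < in_page / size
					? count : in_page / size;

		/* zero means the first element straddles a page:
		 * pin two pages, transfer one element */
		return now ? now : 1;
	}

	int main(void)
	{
		printf("%lu\n", pio_chunk(300, 1, 0xffe, 0)); /* 2 */
		printf("%lu\n", pio_chunk(300, 4, 0xffe, 0)); /* 1 */
		return 0;
	}
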
static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
+ sigset_t sigsaved;
vcpu_load(vcpu);
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
/* re-sync apic's tpr */
vcpu->cr8 = kvm_run->cr8;
if (kvm_run->io_completed) {
- if (vcpu->pio_pending)
- complete_pio(vcpu);
- else {
+ if (vcpu->pio.cur_count) {
+ r = complete_pio(vcpu);
+ if (r)
+ goto out;
+ } else {
memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
vcpu->mmio_read_completed = 1;
}
vcpu->mmio_needed = 0;
+ if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
+ kvm_arch_ops->cache_regs(vcpu);
+ vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
+ kvm_arch_ops->decache_regs(vcpu);
+ }
+
r = kvm_arch_ops->run(vcpu, kvm_run);
+out:
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
vcpu_put(vcpu);
return r;
}
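
The userspace side this implies, sketched with hypothetical handle_pio_in/out() and handle_hypercall() helpers: string PIO data now lives in the mmap'ed area at run->io.data_offset, the monitor reports completion through io_completed, and hypercall results go back through hypercall.ret:

	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
			break;	/* EINTR: a signal unblocked by
				   KVM_SET_SIGNAL_MASK kicked us out */

		switch (run->exit_reason) {
		case KVM_EXIT_IO: {
			void *data = (char *)run + run->io.data_offset;

			if (run->io.direction == KVM_EXIT_IO_IN)
				handle_pio_in(run->io.port, run->io.size,
					      run->io.count, data);
			else
				handle_pio_out(run->io.port, run->io.size,
					       run->io.count, data);
			run->io_completed = 1;
			break;
		}
		case KVM_EXIT_HYPERCALL:
			run->hypercall.ret =
				handle_hypercall(run->hypercall.args);
			break;
		}
	}
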
vcpu_load(vcpu);
- set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
- set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
- set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
- set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
- set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
- set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
-
- set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
- set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
-
dt.limit = sregs->idt.limit;
dt.base = sregs->idt.base;
kvm_arch_ops->set_idt(vcpu, &dt);
kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
- kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);
+ kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
if (vcpu->irq_pending[i])
__set_bit(i, &vcpu->irq_summary);
+ set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
+ set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
+ set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
+ set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
+ set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
+ set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
+
+ set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
+ set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
+
vcpu_put(vcpu);
return 0;
*type = VM_FAULT_MINOR;
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- if (pgoff != 0)
+ if (pgoff == 0)
+ page = virt_to_page(vcpu->run);
+ else if (pgoff == KVM_PIO_PAGE_OFFSET)
+ page = virt_to_page(vcpu->pio_data);
+ else
return NOPAGE_SIGBUS;
- page = virt_to_page(vcpu->run);
get_page(page);
return page;
}
goto out_unlock;
vcpu->run = page_address(page);
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ r = -ENOMEM;
+ if (!page)
+ goto out_free_run;
+ vcpu->pio_data = page_address(page);
+
vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
FX_IMAGE_ALIGN);
vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
+ vcpu->cr0 = 0x10;
r = kvm_arch_ops->vcpu_create(vcpu);
if (r < 0)
out_free_vcpus:
kvm_free_vcpu(vcpu);
+out_free_run:
+ free_page((unsigned long)vcpu->run);
+ vcpu->run = NULL;
out_unlock:
mutex_unlock(&vcpu->mutex);
out:
return r;
}
+static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
+{
+ if (sigset) {
+ sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+ vcpu->sigset_active = 1;
+ vcpu->sigset = *sigset;
+ } else
+ vcpu->sigset_active = 0;
+ return 0;
+}
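
Intended use, as far as I can tell: the vcpu thread keeps a kick signal blocked while in userspace and lets it through only inside KVM_RUN, where it forces an exit. A sketch with SIGUSR1 as an arbitrary choice; note len must be the kernel's sigset_t size (assumed 8 bytes here, x86-64), not glibc's much larger sigset_t:

	#define KERN_SIGSET_SIZE 8	/* assumption: 64-bit kernel */

	struct kvm_signal_mask *mask;
	sigset_t blocked;

	sigemptyset(&blocked);
	sigaddset(&blocked, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &blocked, NULL); /* off outside KVM_RUN */

	mask = malloc(sizeof(*mask) + KERN_SIGSET_SIZE);
	mask->len = KERN_SIGSET_SIZE;
	memset(mask->sigset, 0xff, KERN_SIGSET_SIZE);	/* block all... */
	mask->sigset[(SIGUSR1 - 1) / 8] &=
		~(1 << ((SIGUSR1 - 1) % 8));		/* ...except one */
	if (ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, mask) < 0)
		perror("KVM_SET_SIGNAL_MASK");
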
+
+/*
+ * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
+ * we have asm/x86/processor.h
+ */
+struct fxsave {
+ u16 cwd;
+ u16 swd;
+ u16 twd;
+ u16 fop;
+ u64 rip;
+ u64 rdp;
+ u32 mxcsr;
+ u32 mxcsr_mask;
+ u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+#ifdef CONFIG_X86_64
+ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
+#else
+ u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
+#endif
+};
+
+static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+
+ vcpu_load(vcpu);
+
+ memcpy(fpu->fpr, fxsave->st_space, 128);
+ fpu->fcw = fxsave->cwd;
+ fpu->fsw = fxsave->swd;
+ fpu->ftwx = fxsave->twd;
+ fpu->last_opcode = fxsave->fop;
+ fpu->last_ip = fxsave->rip;
+ fpu->last_dp = fxsave->rdp;
+ memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
+
+ vcpu_put(vcpu);
+
+ return 0;
+}
+
+static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;
+
+ vcpu_load(vcpu);
+
+ memcpy(fxsave->st_space, fpu->fpr, 128);
+ fxsave->cwd = fpu->fcw;
+ fxsave->swd = fpu->fsw;
+ fxsave->twd = fpu->ftwx;
+ fxsave->fop = fpu->last_opcode;
+ fxsave->rip = fpu->last_ip;
+ fxsave->rdp = fpu->last_dp;
+ memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
+
+ vcpu_put(vcpu);
+
+ return 0;
+}
+
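Consumer sketch for the two new ioctls, assuming the kvm_fpu field names used above; here masking all x87 exceptions in the guest control word:

	struct kvm_fpu fpu;

	if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) < 0)
		perror("KVM_GET_FPU");
	fpu.fcw |= 0x3f;	/* set the six x87 exception mask bits */
	if (ioctl(vcpu_fd, KVM_SET_FPU, &fpu) < 0)
		perror("KVM_SET_FPU");
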
static long kvm_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
switch (ioctl) {
case KVM_RUN:
+ r = -EINVAL;
+ if (arg)
+ goto out;
r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
break;
case KVM_GET_REGS: {
goto out;
break;
}
+ case KVM_SET_SIGNAL_MASK: {
+ struct kvm_signal_mask __user *sigmask_arg = argp;
+ struct kvm_signal_mask kvm_sigmask;
+ sigset_t sigset, *p;
+
+ p = NULL;
+ if (argp) {
+ r = -EFAULT;
+ if (copy_from_user(&kvm_sigmask, argp,
+ sizeof kvm_sigmask))
+ goto out;
+ r = -EINVAL;
+ if (kvm_sigmask.len != sizeof sigset)
+ goto out;
+ r = -EFAULT;
+ if (copy_from_user(&sigset, sigmask_arg->sigset,
+ sizeof sigset))
+ goto out;
+ p = &sigset;
+ }
+		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
+ break;
+ }
+ case KVM_GET_FPU: {
+ struct kvm_fpu fpu;
+
+ memset(&fpu, 0, sizeof fpu);
+ r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
+ if (r)
+ goto out;
+ r = -EFAULT;
+ if (copy_to_user(argp, &fpu, sizeof fpu))
+ goto out;
+ r = 0;
+ break;
+ }
+ case KVM_SET_FPU: {
+ struct kvm_fpu fpu;
+
+ r = -EFAULT;
+ if (copy_from_user(&fpu, argp, sizeof fpu))
+ goto out;
+ r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
+ if (r)
+ goto out;
+ r = 0;
+ break;
+ }
default:
;
}
goto out;
break;
}
+ case KVM_SET_MEMORY_ALIAS: {
+ struct kvm_memory_alias alias;
+
+ r = -EFAULT;
+ if (copy_from_user(&alias, argp, sizeof alias))
+ goto out;
+ r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
+ if (r)
+ goto out;
+ break;
+ }
default:
;
}
{
struct kvm *kvm = vma->vm_file->private_data;
unsigned long pgoff;
- struct kvm_memory_slot *slot;
struct page *page;
*type = VM_FAULT_MINOR;
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- slot = gfn_to_memslot(kvm, pgoff);
- if (!slot)
- return NOPAGE_SIGBUS;
- page = gfn_to_page(slot, pgoff);
+ page = gfn_to_page(kvm, pgoff);
if (!page)
return NOPAGE_SIGBUS;
get_page(page);
unsigned int ioctl, unsigned long arg)
{
void __user *argp = (void __user *)arg;
- int r = -EINVAL;
+ long r = -EINVAL;
switch (ioctl) {
case KVM_GET_API_VERSION:
+ r = -EINVAL;
+ if (arg)
+ goto out;
r = KVM_API_VERSION;
break;
case KVM_CREATE_VM:
+ r = -EINVAL;
+ if (arg)
+ goto out;
r = kvm_dev_ioctl_create_vm();
break;
case KVM_GET_MSR_INDEX_LIST: {
*/
r = 0;
break;
+ case KVM_GET_VCPU_MMAP_SIZE:
+ r = -EINVAL;
+ if (arg)
+ goto out;
+ r = 2 * PAGE_SIZE;
+ break;
default:
;
}
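
Matching userspace change, sketched: the vcpu mapping is now two pages, so size it from the new ioctl rather than hardcoding PAGE_SIZE; the pio data page follows struct kvm_run:

	long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run;

	if (mmap_size < 0)
		perror("KVM_GET_VCPU_MMAP_SIZE");
	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   vcpu_fd, 0);
	/* on KVM_EXIT_IO the data sits at (char *)run + run->io.data_offset */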