This patch uses the lazy-mmu hooks to batch mmu operations where
possible. This is primarily useful for batching operations applied to
active pagetables, which happens during mprotect, munmap, mremap and
the like (mmap does not do bulk pagetable operations, so it isn't
helped).
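For example, the callers that benefit look roughly like the sketch below (illustrative only, not code from this patch: example_remap_range() is a made-up name, and the loop body is a simplified stand-in for what mprotect's change_pte_range() already does). Every pte update between the enter/leave pair can be queued and issued to the hypervisor as one multicall batch rather than one hypercall per pte:

	/*
	 * Illustrative sketch, not part of this patch.  With the Xen
	 * pte hooks below, each set_pte_at() is queued as a multicall
	 * and the whole run is issued when the lazy-mmu section ends.
	 */
	static void example_remap_range(struct mm_struct *mm, pte_t *pte,
					unsigned long addr, unsigned long end,
					pgprot_t newprot)
	{
		arch_enter_lazy_mmu_mode();
		for (; addr < end; addr += PAGE_SIZE, pte++) {
			pte_t old = ptep_get_and_clear(mm, addr, pte);
			set_pte_at(mm, addr, pte, pte_modify(old, newprot));
		}
		arch_leave_lazy_mmu_mode();	/* flush the batch */
	}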
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Chris Wright <chrisw@sous-sol.org>
static void xen_flush_tlb(void)
{
+ struct mmuext_op *op;
+ struct multicall_space mcs = xen_mc_entry(sizeof(*op));
- op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
- if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
- BUG();
+ op = mcs.args;
+ op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
}
static void xen_flush_tlb_single(unsigned long addr)
{
+ struct mmuext_op *op;
+ struct multicall_space mcs = xen_mc_entry(sizeof(*op));
- op.cmd = MMUEXT_INVLPG_LOCAL;
- op.arg1.linear_addr = addr & PAGE_MASK;
- if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
- BUG();
+ op = mcs.args;
+ op->cmd = MMUEXT_INVLPG_LOCAL;
+ op->arg1.linear_addr = addr & PAGE_MASK;
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
}
static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
				 unsigned long va)
{
+ struct {
+ struct mmuext_op op;
+ cpumask_t mask;
+ } *args;
cpumask_t cpumask = *cpus;
+ struct multicall_space mcs;
/*
 * A couple of (to be removed) sanity checks:
if (cpus_empty(cpumask))
	return;
+ mcs = xen_mc_entry(sizeof(*args));
+ args = mcs.args;
+ args->mask = cpumask;
+ args->op.arg2.vcpumask = &args->mask;
+
if (va == TLB_FLUSH_ALL) {
- op.cmd = MMUEXT_TLB_FLUSH_MULTI;
- op.arg2.vcpumask = (void *)cpus;
+ args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
} else {
- op.cmd = MMUEXT_INVLPG_MULTI;
- op.arg1.linear_addr = va;
- op.arg2.vcpumask = (void *)cpus;
+ args->op.cmd = MMUEXT_INVLPG_MULTI;
+ args->op.arg1.linear_addr = va;
}
- if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
- BUG();
+ MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
}
static unsigned long xen_read_cr2(void)
void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
+ struct multicall_space mcs;
+ struct mmu_update *u;
- u.ptr = virt_to_machine(ptr).maddr;
- u.val = pmd_val_ma(val);
- if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
- BUG();
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*u));
+ u = mcs.args;
+ u->ptr = virt_to_machine(ptr).maddr;
+ u->val = pmd_val_ma(val);
+ MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
}
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
- if ((mm != current->mm && mm != &init_mm) ||
- HYPERVISOR_update_va_mapping(addr, pteval, 0) != 0)
- xen_set_pte(ptep, pteval);
+ if (mm == current->mm || mm == &init_mm) {
+ if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ struct multicall_space mcs;
+ mcs = xen_mc_entry(0);
+
+ MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+ return;
+ } else
+ if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
+ return;
+ }
+ xen_set_pte(ptep, pteval);
}
#ifdef CONFIG_X86_PAE
void xen_set_pud(pud_t *ptr, pud_t val)
{
+ struct multicall_space mcs;
+ struct mmu_update *u;
- u.ptr = virt_to_machine(ptr).maddr;
- u.val = pud_val_ma(val);
- if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
- BUG();
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*u));
+ u = mcs.args;
+ u->ptr = virt_to_machine(ptr).maddr;
+ u->val = pud_val_ma(val);
+ MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
}
void xen_set_pte(pte_t *ptep, pte_t pte)
-#define MC_BATCH 8
-#define MC_ARGS (MC_BATCH * 32 / sizeof(u64))
+#define MC_BATCH 32
+#define MC_ARGS (MC_BATCH * 16 / sizeof(u64))
struct mc_buffer {
	struct multicall_entry entries[MC_BATCH];