From 4c61afcdb2cd4be299c1442b33adf312b695e2d7 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Wed, 30 Jan 2008 13:34:09 +0100
Subject: [PATCH] x86: fix clflush_page_range logic

only present ptes must be flushed.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/mm/pageattr.c       | 31 ++++++++++++++++++++++++-------
 include/asm-x86/cacheflush.h |  2 +-
 2 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bbfc8e2466..97ec9e7d29 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,7 +26,6 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * Flushing functions
  */
 
-
 /**
  * clflush_cache_range - flush a cache range with clflush
  * @addr:	virtual start address
@@ -35,13 +34,19 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * clflush is an unordered instruction which needs fencing with mfence
  * to avoid ordering issues.
  */
-void clflush_cache_range(void *addr, int size)
+void clflush_cache_range(void *vaddr, unsigned int size)
 {
-	int i;
+	void *vend = vaddr + size - 1;
 
 	mb();
-	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-		clflush(addr+i);
+
+	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
+		clflush(vaddr);
+	/*
+	 * Flush any possible final partial cacheline:
+	 */
+	clflush(vend);
+
 	mb();
 }
 
@@ -74,9 +79,13 @@ static void __cpa_flush_range(void *arg)
 	__flush_tlb_all();
 }
 
-static void cpa_flush_range(unsigned long addr, int numpages)
+static void cpa_flush_range(unsigned long start, int numpages)
 {
+	unsigned int i, level;
+	unsigned long addr;
+
 	BUG_ON(irqs_disabled());
+	WARN_ON(PAGE_ALIGN(start) != start);
 
 	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
 
@@ -86,7 +95,15 @@ static void cpa_flush_range(unsigned long addr, int numpages)
 	 * will cause all other CPUs to flush the same
 	 * cachelines:
 	 */
-	clflush_cache_range((void *) addr, numpages * PAGE_SIZE);
+	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
+		pte_t *pte = lookup_address(addr, &level);
+
+		/*
+		 * Only flush present addresses:
+		 */
+		if (pte && pte_present(*pte))
+			clflush_cache_range((void *) addr, PAGE_SIZE);
+	}
 }
 
 /*
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 3e74aff908..8dd8c5e3cc 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -42,7 +42,7 @@ int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 
-void clflush_cache_range(void *addr, int size);
+void clflush_cache_range(void *addr, unsigned int size);
 
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
-- 
2.39.5
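
For readers who want to experiment with the flush pattern this patch introduces
outside the kernel, below is a minimal user-space sketch of the same loop,
using the SSE2 intrinsics that correspond to clflush/mfence. It assumes a
64-byte cacheline instead of reading boot_cpu_data.x86_clflush_size, and
flush_cache_range_demo() is a hypothetical name for illustration, not a kernel
or libc API:

	/*
	 * Sketch of the clflush loop from this patch, in user space.
	 * Assumption: 64-byte cacheline (the kernel reads the real size
	 * from boot_cpu_data.x86_clflush_size, reported by CPUID).
	 */
	#include <immintrin.h>

	#define CACHELINE_SIZE	64	/* assumed, not queried */

	static void flush_cache_range_demo(void *vaddr, unsigned int size)
	{
		char *p = vaddr;
		char *vend = p + size - 1;	/* last byte; size must be > 0 */

		_mm_mfence();			/* order prior stores vs. clflush */

		for (; p < vend; p += CACHELINE_SIZE)
			_mm_clflush(p);
		/*
		 * Flush the line holding the final byte, in case the range
		 * does not end on a cacheline boundary:
		 */
		_mm_clflush(vend);

		_mm_mfence();			/* make the flushes globally visible */
	}

	int main(void)
	{
		static char buf[4096];

		buf[0] = 1;
		flush_cache_range_demo(buf, sizeof(buf));
		return 0;
	}

The separate _mm_clflush(vend) mirrors the patch's clflush(vend): the strided
walk alone could stop one line short when the start address is unaligned or
the size is not a multiple of the cacheline size, so the line containing the
last byte is flushed explicitly.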