/*
 *  linux/include/asm-arm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
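
/*
 * Worked example (illustrative): with 4K pages and SHMLBA == 4 *
 * PAGE_SIZE, CACHE_COLOUR(0x40001000) == 1 and
 * CACHE_COLOUR(0x40003000) == 3; only mappings whose colours match
 * can land on the same lines of a virtually indexed cache.
 */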

/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710) || \
    defined(CONFIG_CPU_ARM740T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_ARM720T) || defined(CONFIG_CPU_ARM7TDMI) || \
    defined(CONFIG_CPU_ARM9TDMI)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wt
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_V6)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v6
# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
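
/*
 * Illustrative sketch (not part of this header): the deferred-flush
 * scheme marks a page in flush_dcache_page() and performs the real
 * clean when the page is later mapped into user space, roughly:
 *
 *	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
 *		__cpuc_flush_dcache_page(page_address(page));
 *
 * The actual call sites live in arch/arm/mm (see update_mmu_cache).
 */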

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
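
/*
 * Worked example (illustrative): with 32-byte cache lines,
 * dma_inv_range(0x1010, 0x1050) touches partial lines at both ends;
 * the lines covering 0x1000-0x101f and 0x1040-0x105f must be written
 * back before invalidation so that dirty data outside the requested
 * range is not discarded.
 */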

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(unsigned long, unsigned long);
	void (*dma_clean_range)(unsigned long, unsigned long);
	void (*dma_flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range
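
/*
 * For example (illustrative): when a kernel is built for a single
 * cache model with _CACHE defined as arm926,
 * __glue(_CACHE,_flush_kern_cache_all) pastes the tokens into
 * arm926_flush_kern_cache_all, binding the call at compile time; in
 * the MULTI_CACHE case above, the same call is instead routed
 * through the cpu_cache function pointer table.
 */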

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);

#endif
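
/*
 * Illustrative sketch: drivers must not call these directly; the
 * dma-mapping API invokes them.  Conceptually, mapping a buffer for
 * a CPU-to-device transfer does something like
 *
 *	dmac_clean_range((unsigned long)buf, (unsigned long)buf + size);
 *
 * while a device-to-CPU transfer would use dmac_inv_range() instead
 * ('buf' and 'size' here are hypothetical).
 */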

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  Since the
 * direct-mappings of these pages may contain cached data, we need
 * to do a full cache flush to ensure that writebacks don't corrupt
 * data placed into these pages via the new mappings.
 */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
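
/*
 * Illustrative example: this pair is what keeps ptrace writes
 * coherent.  When a debugger pokes a breakpoint into another
 * process, access_process_vm() ends up doing roughly
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *
 * so the target's instruction stream sees the freshly written data
 * (the variable names above are illustrative).
 */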

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			unsigned long uaddr, void *kaddr,
			unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
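
/*
 * Illustrative user-space view: a JIT that has just written
 * instructions is expected to issue the ARM private cacheflush
 * syscall (__ARM_NR_cacheflush) over the generated range before
 * jumping to it; the kernel services that request with
 * flush_cache_user_range().
 */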

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
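
/*
 * For example, code that writes instructions through the kernel
 * mapping (the module loader, kprobes and similar) must call
 *
 *	flush_icache_range((unsigned long)addr, (unsigned long)addr + size);
 *
 * before executing the new code, so that Harvard I- and D-caches
 * agree ('addr' and 'size' are illustrative).
 */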

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);

#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

#define __cacheid_present(val)			(val != read_cpuid(CPUID_ID))
#define __cacheid_vivt(val)			((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt(val)			((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing(val)		((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing(val)		((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
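
/*
 * Worked example (illustrative): a cache type register with bits
 * [28:25] == 0xe identifies a VIPT cache, so __cacheid_vipt() is
 * true; bit 23 then selects between an aliasing VIPT cache (set) and
 * a non-aliasing one (clear), as tested by the two macros above.
 */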

#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0

#elif defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_aliasing(__val);			\
	})

#else

#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		 __cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		 __cacheid_vipt_aliasing(__val);		\
	})

#endif

#endif