#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all tlb entries on the local cpu.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

#ifdef CONFIG_SMP
/*
 * Flush all tlb entries on all cpus.
 */
void smp_ptlb_all(void);

static inline void __tlb_flush_global(void)
{
	register unsigned long reg2 asm("2");
	register unsigned long reg3 asm("3");
	register unsigned long reg4 asm("4");
	long dummy;

#ifndef __s390x__
	if (!MACHINE_HAS_CSP) {
		smp_ptlb_all();
		return;
	}
#endif /* __s390x__ */

	dummy = 0;
	reg2 = reg3 = 0;
	reg4 = ((unsigned long) &dummy) + 1;
	asm volatile(
		"	csp	%0,%2"
		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
}

static inline void __tlb_flush_full(struct mm_struct *mm)
{
	cpumask_t local_cpumask;

	preempt_disable();
	/*
	 * If the process only ran on the local cpu, do a local flush.
	 */
	local_cpumask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
		__tlb_flush_local();
	else
		__tlb_flush_global();
	preempt_enable();
}
#else
#define __tlb_flush_full(mm)	__tlb_flush_local()
#endif

/*
 * Flush all tlb entries of a page table on all cpus.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc" );
}

static inline void __tlb_flush_mm(struct mm_struct * mm)
{
	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
		return;
	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu.
	 */
	if (MACHINE_HAS_IDTE) {
		if (mm->context.noexec)
			__tlb_flush_idte((unsigned long)
					 get_shadow_table(mm->pgd) |
					 mm->context.asce_bits);
		__tlb_flush_idte((unsigned long) mm->pgd |
				 mm->context.asce_bits);
		return;
	}
	__tlb_flush_full(mm);
}

static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
{
	if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
		__tlb_flush_mm(mm);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

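/*
 * Usage sketch (illustrative only, not part of this interface): generic
 * mm code calls these after it has updated the page tables, e.g.
 *
 *	flush_tlb_mm(mm);			after a whole-mm update
 *	flush_tlb_kernel_range(start, end);	after a kernel mapping change
 *
 * On s390 flush_tlb(), flush_tlb_all() and flush_tlb_page() are
 * intentionally empty; the mm and range variants below do the work.
 */
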
/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
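/*
 * Deferred-flush pattern, sketched for illustration (simplified, not
 * taken from this file): during copy_page_range the parent's ptes are
 * write-protected without an immediate flush, and the caller flushes
 * once at the end:
 *
 *	ptep_set_wrprotect(src_mm, addr, src_pte);	for each pte
 *	...
 *	flush_tlb_mm(src_mm);				one flush at the end
 *
 * change_protection follows the same idea with ptep_get_and_clear()
 * per pte and a single flush_tlb_range(vma, start, end) afterwards.
 */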
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_cond(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_cond(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_mm(&init_mm);
}

#endif /* _S390_TLBFLUSH_H */