#ifndef _SPARC64_TSB_H
#define _SPARC64_TSB_H

/* The sparc64 TSB is similar to the powerpc hashtables.  It's a
 * power-of-2 sized table of TAG/PTE pairs.  The cpu precomputes
 * pointers into this table for 8K and 64K page sizes, and also a
 * comparison TAG based upon the virtual address and context which
 * faults.
 *
 * TLB miss trap handler software does the actual lookup via something
 * like:
 *
 *	ldxa		[%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
 *	ldxa		[%g0] ASI_{D,I}MMU, %g6
 *	ldda		[%g1] ASI_NUCLEUS_QUAD_LDD, %g4
 *	cmp		%g4, %g6
 *	bne,pn		%xcc, tsb_miss_{d,i}tlb
 *	 mov		FAULT_CODE_{D,I}TLB, %g3
 *	stxa		%g5, [%g0] ASI_{D,I}TLB_DATA_IN
 *	retry
 *
 * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
 * PTE.  The TAG is of the same layout as the TLB TAG TARGET mmu
 * register which is:
 *
 * -------------------------------------------------
 * |  -  |  CONTEXT  |  -  |   VADDR bits 63:22    |
 * -------------------------------------------------
 *  63 61 60       48 47 42 41                    0
 *
 * Like the powerpc hashtables we need to use locking in order to
 * synchronize while we update the entries.  PTE updates need locking
 * as well.
 *
 * We need to carefully choose a lock bit for the TSB entry.  We
 * choose to use bit 47 in the tag.  Also, since we never map anything
 * at page zero in context zero, we use zero as an invalid tag entry.
 * When the lock bit is set, this forces a tag comparison failure.
 */

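/* A minimal C model of the comparison tag described above, for
 * illustration only; the helper names are hypothetical and the field
 * positions follow the diagram (13-bit CONTEXT in bits 60:48, VADDR
 * bits 63:22 in bits 41:0, lock bit 47).
 */
#if 0
static unsigned long tsb_mk_tag(unsigned long ctx, unsigned long vaddr)
{
	return ((ctx & 0x1fffUL) << 48) | (vaddr >> 22);
}

/* Bit 47 is never set in a valid tag, so a locked entry always forces
 * a tag-comparison miss; an all-zero tag is the invalid entry.
 */
static int tsb_tag_locked(unsigned long tag)
{
	return (tag >> 47) & 1;
}
#endif
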
#define TSB_TAG_LOCK_BIT	47
#define TSB_TAG_LOCK_HIGH	(1 << (TSB_TAG_LOCK_BIT - 32))

#define TSB_MEMBAR	membar	#StoreStore

/* Some cpus support physical address quad loads.  We want to use
 * those if possible so we don't need to hard-lock the TSB mapping
 * into the TLB.  We encode some instruction patching in order to
 * support this.
 *
 * The kernel TSB is locked into the TLB by virtue of being in the
 * kernel image, so we don't play these games for swapper_tsb access.
 */
#ifndef __ASSEMBLY__
struct tsb_ldquad_phys_patch_entry {
	unsigned int	addr;		/* address of the insn to patch */
	unsigned int	sun4u_insn;	/* replacement insn on sun4u */
	unsigned int	sun4v_insn;	/* replacement insn on sun4v */
};
extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch,
	__tsb_ldquad_phys_patch_end;

struct tsb_phys_patch_entry {
	unsigned int	addr;		/* address of the insn to patch */
	unsigned int	insn;		/* replacement insn */
};
extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
#endif
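
/* Illustrative sketch of how early boot code could apply one of these
 * patch tables, e.g. over [&__tsb_phys_patch, &__tsb_phys_patch_end).
 * The real loop lives in the sparc64 mm code; treat this as a model of
 * the mechanism, not the implementation.  The 32-bit addr field works
 * because the kernel image sits low in the virtual address space.
 */
#if 0
static void apply_tsb_phys_patch(struct tsb_phys_patch_entry *p,
				 struct tsb_phys_patch_entry *end)
{
	while (p < end) {
		unsigned long addr = p->addr;

		*(unsigned int *)addr = p->insn;	/* overwrite insn at 661: */
		__asm__ __volatile__("flush	%0"	/* keep I-cache coherent */
				     : : "r" (addr));
		p++;
	}
}
#endif
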
#define TSB_LOAD_QUAD(TSB, REG)	\
661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
	.section	.tsb_ldquad_phys_patch, "ax"; \
	.word		661b; \
	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG; \
	ldda		[TSB] ASI_QUAD_LDD_PHYS_4V, REG; \
	.previous

#define TSB_LOAD_TAG_HIGH(TSB, REG) \
661:	lduwa		[TSB] ASI_N, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	lduwa		[TSB] ASI_PHYS_USE_EC, REG; \
	.previous

#define TSB_LOAD_TAG(TSB, REG) \
661:	ldxa		[TSB] ASI_N, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	ldxa		[TSB] ASI_PHYS_USE_EC, REG; \
	.previous

#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
661:	casa		[TSB] ASI_N, REG1, REG2; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	casa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
	.previous

#define TSB_CAS_TAG(TSB, REG1, REG2) \
661:	casxa		[TSB] ASI_N, REG1, REG2; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	casxa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
	.previous

#define TSB_STORE(ADDR, VAL) \
661:	stxa		VAL, [ADDR] ASI_N; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	stxa		VAL, [ADDR] ASI_PHYS_USE_EC; \
	.previous

#define TSB_LOCK_TAG(TSB, REG1, REG2)	\
99:	TSB_LOAD_TAG_HIGH(TSB, REG1);	\
	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;	\
	andcc	REG1, REG2, %g0;	\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_CAS_TAG_HIGH(TSB, REG1, REG2);	\
	cmp	REG1, REG2;		\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_MEMBAR

#define TSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB;	\
	TSB_STORE(TSB, TTE);	\
	sub	TSB, 0x8, TSB;	\
	TSB_MEMBAR;		\
	TSB_STORE(TSB, TAG);

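/* A C-level model of the update protocol implemented by TSB_LOCK_TAG
 * and TSB_WRITE above, sketch only; __sync_val_compare_and_swap stands
 * in for the casa instruction and the struct is purely illustrative.
 * Because sparc64 is big-endian, the upper 32 bits of the tag (which
 * contain lock bit 47) live at the lower address, so a 32-bit CAS on
 * that word is enough to take the lock.
 */
#if 0
struct tsb_entry { unsigned long tag, tte; };

static void tsb_update(struct tsb_entry *ent, unsigned long tte,
		       unsigned long tag)
{
	unsigned int *tag_hi = (unsigned int *)&ent->tag;
	unsigned int old;

	for (;;) {
		old = *tag_hi;
		if (old & TSB_TAG_LOCK_HIGH)
			continue;	/* locked by someone else: spin */
		if (__sync_val_compare_and_swap(tag_hi, old,
						TSB_TAG_LOCK_HIGH) == old)
			break;		/* lock bit now set */
	}
	ent->tte = tte;			/* store the PTE first ... */
	__asm__ __volatile__("membar	#StoreStore" : : : "memory");
	ent->tag = tag;			/* ... then the tag, clearing bit 47 */
}
#endif
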
#define KTSB_LOAD_QUAD(TSB, REG) \
	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG;

#define KTSB_STORE(ADDR, VAL) \
	stxa		VAL, [ADDR] ASI_N;

#define KTSB_LOCK_TAG(TSB, REG1, REG2)	\
99:	lduwa	[TSB] ASI_N, REG1;	\
	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;	\
	andcc	REG1, REG2, %g0;	\
	bne,pn	%icc, 99b;		\
	 nop;				\
	casa	[TSB] ASI_N, REG1, REG2;\
	cmp	REG1, REG2;		\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_MEMBAR

#define KTSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB;	\
	stxa	TTE, [TSB] ASI_N;	\
	sub	TSB, 0x8, TSB;	\
	TSB_MEMBAR;		\
	stxa	TAG, [TSB] ASI_N;

	/* Do a kernel page table walk.  Leaves physical PTE pointer in
	 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
	 * VADDR will not be clobbered, but REG2 will.
	 */
#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL)	\
	sethi		%hi(swapper_pg_dir), REG1; \
	or		REG1, %lo(swapper_pg_dir), REG1; \
	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	andn		REG2, 0x3, REG2; \
	lduw		[REG1 + REG2], REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x3, REG2; \
	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x7, REG2; \
	add		REG1, REG2, REG1;

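/* A C model of KERN_PGTABLE_WALK above, sketch only.  The PGD and PMD
 * levels hold 32-bit entries containing a physical address shifted
 * right by 11; the assembly reads them through physical ASIs, which C
 * cannot express, so phys_lduw() below is a hypothetical stand-in.
 * Each sllx/srlx pair isolates one level's index bits from VADDR,
 * already scaled into a byte offset into that level's table.
 */
#if 0
static unsigned long kern_pgtable_walk(unsigned long vaddr)
{
	unsigned int *pgd = (unsigned int *)swapper_pg_dir;
	unsigned long off, ent;

	/* PGD level: 4-byte entries, so the extracted index lands
	 * pre-multiplied by 4; only stray low bits need clearing.
	 */
	off = ((vaddr << (64 - (PGDIR_SHIFT + PGDIR_BITS)))
	       >> (64 - PAGE_SHIFT)) & ~0x3UL;
	ent = pgd[off / 4];
	if (!ent)
		return 0;		/* FAIL_LABEL */

	/* PMD level: reached via the physical address from the PGD. */
	off = ((vaddr << (64 - (PMD_SHIFT + PMD_BITS)))
	       >> (64 - PAGE_SHIFT)) & ~0x3UL;
	ent = phys_lduw((ent << 11) + off);	/* hypothetical helper */
	if (!ent)
		return 0;		/* FAIL_LABEL */

	/* PTE level: 8-byte entries; return the physical PTE address. */
	off = ((vaddr << (64 - PMD_SHIFT))
	       >> (64 - PAGE_SHIFT)) & ~0x7UL;
	return (ent << 11) + off;
}
#endif
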
	/* Do a user page table walk in MMU globals.  Leaves physical PTE
	 * pointer in REG1.  Jumps to FAIL_LABEL on early page table walk
	 * termination.  Physical base of page tables is in PHYS_PGD which
	 * will not be modified.
	 *
	 * VADDR will not be clobbered, but REG1 and REG2 will.
	 */
#define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL)	\
	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	andn		REG2, 0x3, REG2; \
	lduwa		[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x3, REG2; \
	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x7, REG2; \
	add		REG1, REG2, REG1;

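/* USER_PGTABLE_WALK_TL1 is the same three-level walk as
 * KERN_PGTABLE_WALK above; the difference is that the PGD base arrives
 * as a physical address in PHYS_PGD, so every level is read with
 * ASI_PHYS_USE_EC.  Bypassing the TLB entirely is what makes the walk
 * usable from the TL>0 miss handlers, where a further TLB miss on the
 * page tables themselves is unwelcome.
 */
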
/* Look up an OBP mapping on VADDR in the prom_trans[] table at TL>0.
 * If no entry is found, FAIL_LABEL will be branched to.  On success
 * the resulting PTE value will be left in REG1.  VADDR is preserved
 * by this routine.
 */
#define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \
	sethi		%hi(prom_trans), REG1; \
	or		REG1, %lo(prom_trans), REG1; \
97:	ldx		[REG1 + 0x00], REG2; \
	brz,pn		REG2, FAIL_LABEL; \
	 nop; \
	ldx		[REG1 + 0x08], REG3; \
	add		REG2, REG3, REG3; \
	cmp		REG2, VADDR; \
	bgu,pt		%xcc, 98f; \
	 cmp		VADDR, REG3; \
	bgeu,pt		%xcc, 98f; \
	 ldx		[REG1 + 0x10], REG3; \
	sub		VADDR, REG2, REG2; \
	ba,pt		%xcc, 99f; \
	 add		REG3, REG2, REG1; \
98:	ba,pt		%xcc, 97b; \
	 add		REG1, (3 * 8), REG1; \
99:

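/* A C model of the loop above, sketch only.  The 0x00/0x08/0x10
 * offsets and the 3 * 8 stride correspond to the virt/size/data fields
 * of the prom_trans[] entries (struct linux_prom_translation), with a
 * zero virt field terminating the table.
 */
#if 0
static unsigned long obp_trans_lookup(unsigned long vaddr)
{
	struct linux_prom_translation *p;

	for (p = prom_trans; p->virt; p++) {
		if (vaddr >= p->virt && vaddr < p->virt + p->size)
			return p->data + (vaddr - p->virt);	/* PTE value */
	}
	return 0;	/* FAIL_LABEL */
}
#endif
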
	/* We use a 32K TSB for the whole kernel, which allows us to
	 * handle about 16MB of modules and vmalloc mappings without
	 * incurring many hash conflicts.
	 */
#define KERNEL_TSB_SIZE_BYTES	(32 * 1024)
#define KERNEL_TSB_NENTRIES	\
	(KERNEL_TSB_SIZE_BYTES / 16)

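/* A quick check of the arithmetic in the comment above, assuming the
 * 8K base page size: 32K of TSB at 16 bytes per entry gives 2048
 * entries, and 2048 entries times 8K per page covers exactly 16MB
 * before mappings must start sharing slots.  Sketch only, not built.
 */
#if 0
_Static_assert(KERNEL_TSB_NENTRIES == 2048,
	       "32K TSB / 16-byte entries = 2048 entries");
_Static_assert(KERNEL_TSB_NENTRIES * 8192 == 16 * 1024 * 1024,
	       "2048 entries x 8K pages = 16MB of coverage");
#endif
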
	/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
	 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
	 * and the found TTE will be left in REG1.  REG3 and REG4 must
	 * be an even/odd pair of registers.
	 *
	 * VADDR and TAG will be preserved and not clobbered by this macro.
	 */
#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
	sethi		%hi(swapper_tsb), REG1; \
	or		REG1, %lo(swapper_tsb), REG1; \
	srlx		VADDR, PAGE_SHIFT, REG2; \
	and		REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
	sllx		REG2, 4, REG2; \
	add		REG1, REG2, REG2; \
	KTSB_LOAD_QUAD(REG2, REG3); \
	cmp		REG3, TAG; \
	be,a,pt		%xcc, OK_LABEL; \
	 mov		REG4, REG1;

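/* A C model of the lookup above, sketch only.  The even/odd register
 * pair filled by KTSB_LOAD_QUAD maps onto the tag/tte fields of the
 * illustrative struct below.
 */
#if 0
struct tsb_entry { unsigned long tag, tte; };
extern struct tsb_entry swapper_tsb[KERNEL_TSB_NENTRIES];

static unsigned long kern_tsb_lookup(unsigned long vaddr, unsigned long tag)
{
	struct tsb_entry *ent;

	ent = &swapper_tsb[(vaddr >> PAGE_SHIFT) & (KERNEL_TSB_NENTRIES - 1)];
	if (ent->tag == tag)
		return ent->tte;	/* hit: branch to OK_LABEL with TTE */
	return 0;			/* miss: fall through */
}
#endif
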
#endif /* !(_SPARC64_TSB_H) */