1 /* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
3 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
4 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 #include <linux/config.h>
13 #include <asm/pgtable.h>
18 .globl sparc64_vpte_patchme1
19 .globl sparc64_vpte_patchme2

/* NOTE(review): this chunk is an incomplete extract -- comment
 * delimiters and several instructions (compares, delay slots, the
 * patchable instruction bodies) from the original file are missing
 * between the numbered lines.  Added comments only restore delimiters
 * and record review notes; confirm details against the full file.
 */
/*
21 * On a second level vpte miss, check whether the original fault is to the OBP
22 * range (note that this is only possible for instruction miss, data misses to
23 * obp range do not use vpte). If so, go back directly to the faulting address.
24 * This is because we want to read the tpc, otherwise we have no way of knowing
25 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
26 * also ensures no vpte range addresses are dropped into tlb while obp is
27 * executing (see inherit_locked_prom_mappings() rant).
 */
30 /* Note that kvmap below has verified that the address is
31 * in the range MODULES_VADDR --> VMALLOC_END already. So
32 * here we need only check if it is an OBP address or not.
 */
34 sethi %hi(LOW_OBP_ADDRESS), %g5
/* NOTE(review): the cmp instructions feeding these two branches are
 * not visible in this extract -- presumably comparisons of the
 * faulting address against the LOW/HIGH OBP bounds; verify against
 * the full source.
 */
36 blu,pn %xcc, sparc64_vpte_patchme1
40 blu,pn %xcc, obp_iaddr_patch
43 /* These two instructions are patched by paging_init(). */
44 sparc64_vpte_patchme1:
46 sparc64_vpte_patchme2:
49 /* With kernel PGD in %g5, branch back into dtlb_backend. */
50 ba,pt %xcc, sparc64_kpte_continue
51 andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */

/* Fallthrough error path (its label is not in this extract). */
54 /* Restore previous TAG_ACCESS, %g5 is zero, and we will
55 * skip over the trap instruction so that the top level
56 * TLB miss handler will think this %g5 value is just an
57 * invalid PTE, thus branching to full fault processing.
 */
/* NOTE(review): %g1 presumably holds TLB_SFSR here, making
 * %g1 + %g1 the TAG_ACCESS offset -- the mov that sets %g1 up is
 * missing from this extract; confirm against the full source.
 */
60 stxa %g4, [%g1 + %g1] ASI_DMMU
63 .globl obp_iaddr_patch
/* obp_iaddr_patch: handle an instruction-TLB miss that fell into the
 * OBP address window.  NOTE(review): the two patchable instructions
 * referenced just below (filled in by inherit_prom_mappings()) are not
 * visible in this extract.
 */
65 /* These two instructions patched by inherit_prom_mappings(). */
69 /* Behave as if we are at TL0. */
71 rdpr %tpc, %g4 /* Find original faulting iaddr */
72 srlx %g4, 13, %g4 /* Throw out context bits */
73 sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
75 /* Restore previous TAG_ACCESS. */
/* NOTE(review): as in the DMMU path, %g1 presumably holds TLB_SFSR so
 * %g1 + %g1 addresses TAG_ACCESS -- the setup mov is not in this
 * extract; confirm against the full source.
 */
77 stxa %g4, [%g1 + %g1] ASI_IMMU
84 /* Load PMD, is it valid? */
/* 32-bit physical load of the PMD entry; the validity branch and the
 * PTE-offset computation between the two loads are missing here.
 */
85 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
/* 64-bit physical load of the PTE; a non-negative value (top bit
 * clear) is treated as invalid and diverted to the long path.
 */
95 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
96 brgez,pn %g5, longpath
99 /* TLB load and return from trap. */
100 stxa %g5, [%g0] ASI_ITLB_DATA_IN
103 .globl obp_daddr_patch
/* obp_daddr_patch: handle a data-TLB miss that fell into the OBP
 * address window.  Mirrors obp_iaddr_patch but loads the DTLB.
 * NOTE(review): the patchable instructions, offset computations and
 * several delay slots are missing from this extract.
 */
105 /* These two instructions patched by inherit_prom_mappings(). */
109 /* Get PMD offset. */
114 /* Load PMD, is it valid? */
/* 32-bit physical load of the PMD entry. */
115 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
119 /* Get PTE offset. */
/* 64-bit physical load of the PTE; a non-negative value (top bit
 * clear) is treated as invalid and diverted to the long path.
 */
125 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
126 brgez,pn %g5, longpath
129 /* TLB load and return from trap. */
130 stxa %g5, [%g0] ASI_DTLB_DATA_IN
/*
134 * On a first level data miss, check whether this is to the OBP range (note
135 * that such accesses can be made by prom, as well as by kernel using
136 * prom_getproperty on "address"), and if so, do not use vpte access ...
137 * rather, use information saved during inherit_prom_mappings() using 8k
 * pagesize.  (NOTE(review): the remainder of this comment, and the
 * kvmap label plus the instructions that load the faulting address
 * into %g4, are missing from this extract.)
 */
/* Kernel-text/linear addresses (top bit set, i.e. negative) skip all
 * range checks and go straight to kvmap_load.
 */
142 brlz,pt %g4, kvmap_load
/* NOTE(review): the cmp instructions feeding each of the four
 * branches below are not visible here -- presumably comparisons of
 * %g4 against the constant staged in %g5; verify in the full file.
 */
146 sethi %hi(MODULES_VADDR), %g5
/* Below MODULES_VADDR: not a kernel mapping -> full fault path. */
148 blu,pn %xcc, longpath
149 mov (VMALLOC_END >> 24), %g5 /* delay slot: stage VMALLOC_END bound */
/* At or above VMALLOC_END: out of range -> full fault path. */
152 bgeu,pn %xcc, longpath
156 sethi %hi(LOW_OBP_ADDRESS), %g5
/* Below the OBP window: ordinary module/vmalloc address. */
158 blu,pn %xcc, kvmap_vmalloc_addr
/* Inside the OBP window: use the patched OBP data-miss path. */
162 blu,pn %xcc, obp_daddr_patch
166 /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
167 ldxa [%g3 + %g6] ASI_N, %g5
/* Non-negative (top bit clear) VPTE is invalid -> full fault path. */
168 brgez,pn %g5, longpath
172 /* PTE is valid, load into TLB and return from trap. */
173 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB