/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006 by Thiemo Seufer
 * Copyright (C) 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

/*
 * Found by experiment: At least some revisions of the 4kc throw a
 * machine check exception under some circumstances, triggered by
 * invalid values in the index register.  Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr, avoids the invalid index register values.  Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int __init m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}
/*
 * A little micro-assembler, intended for TLB refill handler
 * synthesizing. It is intentionally kept simple, does only support
 * a subset of instructions, and does not try to hide pipeline effects
 * like branch delay slots.
 */
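/*
 * Editor's worked example (illustration, not from the original source):
 * given the encodings in insn_table below, a call such as
 *
 *	u32 buf[4], *p = buf;
 *	i_addiu(&p, 27, 27, 4);		(assembles "addiu $k1, $k1, 4")
 *
 * looks up insn_addiu (fields RS | RT | SIMM), packs
 * op = 001001, rs = rt = 11011, imm = 0x0004 into the word 0x277b0004,
 * stores it at *p and advances p by one instruction.
 */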
#define IMM_MASK	0xffff
#define IMM_SH		0
#define JIMM_MASK	0x3ffffff
#define JIMM_SH		0
#define FUNC_MASK	0x3f
#define FUNC_SH		0
#define SET_MASK	0x7
#define SET_SH		0
enum opcode {
	insn_invalid,
	insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
	insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
	insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
	insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32,
	insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
	insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0,
	insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
	insn_sra, insn_srl, insn_subu, insn_sw, insn_tlbp, insn_tlbwi,
	insn_tlbwr, insn_xor, insn_xori
};

struct insn {
	enum opcode opcode;
	u32 match;
	enum fields fields;
};
/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)					\
	((a) << OP_SH						\
	 | (b) << RS_SH						\
	 | (c) << RT_SH						\
	 | (d) << RD_SH						\
	 | (e) << RE_SH						\
	 | (f) << FUNC_SH)
static struct insn insn_table[] __initdata = {
	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
	{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
	{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
	{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
	{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
	{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
	{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
	{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
	{ insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
	{ insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
	{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
	{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
	{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
	{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
	{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
	{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
	{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
	{ insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
	{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
	{ insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
	{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
	{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
	{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_invalid, 0, 0 }
};
static u32 __init build_rs(u32 arg)
{
	if (arg & ~RS_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RS_MASK) << RS_SH;
}

static u32 __init build_rt(u32 arg)
{
	if (arg & ~RT_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RT_MASK) << RT_SH;
}

static u32 __init build_rd(u32 arg)
{
	if (arg & ~RD_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RD_MASK) << RD_SH;
}

static u32 __init build_re(u32 arg)
{
	if (arg & ~RE_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RE_MASK) << RE_SH;
}

static u32 __init build_simm(s32 arg)
{
	if (arg > 0x7fff || arg < -0x8000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & 0xffff;
}

static u32 __init build_uimm(u32 arg)
{
	if (arg & ~IMM_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & IMM_MASK;
}

static u32 __init build_bimm(s32 arg)
{
	if (arg > 0x1ffff || arg < -0x20000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	if (arg & 0x3)
		printk(KERN_WARNING "Invalid TLB synthesizer branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
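/*
 * Editor's note (illustration): the branch argument is the byte offset
 * relative to the instruction after the branch, as computed later by
 * __resolve_relocs().  A branch to a label two instructions ahead gets
 * arg = 8 and encodes as (8 >> 2) & 0x7fff = 2; negative offsets set
 * bit 15 via the sign term.
 */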
static u32 __init build_jimm(u32 arg)
{
	if (arg & ~((JIMM_MASK) << 2))
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg >> 2) & JIMM_MASK;
}

static u32 __init build_func(u32 arg)
{
	if (arg & ~FUNC_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & FUNC_MASK;
}

static u32 __init build_set(u32 arg)
{
	if (arg & ~SET_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & SET_MASK;
}
/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void __init build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
		if (insn_table[i].opcode == opc) {
			ip = &insn_table[i];
			break;
		}

	if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
		panic("Unsupported TLB synthesizer instruction %d", opc);

	op = ip->match;
	va_start(ap, opc);
	if (ip->fields & RS) op |= build_rs(va_arg(ap, u32));
	if (ip->fields & RT) op |= build_rt(va_arg(ap, u32));
	if (ip->fields & RD) op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE) op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM) op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM) op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
	va_end(ap);

	**buf = op;
	(*buf)++;
}
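/*
 * Editor's note (illustration): for insn_addiu the field mask is
 * RS | RT | SIMM, so build_insn() consumes its varargs as (rs, rt, simm).
 * The I_u2u1s3() wrapper below swaps the first two operands, letting
 * callers write i_addiu(&p, rt, rs, imm) in the destination-first order
 * of the assembly mnemonic.
 */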
#define I_u1u2u3(op)						\
	static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, a, b, c);		\
	}

#define I_u2u1u3(op)						\
	static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, b, a, c);		\
	}

#define I_u3u1u2(op)						\
	static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, b, c, a);		\
	}

#define I_u1u2s3(op)						\
	static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
		unsigned int b, signed int c)			\
	{							\
		build_insn(buf, insn##op, a, b, c);		\
	}

#define I_u2s3u1(op)						\
	static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
		signed int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, c, a, b);		\
	}

#define I_u2u1s3(op)						\
	static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
		unsigned int b, signed int c)			\
	{							\
		build_insn(buf, insn##op, b, a, c);		\
	}

#define I_u1u2(op)						\
	static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
		unsigned int b)					\
	{							\
		build_insn(buf, insn##op, a, b);		\
	}

#define I_u1s2(op)						\
	static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
		signed int b)					\
	{							\
		build_insn(buf, insn##op, a, b);		\
	}

#define I_u1(op)						\
	static void __init __maybe_unused i##op(u32 **buf, unsigned int a) \
	{							\
		build_insn(buf, insn##op, a);			\
	}

#define I_0(op)							\
	static void __init __maybe_unused i##op(u32 **buf)	\
	{							\
		build_insn(buf, insn##op);			\
	}
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
};

struct label {
	u32 *addr;
	enum label_id lab;
};

static void __init build_label(struct label **lab, u32 *addr,
			       enum label_id l)
{
	(*lab)->addr = addr;
	(*lab)->lab = l;
	(*lab)++;
}

#define L_LA(lb)						\
	static inline void __init l##lb(struct label **lab, u32 *addr) \
	{							\
		build_label(lab, addr, label##lb);		\
	}

L_LA(_smp_pgtable_change)
L_LA(_r3000_write_probe_fail)
/* convenience macros for instructions */
#ifdef CONFIG_64BIT
# define i_LW(buf, rs, rt, off) i_ld(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sd(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
# define i_LL(buf, rs, rt, off) i_lld(buf, rs, rt, off)
# define i_SC(buf, rs, rt, off) i_scd(buf, rs, rt, off)
#else
# define i_LW(buf, rs, rt, off) i_lw(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sw(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
# define i_LL(buf, rs, rt, off) i_ll(buf, rs, rt, off)
# define i_SC(buf, rs, rt, off) i_sc(buf, rs, rt, off)
#endif

#define i_b(buf, off) i_beq(buf, 0, 0, off)
#define i_beqz(buf, rs, off) i_beq(buf, rs, 0, off)
#define i_beqzl(buf, rs, off) i_beql(buf, rs, 0, off)
#define i_bnez(buf, rs, off) i_bne(buf, rs, 0, off)
#define i_bnezl(buf, rs, off) i_bnel(buf, rs, 0, off)
#define i_move(buf, a, b) i_ADDU(buf, a, 0, b)
#define i_nop(buf) i_sll(buf, 0, 0, 0)
#define i_ssnop(buf) i_sll(buf, 0, 0, 1)
#define i_ehb(buf) i_sll(buf, 0, 0, 3)
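/*
 * Editor's note: i_nop, i_ssnop and i_ehb are all encodings of
 * "sll $0, $0, sa".  sa == 1 (ssnop) forces single issue on
 * superscalar cores, and sa == 3 (ehb) is the MIPS32r2 execution
 * hazard barrier; on older CPUs both simply execute as nops.
 */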
static int __init __maybe_unused in_compat_space_p(long addr)
{
	/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
	return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
#else
	return 1;
#endif
}

static int __init __maybe_unused rel_highest(long val)
{
#ifdef CONFIG_64BIT
	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
#else
	return 0;
#endif
}

static int __init __maybe_unused rel_higher(long val)
{
#ifdef CONFIG_64BIT
	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
#else
	return 0;
#endif
}

static int __init rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static int __init rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
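/*
 * Editor's worked example (illustration): the "+ 0x8000" terms round
 * the upper half up whenever the lower half will be sign-extended as
 * negative.  For val = 0x1234abcd, rel_lo() gives 0xabcd sign-extended
 * to -0x5433, rel_hi() gives 0x1235, and
 * (0x1235 << 16) + (-0x5433) == 0x1234abcd again.
 */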
static void __init i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
	if (!in_compat_space_p(addr)) {
		i_lui(buf, rs, rel_highest(addr));
		if (rel_higher(addr))
			i_daddiu(buf, rs, rs, rel_higher(addr));
		if (rel_hi(addr)) {
			i_dsll(buf, rs, rs, 16);
			i_daddiu(buf, rs, rs, rel_hi(addr));
			i_dsll(buf, rs, rs, 16);
		} else
			i_dsll32(buf, rs, rs, 0);
	} else
		i_lui(buf, rs, rel_hi(addr));
}

static void __init __maybe_unused i_LA(u32 **buf, unsigned int rs, long addr)
{
	i_LA_mostly(buf, rs, addr);
	if (rel_lo(addr)) {
		if (!in_compat_space_p(addr))
			i_daddiu(buf, rs, rs, rel_lo(addr));
		else
			i_addiu(buf, rs, rs, rel_lo(addr));
	}
}
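/*
 * Editor's illustration: for an address in 32-bit compat space such as
 * 0xffffffff80001234, i_LA() emits just "lui rs, 0x8000" (which
 * sign-extends on a 64-bit CPU) followed by "addiu rs, rs, 0x1234".
 * Only addresses outside compat space need the full lui/daddiu/dsll
 * sequence of i_LA_mostly().
 */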
struct reloc {
	u32 *addr;
	unsigned int type;
	enum label_id lab;
};

static void __init r_mips_pc16(struct reloc **rel, u32 *addr,
			       enum label_id l)
{
	(*rel)->addr = addr;
	(*rel)->type = R_MIPS_PC16;
	(*rel)->lab = l;
	(*rel)++;
}

static inline void __resolve_relocs(struct reloc *rel, struct label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
		*rel->addr |= build_bimm(laddr - (raddr + 4));
		break;

	default:
		panic("Unsupported TLB synthesizer relocation %d",
		      rel->type);
	}
}

static void __init resolve_relocs(struct reloc *rel, struct label *lab)
{
	struct label *l;

	for (; rel->lab != label_invalid; rel++)
		for (l = lab; l->lab != label_invalid; l++)
			if (rel->lab == l->lab)
				__resolve_relocs(rel, l);
}

static void __init move_relocs(struct reloc *rel, u32 *first, u32 *end,
			       long off)
{
	for (; rel->lab != label_invalid; rel++)
		if (rel->addr >= first && rel->addr < end)
			rel->addr += off;
}

static void __init move_labels(struct label *lab, u32 *first, u32 *end,
			       long off)
{
	for (; lab->lab != label_invalid; lab++)
		if (lab->addr >= first && lab->addr < end)
			lab->addr += off;
}

static void __init copy_handler(struct reloc *rel, struct label *lab,
				u32 *first, u32 *end, u32 *target)
{
	long off = (long)(target - first);

	memcpy(target, first, (end - first) * sizeof(u32));

	move_relocs(rel, first, end, off);
	move_labels(lab, first, end, off);
}

static int __init __maybe_unused insn_has_bdelay(struct reloc *rel,
						 u32 *addr)
{
	for (; rel->lab != label_invalid; rel++) {
		if (rel->addr == addr
		    && (rel->type == R_MIPS_PC16
			|| rel->type == R_MIPS_26))
			return 1;
	}

	return 0;
}
/* convenience functions for labeled branches */
static void __init __maybe_unused
il_bltz(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bltz(p, reg, 0);
}

static void __init __maybe_unused il_b(u32 **p, struct reloc **r,
				       enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_b(p, 0);
}

static void __init il_beqz(u32 **p, struct reloc **r, unsigned int reg,
			   enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_beqz(p, reg, 0);
}

static void __init __maybe_unused
il_beqzl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_beqzl(p, reg, 0);
}

static void __init il_bnez(u32 **p, struct reloc **r, unsigned int reg,
			   enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bnez(p, reg, 0);
}

static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
			    enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bgezl(p, reg, 0);
}

static void __init __maybe_unused
il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bgez(p, reg, 0);
}
/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}
/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_CONTEXT)
#endif
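/*
 * Editor's note: GET_CONTEXT reads c0_context (c0_xcontext on 64-bit
 * kernels), whose BadVPN2 field holds the page-pair number of the
 * faulting address; build_adjust_context() below turns that into a
 * byte offset into the page table.
 */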
/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __initdata;

/* simply assume worst case size for labels and relocs */
static struct label labels[128] __initdata;
static struct reloc relocs[128] __initdata;
/*
 * The R3000 TLB handler is simple.
 */
static void __init build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	i_mfc0(&p, K0, C0_BADVADDR);
	i_lui(&p, K1, rel_hi(pgdc)); /* cp0 delay */
	i_lw(&p, K1, rel_lo(pgdc), K1);
	i_srl(&p, K0, K0, 22); /* load delay */
	i_sll(&p, K0, K0, 2);
	i_addu(&p, K1, K1, K0);
	i_mfc0(&p, K0, C0_CONTEXT);
	i_lw(&p, K1, 0, K1); /* cp0 delay */
	i_andi(&p, K0, K0, 0xffc); /* load delay */
	i_addu(&p, K1, K1, K0);
	i_lw(&p, K0, 0, K1);
	i_nop(&p); /* load delay */
	i_mtc0(&p, K0, C0_ENTRYLO0);
	i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	i_tlbwr(&p); /* cp0 delay */
	i_jr(&p, K1);
	i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_info("Synthesized TLB refill handler (%u instructions).\n",
		(unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __initdata;
/*
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0 needs this, too. */
	case CPU_R4600:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		i_nop(p);
		i_tlbp(p);
		break;

	default:
		i_tlbp(p);
		break;
	}
}
/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __init build_tlb_write_entry(u32 **p, struct label **l,
					 struct reloc **r,
					 enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = i_tlbwr; break;
	case tlb_indexed: tlbw = i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		l_tlbw_hazard(l, *p);
		i_nop(p);
		break;

	case CPU_NEVADA:
		i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		l_tlbw_hazard(l, *p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		i_nop(p);
		i_nop(p);
		tlbw(p);
		i_nop(p);
		i_nop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}
#ifdef CONFIG_64BIT

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __init
build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	i_dmfc0(p, tmp, C0_BADVADDR);
#ifdef MODULE_START
	il_bltz(p, r, tmp, label_module_alloc);
#else
	il_bltz(p, r, tmp, label_vmalloc);
#endif
	/* No i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	i_mfc0(p, ptr, C0_TCBIND);
	i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	i_dmfc0(p, ptr, C0_CONTEXT);
	i_dsrl(p, ptr, ptr, 23);
# endif
	i_LA_mostly(p, tmp, pgdc);
	i_daddu(p, ptr, ptr, tmp);
	i_dmfc0(p, tmp, C0_BADVADDR);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
#endif

	l_vmalloc_done(l, *p);

	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
		i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
	else
		i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

	i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
	i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __init
build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

#ifdef MODULE_START
	long modd = (long)module_pg_dir;

	l_module_alloc(l, *p);
	/*
	 * Assumption:
	 * VMALLOC_START >= 0xc000000000000000UL
	 * MODULE_START >= 0xe000000000000000UL
	 */
	i_SLL(p, ptr, bvaddr, 2);
	il_bgez(p, r, ptr, label_vmalloc);

	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) {
		i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */
	} else {
		/* unlikely configuration */
		i_nop(p); /* delay slot */
		i_LA(p, ptr, MODULE_START);
	}
	i_dsubu(p, bvaddr, bvaddr, ptr);

	if (in_compat_space_p(modd) && !rel_lo(modd)) {
		il_b(p, r, label_vmalloc_done);
		i_lui(p, ptr, rel_hi(modd));
	} else {
		i_LA_mostly(p, ptr, modd);
		il_b(p, r, label_vmalloc_done);
		if (in_compat_space_p(modd))
			i_addiu(p, ptr, ptr, rel_lo(modd));
		else
			i_daddiu(p, ptr, ptr, rel_lo(modd));
	}

	l_vmalloc(l, *p);
	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) &&
	    MODULE_START << 32 == VMALLOC_START)
		i_dsll32(p, ptr, ptr, 0);	/* typical case */
	else
		i_LA(p, ptr, VMALLOC_START);
#else
	l_vmalloc(l, *p);
	i_LA(p, ptr, VMALLOC_START);
#endif
	i_dsubu(p, bvaddr, bvaddr, ptr);

	if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
		il_b(p, r, label_vmalloc_done);
		i_lui(p, ptr, rel_hi(swpd));
	} else {
		i_LA_mostly(p, ptr, swpd);
		il_b(p, r, label_vmalloc_done);
		if (in_compat_space_p(swpd))
			i_addiu(p, ptr, ptr, rel_lo(swpd));
		else
			i_daddiu(p, ptr, ptr, rel_lo(swpd));
	}
}
#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __init __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	i_mfc0(p, ptr, C0_TCBIND);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	i_mfc0(p, ptr, C0_CONTEXT);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 23);
#endif
	i_addu(p, ptr, tmp, ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
#endif
	i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	i_sll(p, tmp, tmp, PGD_T_LOG2);
	i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */
static void __init build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	i_SRL(p, ctx, ctx, shift);
	i_andi(p, ctx, ctx, mask);
}
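/*
 * Editor's worked example (illustration, assuming a 32-bit kernel with
 * 4K pages and 4-byte ptes, i.e. PTE_T_LOG2 == 2, PAGE_SHIFT == 12 and
 * PTRS_PER_PTE == 1024): shift == 1 and mask == 0xff8, so the BadVPN2
 * bits of c0_context become the byte offset of an even/odd pte pair
 * within the page table.
 */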
static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
static void __init build_update_entries(u32 **p, unsigned int tmp,
					unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		i_ld(p, tmp, 0, ptep); /* get even pte */
		i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	i_LW(p, tmp, 0, ptep); /* get even pte */
	i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO0);
	i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO1);
	i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
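/*
 * Editor's note: the ">> 6" converts a software pte to EntryLo format.
 * On these kernels the low six pte bits hold software-only flags; the
 * shift discards them and lands _PAGE_GLOBAL on the EntryLo G bit.
 */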
static void __init build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct label *l = labels;
	struct reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		i_MFC0(&p, K0, C0_BADVADDR);
		i_MFC0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	l_leave(&l, *p);
	i_eret(&p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different from r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > 63)
	    || (((p - tlb_handler) > 61)
		&& insn_has_bdelay(relocs, tlb_handler + 29)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + 32;
	if ((p - tlb_handler) <= 32) {
		/* Just copy the handler. */
		copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
		u32 *split = tlb_handler + 30;

		/*
		 * Find the split point.
		 */
		if (insn_has_bdelay(relocs, split - 1))
			split--;

		/* Copy first part of the handler. */
		copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		/* Insert branch. */
		l_split(&l, final_handler);
		il_b(&f, &r, label_split);
		if (insn_has_bdelay(relocs, split))
			i_nop(&f);
		else {
			copy_handler(relocs, labels, split, split + 1, f);
			move_labels(labels, f, f + 1, -1);
			f++;
			split++;
		}

		/* Copy the rest of the handler. */
		copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + 32)) + (p - split);
	}
#endif /* CONFIG_64BIT */

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB refill handler (%u instructions).\n",
		final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime; the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
static void __init
iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_lld(p, pte, 0, ptr);
	else
# endif
		i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_ld(p, pte, 0, ptr);
	else
# endif
		i_LW(p, pte, 0, ptr);
#endif
}

static void __init
iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_scd(p, pte, 0, ptr);
	else
# endif
		i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no i_nop needed */
		i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no i_nop needed */
		i_lw(p, pte, 0, ptr);
	} else
		i_nop(p);
# else
	i_nop(p);
# endif
#else /* !CONFIG_SMP */
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_sd(p, pte, 0, ptr);
	else
# endif
		i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __init
build_pte_present(u32 **p, struct label **l, struct reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
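/*
 * Editor's note: the andi/xori pair above leaves PTE equal to zero
 * only if both _PAGE_PRESENT and _PAGE_READ were set, so a single
 * bnez catches "not present" and "not readable" alike.
 */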
/* Make PTE valid, store result in PTR. */
static void __init
build_make_valid(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_writable(u32 **p, struct label **l, struct reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __init
build_make_write(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_modifiable(u32 **p, struct label **l, struct reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_WRITE);
	il_beqz(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __init
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	i_tlbwi(p);
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate. This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill. Then it returns.
 */
static void __init
build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r,
			     unsigned int pte, unsigned int tmp)
{
	i_mfc0(p, tmp, C0_INDEX);
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* branch delay */
	i_tlbwi(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
	l_r3000_write_probe_fail(l, *p);
	i_tlbwr(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}

static void __init
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	i_mfc0(p, pte, C0_BADVADDR);
	i_lui(p, ptr, rel_hi(pgdc)); /* cp0 delay */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, pte, pte, 22); /* load delay */
	i_sll(p, pte, pte, 2);
	i_addu(p, ptr, ptr, pte);
	i_mfc0(p, pte, C0_CONTEXT);
	i_lw(p, ptr, 0, ptr); /* cp0 delay */
	i_andi(p, pte, pte, 0xffc); /* load delay */
	i_addu(p, ptr, ptr, pte);
	i_lw(p, pte, 0, ptr);
	i_tlbp(p); /* load delay */
}
static void __init build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __init build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __init build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __init
build_r4000_tlbchange_handler_head(u32 **p, struct label **l,
				   struct reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

	i_MFC0(p, pte, C0_BADVADDR);
	i_LW(p, ptr, 0, ptr);
	i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, l, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}
static void __init
build_r4000_tlbchange_handler_tail(u32 **p, struct label **l,
				   struct reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	i_ori(p, ptr, ptr, sizeof(pte_t));
	i_xori(p, ptr, ptr, sizeof(pte_t));
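	/*
	 * Editor's note: the ori/xori pair above clears the sizeof(pte_t)
	 * bit regardless of its previous value, rounding PTR down to the
	 * even pte of the pair that build_update_entries() loads.
	 */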
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	l_leave(l, *p);
	i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}
static void __init build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		i_MFC0(&p, K0, C0_BADVADDR);
		i_MFC0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __init build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __init build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
void __init build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per CPU; multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}
void __init flush_tlb_handlers(void)
{
	flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}