+ .size __tsb_context_switch, .-__tsb_context_switch
+
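+ /* Tag bits that cause copy_tsb to pass over an entry: locked
+ * and invalid entries are not migrated into the new TSB.
+ */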
+#define TSB_PASS_BITS ((1 << TSB_TAG_LOCK_BIT) | \
+ (1 << TSB_TAG_INVALID_BIT))
+
+ .align 32
+ .globl copy_tsb
+ .type copy_tsb,#function
+copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
+ * %o2=new_tsb_base, %o3=new_tsb_size
+ */
+ sethi %uhi(TSB_PASS_BITS), %g7
+ srlx %o3, 4, %o3
+ add %o0, %o1, %g1 /* end of old tsb */
+ sllx %g7, 32, %g7
+ sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
+
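+ /* Prefetch sites recorded in .tsb_phys_patch are patched to the
+ * ASI_PHYS_USE_EC form when the TSB is referenced by its
+ * physical address.
+ */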
+661: prefetcha [%o0] ASI_N, #one_read
+ .section .tsb_phys_patch, "ax"
+ .word 661b
+ prefetcha [%o0] ASI_PHYS_USE_EC, #one_read
+ .previous
+
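+ /* Loop over the old TSB; whenever %o0 crosses onto a 64-byte
+ * boundary, prefetch the next cache line of old entries.
+ */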
+90: andcc %o0, (64 - 1), %g0
+ bne 1f
+ add %o0, 64, %o5
+
+661: prefetcha [%o5] ASI_N, #one_read
+ .section .tsb_phys_patch, "ax"
+ .word 661b
+ prefetcha [%o5] ASI_PHYS_USE_EC, #one_read
+ .previous
+
+1: TSB_LOAD_QUAD(%o0, %g2) /* %g2/%g3 == TSB entry */
+ andcc %g2, %g7, %g0 /* LOCK or INVALID set? */
+ bne,pn %xcc, 80f /* Skip it */
+ sllx %g2, 22, %o4 /* TAG --> VADDR */
+
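+ /* Rehash into the new TSB: the tag gives VA bits 63:22, the
+ * entry's index in the old TSB gives the page bits below that,
+ * and the rebuilt VADDR is hashed with the new table's mask.
+ */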
+ /* This can definitely be computed faster... */
+ srlx %o0, 4, %o5 /* Build index */
+ and %o5, 511, %o5 /* Mask index */
+ sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+ or %o4, %o5, %o4 /* Full VADDR. */
+ srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+ and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
+ sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
+ TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
+ add %o4, 0x8, %o4 /* Advance to TTE */
+ TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
+
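+ /* Step to the next 16-byte entry and loop until the end of the
+ * old TSB (%g1) is reached.
+ */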
+80: add %o0, 16, %o0
+ cmp %o0, %g1
+ bne,pt %xcc, 90b
+ nop
+
+ retl
+ TSB_MEMBAR
+ .size copy_tsb, .-copy_tsb
+
+ /* Set the invalid bit in all TSB entries. */
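+ /* Each iteration below initializes 0x100 bytes (16 entries),
+ * storing the invalid tag into the tag word of every 16-byte
+ * entry while prefetching ahead for the writes.
+ */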
+ .align 32
+ .globl tsb_init
+ .type tsb_init,#function
+tsb_init: /* %o0 = TSB vaddr, %o1 = size in bytes */
+ prefetch [%o0 + 0x000], #n_writes
+ mov 1, %g1
+ prefetch [%o0 + 0x040], #n_writes
+ sllx %g1, TSB_TAG_INVALID_BIT, %g1
+ prefetch [%o0 + 0x080], #n_writes
+1: prefetch [%o0 + 0x0c0], #n_writes
+ stx %g1, [%o0 + 0x00]
+ stx %g1, [%o0 + 0x10]
+ stx %g1, [%o0 + 0x20]
+ stx %g1, [%o0 + 0x30]
+ prefetch [%o0 + 0x100], #n_writes
+ stx %g1, [%o0 + 0x40]
+ stx %g1, [%o0 + 0x50]
+ stx %g1, [%o0 + 0x60]
+ stx %g1, [%o0 + 0x70]
+ prefetch [%o0 + 0x140], #n_writes
+ stx %g1, [%o0 + 0x80]
+ stx %g1, [%o0 + 0x90]
+ stx %g1, [%o0 + 0xa0]
+ stx %g1, [%o0 + 0xb0]
+ prefetch [%o0 + 0x180], #n_writes
+ stx %g1, [%o0 + 0xc0]
+ stx %g1, [%o0 + 0xd0]
+ stx %g1, [%o0 + 0xe0]
+ stx %g1, [%o0 + 0xf0]
+ subcc %o1, 0x100, %o1
+ bne,pt %xcc, 1b
+ add %o0, 0x100, %o0
+ retl
+ nop
+ nop
+ nop
+ .size tsb_init, .-tsb_init
+
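+ /* Niagara variant: the same initialization done through the
+ * block-init store ASI, avoiding reads of the lines being
+ * overwritten. The caller's %asi is saved in %g2 and restored
+ * in the return delay slot; membar #Sync orders the stores
+ * before returning.
+ */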
+ .globl NGtsb_init
+ .type NGtsb_init,#function
+NGtsb_init:
+ rd %asi, %g2
+ mov 1, %g1
+ wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
+ sllx %g1, TSB_TAG_INVALID_BIT, %g1
+1: stxa %g1, [%o0 + 0x00] %asi
+ stxa %g1, [%o0 + 0x10] %asi
+ stxa %g1, [%o0 + 0x20] %asi
+ stxa %g1, [%o0 + 0x30] %asi
+ stxa %g1, [%o0 + 0x40] %asi
+ stxa %g1, [%o0 + 0x50] %asi
+ stxa %g1, [%o0 + 0x60] %asi
+ stxa %g1, [%o0 + 0x70] %asi
+ stxa %g1, [%o0 + 0x80] %asi
+ stxa %g1, [%o0 + 0x90] %asi
+ stxa %g1, [%o0 + 0xa0] %asi
+ stxa %g1, [%o0 + 0xb0] %asi
+ stxa %g1, [%o0 + 0xc0] %asi
+ stxa %g1, [%o0 + 0xd0] %asi
+ stxa %g1, [%o0 + 0xe0] %asi
+ stxa %g1, [%o0 + 0xf0] %asi
+ subcc %o1, 0x100, %o1
+ bne,pt %xcc, 1b
+ add %o0, 0x100, %o0
+ membar #Sync
+ retl
+ wr %g2, 0x0, %asi
+ .size NGtsb_init, .-NGtsb_init