ble 1b
isync
-#elif defined(CONFIG_FSL_BOOKE)
- /* Invalidate all entries in TLB0 */
- li r3, 0x04
- tlbivax 0,3
- /* Invalidate all entries in TLB1 */
- li r3, 0x0c
- tlbivax 0,3
- /* Invalidate all entries in TLB2 */
- li r3, 0x14
- tlbivax 0,3
- /* Invalidate all entries in TLB3 */
- li r3, 0x1c
- tlbivax 0,3
- msync
-#ifdef CONFIG_SMP
- tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
+#else /* !(CONFIG_40x || CONFIG_44x) */
#if defined(CONFIG_SMP)
rlwinm r8,r1,0,0,18
lwz r8,TI_CPU(r8)
*/
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
+ /* We run the search with interrupts disabled because we have to change
+ * the PID, and we must not be preempted while the temporary PID is
+ * live.
+ */
+ mfmsr r5 /* save MSR */
+ mfspr r6,SPRN_PID /* save current PID */
+ wrteei 0 /* disable external interrupts */
+ mtspr SPRN_PID,r4 /* search under the PID passed in r4 */
tlbsx. r3, 0, r3
+ mtspr SPRN_PID,r6 /* restore original PID */
+ wrtee r5 /* restore MSR[EE] */
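+ /* Restore order matters: the original PID must be back in SPRN_PID
+ * before interrupts are re-enabled, so no interrupt handler can run
+ * while the temporary PID is still live.
+ */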
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
 * The V (valid) bit of the TLB_TAG also lies in the cleared range, so
 * loading this value will invalidate the TLB entry. */
tlbwe r3, r3, TLB_TAG
isync
10:
#elif defined(CONFIG_44x)
- mfspr r4,SPRN_MMUCR
- mfspr r5,SPRN_PID /* Get PID */
- rlwimi r4,r5,0,24,31 /* Set TID */
+ mfspr r5,SPRN_MMUCR
+ rlwimi r5,r4,0,24,31 /* Set TID */
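+ /* The low byte of MMUCR is the TID field that tlbsx matches
+ * against; the rlwimi above inserts bits 24-31 of r4 (the PID
+ * argument) so the search runs in that context.
+ */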
/* We have to run the search with interrupts disabled, even critical
* and debug interrupts (in fact the only critical exceptions we have
* are debug and machine check). Otherwise an interrupt which causes
* a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
- mfmsr r5
+ mfmsr r4
lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
- andc r6,r5,r6
+ andc r6,r4,r6
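+ /* r6 = saved MSR with EE, CE, ME and DE cleared; the mtmsr below
+ * therefore shuts off even the critical and debug interrupts
+ * mentioned above for the duration of the search.
+ */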
mtmsr r6
- mtspr SPRN_MMUCR,r4
+ mtspr SPRN_MMUCR,r5
tlbsx. r3, 0, r3
- mtmsr r5
+ mtmsr r4
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64, which means bit 22 is
 * clear. Since 22 is the V bit in the TLB_PAGEID, loading this value
 * will invalidate the TLB entry. */
tlbwe r3, r3, PPC44x_TLB_PAGEID
isync
10:
-#elif defined(CONFIG_FSL_BOOKE)
- rlwinm r4, r3, 0, 0, 19
- ori r5, r4, 0x08 /* TLBSEL = 1 */
- ori r6, r4, 0x10 /* TLBSEL = 2 */
- ori r7, r4, 0x18 /* TLBSEL = 3 */
- tlbivax 0, r4
- tlbivax 0, r5
- tlbivax 0, r6
- tlbivax 0, r7
- msync
-#if defined(CONFIG_SMP)
- tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
+#else /* !(CONFIG_40x || CONFIG_44x) */
#if defined(CONFIG_SMP)
rlwinm r8,r1,0,0,18
lwz r8,TI_CPU(r8)
lis r3, KERNELBASE@h
iccci 0,r3
#endif
-#elif CONFIG_FSL_BOOKE
-BEGIN_FTR_SECTION
- mfspr r3,SPRN_L1CSR0
- ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
- /* msync; isync recommended here */
- mtspr SPRN_L1CSR0,r3
- isync
- blr
-END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
- mfspr r3,SPRN_L1CSR1
- ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
- mtspr SPRN_L1CSR1,r3
#else
mfspr r3,SPRN_PVR
rlwinm r3,r3,16,16,31
addi r3,r3,L1_CACHE_BYTES
bdnz 0b
sync
+#ifndef CONFIG_44x
+ /* We don't flush the icache on 44x. Those have a virtual icache,
+ * and we don't have access to the virtual address here (it's not
+ * the page's kernel vaddr but where the page is mapped in user
+ * space). The icache flush for those is handled elsewhere, when
+ * a change in the address space occurs, before returning to
+ * user space.
+ */
mtctr r4
1: icbi 0,r6
addi r6,r6,L1_CACHE_BYTES
bdnz 1b
sync
isync
+#endif /* CONFIG_44x */
blr
/*