[POWERPC] Add 1TB workaround for PA6T
author Olof Johansson <olof@lixom.net>
Mon, 15 Oct 2007 14:58:59 +0000 (00:58 +1000)
committer Paul Mackerras <paulus@samba.org>
Wed, 17 Oct 2007 12:30:09 +0000 (22:30 +1000)
PA6T has a bug where the slbie instruction does not honor the large
segment bit.  As a result, we have to always use slbia when switching
context.

We don't have to worry about changing the slbies during fault processing,
since the fault path should never replace one VSID with another under the
same ESID; i.e. there is no risk of inserting duplicate entries due to a
failed slbie of the old entry.  So as long as we clear the SLB out on
context switch, we should be fine.
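
The effect of the workaround is easiest to see in switch_slb(): when
CPU_FTR_NO_SLBIE_B is set, the per-entry slbie fast path is never taken
and the existing slbia full flush runs instead.  A minimal sketch of that
control flow follows; the branch bodies are placeholder comments standing
in for the existing kernel code and are not part of this patch.

	/* Sketch only: how the new feature bit is consumed in switch_slb(). */
	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		/* Fast path: issue one slbie per user ESID recorded in the
		 * SLB cache.  Never taken on PA6T because of the erratum. */
	} else {
		/* Full flush: invalidate the whole SLB with slbia, e.g.
		 * asm volatile("isync; slbia; isync" : : : "memory");
		 * this is the path PA6T now always takes at context switch. */
	}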

Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
arch/powerpc/kernel/entry_64.S
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/slb.c
include/asm-powerpc/cputable.h

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 0ec13403489906a5a8b2309facf0ad8b00f414d0..148a3547c9aabbb04dc29a8f74a8a0cfd46bdf06 100644
@@ -408,6 +408,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
        std     r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
        std     r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
 
+       /* No need to check for CPU_FTR_NO_SLBIE_B here, since when
+        * we have 1TB segments, the only CPUs known to have the errata
+        * only support less than 1TB of system memory and we'll never
+        * actually hit this code path.
+        */
+
        slbie   r6
        slbie   r6              /* Workaround POWER5 < DD2.1 issue */
        slbmte  r7,r0
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 09da90b5385045dac3b9348a507aea13b94120c7..c78dc912411f9f6aa57c951023a733a7370b81c7 100644
@@ -212,6 +212,7 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
                        return 1;
                }
        }
+       cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
        return 0;
 }
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6c164cec9d2c6018865783f6d8c5fd3dc70f8cbe..bbd2c512ee0530ded1aa374242b06cce5b4f319b 100644
@@ -157,7 +157,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;
 
-       if (offset <= SLB_CACHE_ENTRIES) {
+       if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
+           offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index ae093ef68363a699d2cfafde633801c0e2513ace..9d74338e3dec05bcd4ce93090fe365ba8279f1e1 100644
@@ -165,6 +165,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTR_SPURR                  LONG_ASM_CONST(0x0001000000000000)
 #define CPU_FTR_DSCR                   LONG_ASM_CONST(0x0002000000000000)
 #define CPU_FTR_1T_SEGMENT             LONG_ASM_CONST(0x0004000000000000)
+#define CPU_FTR_NO_SLBIE_B             LONG_ASM_CONST(0x0008000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -367,7 +368,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
-           CPU_FTR_PURR | CPU_FTR_REAL_LE)
+           CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
 #define CPU_FTRS_COMPATIBLE    (CPU_FTR_USE_TB | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)