[POWERPC] Consolidate feature fixup code
author    Benjamin Herrenschmidt <benh@kernel.crashing.org>
          Tue, 24 Oct 2006 06:42:40 +0000 (16:42 +1000)
committer Paul Mackerras <paulus@samba.org>
          Wed, 25 Oct 2006 01:42:10 +0000 (11:42 +1000)
There are currently two versions of the functions for applying the
feature fixups, one for CPU features and one for firmware features. In
addition, both are written in assembly, with separate implementations
for 32-bit and 64-bit. identify_cpu() is likewise implemented in
assembly, again separately for 32-bit and 64-bit.

This patch replaces them with a pair of C functions. On ppc64 the call
sites are also moved slightly so that they are called from C instead of
from assembly; it is a very small change and should not cause any
problems.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
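
For illustration only (not part of the patch): the sketch below mirrors the
new C call pattern added to arch/powerpc/kernel/setup_32.c and
arch/ppc/kernel/setup.c in the hunks that follow. The identifiers come
straight from those hunks; only the wrapper name apply_cpu_fixups is
invented here.

	/* Sketch only: mirrors the new C call sites added by this patch.
	 * apply_cpu_fixups() is a made-up wrapper name for illustration. */
	#include <asm/cputable.h>	/* identify_cpu(), do_feature_fixups() */
	#include <asm/prom.h>		/* PTRRELOC() on ARCH=ppc */

	static void apply_cpu_fixups(unsigned long offset)
	{
		struct cpu_spec *spec;

		/* Probe the PVR once and select the matching cpu_specs[]
		 * entry (on ppc64 this also runs its cpu_setup hook). */
		spec = identify_cpu(offset);

		/* Nop out the code sections guarded by CPU feature bits
		 * that this processor does not have. */
		do_feature_fixups(offset, spec->cpu_features,
				  PTRRELOC(&__start___ftr_fixup),
				  PTRRELOC(&__stop___ftr_fixup));
	}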
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/platforms/iseries/setup.c
arch/ppc/kernel/misc.S
arch/ppc/kernel/setup.c
include/asm-powerpc/cputable.h
include/asm-powerpc/firmware.h

diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f23aad66a79eec48f1478422830b22421dd54719..6fdfaa4a82b8bc20f588190e9185071c3c14e495 100644
@@ -18,6 +18,7 @@
 
 #include <asm/oprofile_impl.h>
 #include <asm/cputable.h>
+#include <asm/prom.h>          /* for PTRRELOC on ARCH=ppc */
 
 struct cpu_spec* cur_cpu_spec = NULL;
 EXPORT_SYMBOL(cur_cpu_spec);
@@ -73,7 +74,7 @@ extern void __restore_cpu_ppc970(void);
 #define PPC_FEATURE_SPE_COMP   0
 #endif
 
-struct cpu_spec        cpu_specs[] = {
+static struct cpu_spec cpu_specs[] = {
 #ifdef CONFIG_PPC64
        {       /* Power3 */
                .pvr_mask               = 0xffff0000,
@@ -1167,3 +1168,72 @@ struct cpu_spec  cpu_specs[] = {
 #endif /* !CLASSIC_PPC */
 #endif /* CONFIG_PPC32 */
 };
+
+struct cpu_spec *identify_cpu(unsigned long offset)
+{
+       struct cpu_spec *s = cpu_specs;
+       struct cpu_spec **cur = &cur_cpu_spec;
+       unsigned int pvr = mfspr(SPRN_PVR);
+       int i;
+
+       s = PTRRELOC(s);
+       cur = PTRRELOC(cur);
+
+       if (*cur != NULL)
+               return PTRRELOC(*cur);
+
+       for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
+               if ((pvr & s->pvr_mask) == s->pvr_value) {
+                       *cur = cpu_specs + i;
+#ifdef CONFIG_PPC64
+                       /* ppc64 expects identify_cpu to also call setup_cpu
+                        * for that processor. I will consolidate that at a
+                        * later time, for now, just use our friend #ifdef.
+                        * we also don't need to PTRRELOC the function pointer
+                        * on ppc64 as we are running at 0 in real mode.
+                        */
+                       if (s->cpu_setup) {
+                               s->cpu_setup(offset, s);
+                       }
+#endif /* CONFIG_PPC64 */
+                       return s;
+               }
+       BUG();
+       return NULL;
+}
+
+void do_feature_fixups(unsigned long offset, unsigned long value,
+                      void *fixup_start, void *fixup_end)
+{
+       struct fixup_entry {
+               unsigned long   mask;
+               unsigned long   value;
+               unsigned int    *start;
+               unsigned int    *end;
+       } *fcur, *fend;
+
+       fcur = fixup_start;
+       fend = fixup_end;
+
+       for (; fcur < fend; fcur++) {
+               unsigned int *pstart, *pend, *p;
+
+               if ((value & fcur->mask) == fcur->value)
+                       continue;
+
+               /* These PTRRELOCs will disappear once the new scheme for
+                * modules and vdso is implemented
+                */
+               pstart = PTRRELOC(fcur->start);
+               pend = PTRRELOC(fcur->end);
+
+               for (p = pstart; p < pend; p++) {
+                       *p = 0x60000000u;
+                       asm volatile ("dcbst 0, %0" : : "r" (p));
+               }
+               asm volatile ("sync" : : : "memory");
+               for (p = pstart; p < pend; p++)
+                       asm volatile ("icbi 0,%0" : : "r" (p));
+               asm volatile ("sync; isync" : : : "memory");
+       }
+}
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 645c7f10fb28300b782334e4c264602d80718877..f12e3c55520d742ff050e1ff43a9638d6459e193 100644
@@ -1580,11 +1580,6 @@ _STATIC(__start_initialization_iSeries)
        li      r0,0
        stdu    r0,-STACK_FRAME_OVERHEAD(r1)
 
-       LOAD_REG_IMMEDIATE(r3,cpu_specs)
-       LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
-       li      r5,0
-       bl      .identify_cpu
-
        LOAD_REG_IMMEDIATE(r2,__toc_start)
        addi    r2,r2,0x4000
        addi    r2,r2,0x4000
@@ -1964,13 +1959,6 @@ _STATIC(start_here_multiplatform)
        addi    r2,r2,0x4000
        add     r2,r2,r26
 
-       LOAD_REG_IMMEDIATE(r3, cpu_specs)
-       add     r3,r3,r26
-       LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
-       add     r4,r4,r26
-       mr      r5,r26
-       bl      .identify_cpu
-
        /* Do very early kernel initializations, including initial hash table,
         * stab and slb setup before we turn on relocation.     */
 
@@ -2000,13 +1988,6 @@ _STATIC(start_here_common)
        li      r0,0
        stdu    r0,-STACK_FRAME_OVERHEAD(r1)
 
-       /* Apply the CPUs-specific fixups (nop out sections not relevant
-        * to this CPU
-        */
-       li      r3,0
-       bl      .do_cpu_ftr_fixups
-       bl      .do_fw_ftr_fixups
-
        /* ptr to current */
        LOAD_REG_IMMEDIATE(r4, init_task)
        std     r4,PACACURRENT(r13)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 88fd73fdf048aea647974a694f033653e93cd4f8..412bea3cf813279bd64d8820ed12e593327fa457 100644
@@ -101,80 +101,6 @@ _GLOBAL(reloc_got2)
        mtlr    r11
        blr
 
-/*
- * identify_cpu,
- * called with r3 = data offset and r4 = CPU number
- * doesn't change r3
- */
-_GLOBAL(identify_cpu)
-       addis   r8,r3,cpu_specs@ha
-       addi    r8,r8,cpu_specs@l
-       mfpvr   r7
-1:
-       lwz     r5,CPU_SPEC_PVR_MASK(r8)
-       and     r5,r5,r7
-       lwz     r6,CPU_SPEC_PVR_VALUE(r8)
-       cmplw   0,r6,r5
-       beq     1f
-       addi    r8,r8,CPU_SPEC_ENTRY_SIZE
-       b       1b
-1:
-       addis   r6,r3,cur_cpu_spec@ha
-       addi    r6,r6,cur_cpu_spec@l
-       sub     r8,r8,r3
-       stw     r8,0(r6)
-       blr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
-       /* Get CPU 0 features */
-       addis   r6,r3,cur_cpu_spec@ha
-       addi    r6,r6,cur_cpu_spec@l
-       lwz     r4,0(r6)
-       add     r4,r4,r3
-       lwz     r4,CPU_SPEC_FEATURES(r4)
-
-       /* Get the fixup table */
-       addis   r6,r3,__start___ftr_fixup@ha
-       addi    r6,r6,__start___ftr_fixup@l
-       addis   r7,r3,__stop___ftr_fixup@ha
-       addi    r7,r7,__stop___ftr_fixup@l
-
-       /* Do the fixup */
-1:     cmplw   0,r6,r7
-       bgelr
-       addi    r6,r6,16
-       lwz     r8,-16(r6)      /* mask */
-       and     r8,r8,r4
-       lwz     r9,-12(r6)      /* value */
-       cmplw   0,r8,r9
-       beq     1b
-       lwz     r8,-8(r6)       /* section begin */
-       lwz     r9,-4(r6)       /* section end */
-       subf.   r9,r8,r9
-       beq     1b
-       /* write nops over the section of code */
-       /* todo: if large section, add a branch at the start of it */
-       srwi    r9,r9,2
-       mtctr   r9
-       add     r8,r8,r3
-       lis     r0,0x60000000@h /* nop */
-3:     stw     r0,0(r8)
-       andi.   r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
-       beq     2f
-       dcbst   0,r8            /* suboptimal, but simpler */
-       sync
-       icbi    0,r8
-2:     addi    r8,r8,4
-       bdnz    3b
-       sync                    /* additional sync needed on g4 */
-       isync
-       b       1b
-
 /*
  * call_setup_cpu - call the setup_cpu function for this cpu
  * r3 = data offset, r24 = cpu number
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c70e20708a1f625347139f9a7caec7410544266f..21fd2c662a9913cc9ef8f3d0efe421c4907b8929 100644
@@ -246,130 +246,6 @@ _GLOBAL(__flush_dcache_icache)
        isync
        blr
 
-/*
- * identify_cpu and calls setup_cpu
- * In: r3 = base of the cpu_specs array
- *     r4 = address of cur_cpu_spec
- *     r5 = relocation offset
- */
-_GLOBAL(identify_cpu)
-       mfpvr   r7
-1:
-       lwz     r8,CPU_SPEC_PVR_MASK(r3)
-       and     r8,r8,r7
-       lwz     r9,CPU_SPEC_PVR_VALUE(r3)
-       cmplw   0,r9,r8
-       beq     1f
-       addi    r3,r3,CPU_SPEC_ENTRY_SIZE
-       b       1b
-1:
-       sub     r0,r3,r5
-       std     r0,0(r4)
-       ld      r4,CPU_SPEC_SETUP(r3)
-       cmpdi   0,r4,0
-       add     r4,r4,r5
-       beqlr
-       ld      r4,0(r4)
-       add     r4,r4,r5
-       mtctr   r4
-       /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
-       mr      r4,r3
-       mr      r3,r5
-       bctr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
-       /* Get CPU 0 features */
-       LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
-       sub     r6,r6,r3
-       ld      r4,0(r6)
-       sub     r4,r4,r3
-       ld      r4,CPU_SPEC_FEATURES(r4)
-       /* Get the fixup table */
-       LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
-       sub     r6,r6,r3
-       LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
-       sub     r7,r7,r3
-       /* Do the fixup */
-1:     cmpld   r6,r7
-       bgelr
-       addi    r6,r6,32
-       ld      r8,-32(r6)      /* mask */
-       and     r8,r8,r4
-       ld      r9,-24(r6)      /* value */
-       cmpld   r8,r9
-       beq     1b
-       ld      r8,-16(r6)      /* section begin */
-       ld      r9,-8(r6)       /* section end */
-       subf.   r9,r8,r9
-       beq     1b
-       /* write nops over the section of code */
-       /* todo: if large section, add a branch at the start of it */
-       srwi    r9,r9,2
-       mtctr   r9
-       sub     r8,r8,r3
-       lis     r0,0x60000000@h /* nop */
-3:     stw     r0,0(r8)
-       andi.   r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
-       beq     2f
-       dcbst   0,r8            /* suboptimal, but simpler */
-       sync
-       icbi    0,r8
-2:     addi    r8,r8,4
-       bdnz    3b
-       sync                    /* additional sync needed on g4 */
-       isync
-       b       1b
-
-/*
- * do_fw_ftr_fixups - goes through the list of firmware feature fixups
- * and writes nop's over sections of code that don't apply for this firmware.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_fw_ftr_fixups)
-       /* Get firmware features */
-       LOAD_REG_IMMEDIATE(r6,powerpc_firmware_features)
-       sub     r6,r6,r3
-       ld      r4,0(r6)
-       /* Get the fixup table */
-       LOAD_REG_IMMEDIATE(r6,__start___fw_ftr_fixup)
-       sub     r6,r6,r3
-       LOAD_REG_IMMEDIATE(r7,__stop___fw_ftr_fixup)
-       sub     r7,r7,r3
-       /* Do the fixup */
-1:     cmpld   r6,r7
-       bgelr
-       addi    r6,r6,32
-       ld      r8,-32(r6)      /* mask */
-       and     r8,r8,r4
-       ld      r9,-24(r6)      /* value */
-       cmpld   r8,r9
-       beq     1b
-       ld      r8,-16(r6)      /* section begin */
-       ld      r9,-8(r6)       /* section end */
-       subf.   r9,r8,r9
-       beq     1b
-       /* write nops over the section of code */
-       /* todo: if large section, add a branch at the start of it */
-       srwi    r9,r9,2
-       mtctr   r9
-       sub     r8,r8,r3
-       lis     r0,0x60000000@h /* nop */
-3:     stw     r0,0(r8)
-BEGIN_FTR_SECTION
-       dcbst   0,r8            /* suboptimal, but simpler */
-       sync
-       icbi    0,r8
-END_FTR_SECTION_IFSET(CPU_FTR_SPLIT_ID_CACHE)
-       addi    r8,r8,4
-       bdnz    3b
-       sync                    /* additional sync needed on g4 */
-       isync
-       b       1b
 
 #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
 /*
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 191d0ab0922227908db8dc6fc3b0fefa53ed876d..769e511783b0f201b4a9f24347bb2c7a63d3f440 100644
@@ -91,6 +91,7 @@ int ucache_bsize;
 unsigned long __init early_init(unsigned long dt_ptr)
 {
        unsigned long offset = reloc_offset();
+       struct cpu_spec *spec;
 
        /* First zero the BSS -- use memset_io, some platforms don't have
         * caches on yet */
@@ -100,8 +101,11 @@ unsigned long __init early_init(unsigned long dt_ptr)
         * Identify the CPU type and fix up code sections
         * that depend on which cpu we have.
         */
-       identify_cpu(offset, 0);
-       do_cpu_ftr_fixups(offset);
+       spec = identify_cpu(offset);
+
+       do_feature_fixups(offset, spec->cpu_features,
+                         PTRRELOC(&__start___ftr_fixup),
+                         PTRRELOC(&__stop___ftr_fixup));
 
        return KERNELBASE + offset;
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4b2e32eab9dc3e6b50bea7ffdf52fcdcb229492d..1969b5686eee79d08127589685a6e24941ff964a 100644
@@ -170,6 +170,9 @@ void __init setup_paca(int cpu)
 
 void __init early_setup(unsigned long dt_ptr)
 {
+       /* Identify CPU type */
+       identify_cpu(0);
+
        /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
        setup_paca(0);
 
@@ -348,6 +351,14 @@ void __init setup_system(void)
 {
        DBG(" -> setup_system()\n");
 
+       /* Apply the CPUs-specific and firmware specific fixups to kernel
+        * text (nop out sections not relevant to this CPU or this firmware)
+        */
+       do_feature_fixups(0, cur_cpu_spec->cpu_features,
+                         &__start___ftr_fixup, &__stop___ftr_fixup);
+       do_feature_fixups(0, powerpc_firmware_features,
+                         &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+
        /*
         * Unflatten the device-tree passed by prom_init or kexec
         */
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index a0ff7ba7d666addc2a78a3f20276e9de56d09f61..6f73469fd3b0c64b1a753003af5e3a93a1fef072 100644
@@ -694,6 +694,11 @@ void * __init iSeries_early_setup(void)
 {
        unsigned long phys_mem_size;
 
+       /* Identify CPU type. This is done again by the common code later
+        * on but calling this function multiple times is fine.
+        */
+       identify_cpu(0);
+
        powerpc_firmware_features |= FW_FEATURE_ISERIES;
        powerpc_firmware_features |= FW_FEATURE_LPAR;
 
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 5f6684012ded76d4d02c9b0f55c37b8e2cb6548f..d319f9ba2379d812f31d5bfbc8b249989de79636 100644
@@ -109,80 +109,6 @@ _GLOBAL(reloc_got2)
        mtlr    r11
        blr
 
-/*
- * identify_cpu,
- * called with r3 = data offset and r4 = CPU number
- * doesn't change r3
- */
-_GLOBAL(identify_cpu)
-       addis   r8,r3,cpu_specs@ha
-       addi    r8,r8,cpu_specs@l
-       mfpvr   r7
-1:
-       lwz     r5,CPU_SPEC_PVR_MASK(r8)
-       and     r5,r5,r7
-       lwz     r6,CPU_SPEC_PVR_VALUE(r8)
-       cmplw   0,r6,r5
-       beq     1f
-       addi    r8,r8,CPU_SPEC_ENTRY_SIZE
-       b       1b
-1:
-       addis   r6,r3,cur_cpu_spec@ha
-       addi    r6,r6,cur_cpu_spec@l
-       sub     r8,r8,r3
-       stw     r8,0(r6)
-       blr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
-       /* Get CPU 0 features */
-       addis   r6,r3,cur_cpu_spec@ha
-       addi    r6,r6,cur_cpu_spec@l
-       lwz     r4,0(r6)
-       add     r4,r4,r3
-       lwz     r4,CPU_SPEC_FEATURES(r4)
-
-       /* Get the fixup table */
-       addis   r6,r3,__start___ftr_fixup@ha
-       addi    r6,r6,__start___ftr_fixup@l
-       addis   r7,r3,__stop___ftr_fixup@ha
-       addi    r7,r7,__stop___ftr_fixup@l
-
-       /* Do the fixup */
-1:     cmplw   0,r6,r7
-       bgelr
-       addi    r6,r6,16
-       lwz     r8,-16(r6)      /* mask */
-       and     r8,r8,r4
-       lwz     r9,-12(r6)      /* value */
-       cmplw   0,r8,r9
-       beq     1b
-       lwz     r8,-8(r6)       /* section begin */
-       lwz     r9,-4(r6)       /* section end */
-       subf.   r9,r8,r9
-       beq     1b
-       /* write nops over the section of code */
-       /* todo: if large section, add a branch at the start of it */
-       srwi    r9,r9,2
-       mtctr   r9
-       add     r8,r8,r3
-       lis     r0,0x60000000@h /* nop */
-3:     stw     r0,0(r8)
-       andi.   r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
-       beq     2f
-       dcbst   0,r8            /* suboptimal, but simpler */
-       sync
-       icbi    0,r8
-2:     addi    r8,r8,4
-       bdnz    3b
-       sync                    /* additional sync needed on g4 */
-       isync
-       b       1b
-
 /*
  * call_setup_cpu - call the setup_cpu function for this cpu
  * r3 = data offset, r24 = cpu number
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 75fe13815be27c23e817ac7527c00079476f1c0b..41a640f16bddd681fb8d912514891c20a4615716 100644
@@ -38,6 +38,7 @@
 #include <asm/nvram.h>
 #include <asm/xmon.h>
 #include <asm/ocp.h>
+#include <asm/prom.h>
 
 #define USES_PPC_SYS (defined(CONFIG_85xx) || defined(CONFIG_83xx) || \
                      defined(CONFIG_MPC10X_BRIDGE) || defined(CONFIG_8260) || \
@@ -53,8 +54,6 @@
 
 extern void platform_init(unsigned long r3, unsigned long r4,
                unsigned long r5, unsigned long r6, unsigned long r7);
-extern void identify_cpu(unsigned long offset, unsigned long cpu);
-extern void do_cpu_ftr_fixups(unsigned long offset);
 extern void reloc_got2(unsigned long offset);
 
 extern void ppc6xx_idle(void);
@@ -301,6 +300,7 @@ early_init(int r3, int r4, int r5)
 {
        unsigned long phys;
        unsigned long offset = reloc_offset();
+       struct cpu_spec *spec;
 
        /* Default */
        phys = offset + KERNELBASE;
@@ -313,8 +313,10 @@ early_init(int r3, int r4, int r5)
         * Identify the CPU type and fix up code sections
         * that depend on which cpu we have.
         */
-       identify_cpu(offset, 0);
-       do_cpu_ftr_fixups(offset);
+       spec = identify_cpu(offset);
+       do_feature_fixups(offset, spec->cpu_features,
+                         PTRRELOC(&__start___ftr_fixup),
+                         PTRRELOC(&__stop___ftr_fixup));
 
        return phys;
 }
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 12707ab9dc98db6bcc6bf75556dd925e9a56818e..4d22218739e0804667bfc9e1a3bc9491def0439a 100644
@@ -89,8 +89,11 @@ struct cpu_spec {
 
 extern struct cpu_spec         *cur_cpu_spec;
 
-extern void identify_cpu(unsigned long offset, unsigned long cpu);
-extern void do_cpu_ftr_fixups(unsigned long offset);
+extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
+
+extern struct cpu_spec *identify_cpu(unsigned long offset);
+extern void do_feature_fixups(unsigned long offset, unsigned long value,
+                             void *fixup_start, void *fixup_end);
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index 1022737f4f34884ea392264cd8a21156290bad67..c16e0a6b9dab8490b031e96722173fdb937f652e 100644
@@ -96,6 +96,8 @@ extern void machine_check_fwnmi(void);
 /* This is true if we are using the firmware NMI handler (typically LPAR) */
 extern int fwnmi_active;
 
+extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
+
 #else /* __ASSEMBLY__ */
 
 #define BEGIN_FW_FTR_SECTION           96: