2 * arch/ppc64/kernel/head.S
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
17 * This file contains the low-level support and setup for the
18 * PowerPC-64 platform, including trap and interrupt dispatch.
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
26 #include <linux/config.h>
27 #include <linux/threads.h>
31 #include <asm/ppc_asm.h>
32 #include <asm/asm-offsets.h>
34 #include <asm/cputable.h>
35 #include <asm/setup.h>
36 #include <asm/hvcall.h>
37 #include <asm/iseries/lpar_map.h>
38 #include <asm/thread_info.h>
40 #ifdef CONFIG_PPC_ISERIES
41 #define DO_SOFT_DISABLE
45 * We lay out physical memory as follows:
46 * 0x0000 - 0x00ff : Secondary processor spin code
47 * 0x0100 - 0x2fff : pSeries Interrupt prologs
48 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
49 * 0x6000 - 0x6fff : Initial (CPU0) segment table
50 * 0x7000 - 0x7fff : FWNMI data area
51 * 0x8000 - : Early init and support code
59 * SPRG0 reserved for hypervisor
60 * SPRG1 temp - used to save gpr
61 * SPRG2 temp - used to save gpr
62 * SPRG3 virt addr of paca
66 * Entering into this code we make the following assumptions:
68 * 1. The MMU is off & Open Firmware is running in real mode.
69 * 2. The kernel is entered at __start
72 * 1. The MMU is on (as it always is for iSeries)
73 * 2. The kernel is entered at system_reset_iSeries
79 #ifdef CONFIG_PPC_MULTIPLATFORM
81 /* NOP this out unconditionally */
83 b .__start_initialization_multiplatform
85 #endif /* CONFIG_PPC_MULTIPLATFORM */
87 /* Catch branch to 0 in real mode */
90 #ifdef CONFIG_PPC_ISERIES
92 * At offset 0x20, there is a pointer to iSeries LPAR data.
93 * This is required by the hypervisor
96 .llong hvReleaseData-KERNELBASE
99 * At offsets 0x28 and 0x30 are offsets to the mschunks_map
100 * array (used by the iSeries LPAR debugger to do translation
101 * between physical addresses and absolute addresses) and
102 * to the pidhash table (also used by the debugger)
104 .llong mschunks_map-KERNELBASE
105 .llong 0 /* pidhash-KERNELBASE SFRXXX */
107 /* Offset 0x38 - Pointer to start of embedded System.map */
108 .globl embedded_sysmap_start
109 embedded_sysmap_start:
111 /* Offset 0x40 - Pointer to end of embedded System.map */
112 .globl embedded_sysmap_end
116 #endif /* CONFIG_PPC_ISERIES */
118 /* Secondary processors spin on this value until it goes to 1. */
119 .globl __secondary_hold_spinloop
120 __secondary_hold_spinloop:
123 /* Secondary processors write this value with their cpu # */
124 /* after they enter the spin loop immediately below. */
125 .globl __secondary_hold_acknowledge
126 __secondary_hold_acknowledge:
131 * The following code is used on pSeries to hold secondary processors
132 * in a spin loop after they have been freed from OpenFirmware, but
133 * before the bulk of the kernel has been relocated. This code
134 * is relocated to physical address 0x60 before prom_init is run.
135 * All of it must fit below the first exception vector at 0x100.
137 _GLOBAL(__secondary_hold)
140 mtmsrd r24 /* RI on */
142 /* Grab our linux cpu number */
145 /* Tell the master cpu we're here */
146 /* Relocation is off & we are located at an address less */
147 /* than 0x100, so only need to grab low order offset. */
148 std r24,__secondary_hold_acknowledge@l(0)
151 /* All secondary cpus wait here until told to start. */
152 100: ld r4,__secondary_hold_spinloop@l(0)
157 LOADADDR(r4, .hmt_init)
162 LOADADDR(r4, .pSeries_secondary_smp_init)
171 /* This value is used to mark exception frames on the stack. */
174 .tc ID_72656773_68657265[TC],0x7265677368657265
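/*
 * The constant 0x7265677368657265 is ASCII for "regshere"; it is stored
 * at STACK_FRAME_OVERHEAD-16 of every exception frame (see
 * EXCEPTION_PROLOG_COMMON below) so that stack dump code can recognise
 * exception frames.
 */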
178 * The following macros define the code that appears as
179 * the prologue to each of the exception handlers. They
180 * are split into two parts to allow a single kernel binary
181 * to be used for pSeries and iSeries.
182 * LOL. One day... - paulus
186 * We make as much of the exception code common between native
187 * exception handlers (including pSeries LPAR) and iSeries LPAR
188 * implementations as possible.
192 * This is the start of the interrupt handlers for pSeries
193 * This code runs with relocation off.
208 * We're short on space and time in the exception prolog, so we can't use
209 * the normal LOADADDR macro. Normally we just need the low halfword of the
210 * address, but for Kdump we need the whole low word.
212 #ifdef CONFIG_CRASH_DUMP
213 #define LOAD_HANDLER(reg, label) \
214 oris reg,reg,(label)@h; /* virt addr of handler ... */ \
215 ori reg,reg,(label)@l; /* .. and the rest */
217 #define LOAD_HANDLER(reg, label) \
218 ori reg,reg,(label)@l; /* virt addr of handler ... */
221 #define EXCEPTION_PROLOG_PSERIES(area, label) \
222 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
223 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
224 std r10,area+EX_R10(r13); \
225 std r11,area+EX_R11(r13); \
226 std r12,area+EX_R12(r13); \
227 mfspr r9,SPRN_SPRG1; \
228 std r9,area+EX_R13(r13); \
230 clrrdi r12,r13,32; /* get high part of &label */ \
232 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
233 LOAD_HANDLER(r12,label) \
234 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
235 mtspr SPRN_SRR0,r12; \
236 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
237 mtspr SPRN_SRR1,r10; \
239 b . /* prevent speculative execution */
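/*
 * Note how EXCEPTION_PROLOG_PSERIES builds the handler's virtual
 * address without an extra scratch register: the high 32 bits are taken
 * from the paca pointer in r13 (the paca and the handlers both live in
 * the kernel region, so the upper words match) and LOAD_HANDLER ORs in
 * the handler's low-order offset. The interrupted SRR0/SRR1 are handed
 * to the handler in r11/r12, and the return through the new SRR0/SRR1
 * turns relocation (MSR_IR|MSR_DR) back on.
 */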
242 * This is the start of the interrupt handlers for iSeries
243 * This code runs with relocation on.
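 * Since the hypervisor enters us with relocation already on, the
 * interrupted SRR0/SRR1 are not read from the SRR registers; they have
 * been stashed in the lppaca, and EXCEPTION_PROLOG_ISERIES_2 fetches
 * them from PACALPPACA+LPPACASRR0/1 before setting MSR_RI.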
245 #define EXCEPTION_PROLOG_ISERIES_1(area) \
246 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
247 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
248 std r10,area+EX_R10(r13); \
249 std r11,area+EX_R11(r13); \
250 std r12,area+EX_R12(r13); \
251 mfspr r9,SPRN_SPRG1; \
252 std r9,area+EX_R13(r13); \
255 #define EXCEPTION_PROLOG_ISERIES_2 \
257 ld r11,PACALPPACA+LPPACASRR0(r13); \
258 ld r12,PACALPPACA+LPPACASRR1(r13); \
259 ori r10,r10,MSR_RI; \
263 * The common exception prolog is used for all except a few exceptions
264 * such as a segment miss on a kernel address. We have to be prepared
265 * to take another exception from the point where we first touch the
266 * kernel stack onwards.
268 * On entry r13 points to the paca, r9-r13 are saved in the paca,
269 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
270 * SRR1, and relocation is on.
272 #define EXCEPTION_PROLOG_COMMON(n, area) \
273 andi. r10,r12,MSR_PR; /* See if coming from user */ \
274 mr r10,r1; /* Save r1 */ \
275 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
277 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
278 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
279 bge- cr1,bad_stack; /* abort if it is */ \
280 std r9,_CCR(r1); /* save CR in stackframe */ \
281 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
282 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
283 std r10,0(r1); /* make stack chain pointer */ \
284 std r0,GPR0(r1); /* save r0 in stackframe */ \
285 std r10,GPR1(r1); /* save r1 in stackframe */ \
286 std r2,GPR2(r1); /* save r2 in stackframe */ \
287 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
288 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
289 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
290 ld r10,area+EX_R10(r13); \
293 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
294 ld r10,area+EX_R12(r13); \
295 ld r11,area+EX_R13(r13); \
299 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
300 mflr r9; /* save LR in stackframe */ \
302 mfctr r10; /* save CTR in stackframe */ \
304 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
307 std r9,_TRAP(r1); /* set trap number */ \
309 ld r11,exception_marker@toc(r2); \
310 std r10,RESULT(r1); /* clear regs->result */ \
311 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
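/*
 * After EXCEPTION_PROLOG_COMMON, r1 points at a freshly allocated
 * INT_FRAME_SIZE frame on the kernel stack laid out as a pt_regs: the
 * back-chain word at 0(r1) points to the interrupted r1, the GPRs, CR,
 * SRR0 (NIP), SRR1 (MSR), LR, CTR, XER and trap number sit above
 * STACK_FRAME_OVERHEAD, and the "regshere" marker at
 * STACK_FRAME_OVERHEAD-16 identifies it as an exception frame.
 */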
316 #define STD_EXCEPTION_PSERIES(n, label) \
318 .globl label##_pSeries; \
321 mtspr SPRN_SPRG1,r13; /* save r13 */ \
323 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
325 #define STD_EXCEPTION_ISERIES(n, label, area) \
326 .globl label##_iSeries; \
329 mtspr SPRN_SPRG1,r13; /* save r13 */ \
331 EXCEPTION_PROLOG_ISERIES_1(area); \
332 EXCEPTION_PROLOG_ISERIES_2; \
335 #define MASKABLE_EXCEPTION_ISERIES(n, label) \
336 .globl label##_iSeries; \
339 mtspr SPRN_SPRG1,r13; /* save r13 */ \
341 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
342 lbz r10,PACAPROCENABLED(r13); \
344 beq- label##_iSeries_masked; \
345 EXCEPTION_PROLOG_ISERIES_2; \
348 #ifdef DO_SOFT_DISABLE
349 #define DISABLE_INTS \
350 lbz r10,PACAPROCENABLED(r13); \
354 stb r11,PACAPROCENABLED(r13); \
355 ori r10,r10,MSR_EE; \
358 #define ENABLE_INTS \
359 lbz r10,PACAPROCENABLED(r13); \
362 ori r11,r11,MSR_EE; \
365 #else /* hard enable/disable interrupts */
368 #define ENABLE_INTS \
371 rlwimi r11,r12,0,MSR_EE; \
376 #define STD_EXCEPTION_COMMON(trap, label, hdlr) \
378 .globl label##_common; \
380 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
383 addi r3,r1,STACK_FRAME_OVERHEAD; \
387 #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
389 .globl label##_common; \
391 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
393 addi r3,r1,STACK_FRAME_OVERHEAD; \
395 b .ret_from_except_lite
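/*
 * The _LITE variant returns through .ret_from_except_lite, which does
 * not bother with the non-volatile GPRs; it is used below for the
 * decrementer, whose handler never needs the full register image.
 */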
398 * Start of pSeries system interrupt routines
401 .globl __start_interrupts
404 STD_EXCEPTION_PSERIES(0x100, system_reset)
407 _machine_check_pSeries:
409 mtspr SPRN_SPRG1,r13 /* save r13 */
411 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
414 .globl data_access_pSeries
423 rlwimi r13,r12,16,0x20
426 beq .do_stab_bolted_pSeries
429 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
430 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
433 .globl data_access_slb_pSeries
434 data_access_slb_pSeries:
438 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
439 std r3,PACA_EXSLB+EX_R3(r13)
441 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
444 /* Keep that around for when we re-implement dynamic VSIDs */
446 bge slb_miss_user_pseries
447 #endif /* __DISABLED__ */
448 std r10,PACA_EXSLB+EX_R10(r13)
449 std r11,PACA_EXSLB+EX_R11(r13)
450 std r12,PACA_EXSLB+EX_R12(r13)
452 std r10,PACA_EXSLB+EX_R13(r13)
453 mfspr r12,SPRN_SRR1 /* and SRR1 */
454 b .slb_miss_realmode /* Rel. branch works in real mode */
456 STD_EXCEPTION_PSERIES(0x400, instruction_access)
459 .globl instruction_access_slb_pSeries
460 instruction_access_slb_pSeries:
464 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
465 std r3,PACA_EXSLB+EX_R3(r13)
466 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
467 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
470 /* Keep that around for when we re-implement dynamic VSIDs */
472 bge slb_miss_user_pseries
473 #endif /* __DISABLED__ */
474 std r10,PACA_EXSLB+EX_R10(r13)
475 std r11,PACA_EXSLB+EX_R11(r13)
476 std r12,PACA_EXSLB+EX_R12(r13)
478 std r10,PACA_EXSLB+EX_R13(r13)
479 mfspr r12,SPRN_SRR1 /* and SRR1 */
480 b .slb_miss_realmode /* Rel. branch works in real mode */
482 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
483 STD_EXCEPTION_PSERIES(0x600, alignment)
484 STD_EXCEPTION_PSERIES(0x700, program_check)
485 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
486 STD_EXCEPTION_PSERIES(0x900, decrementer)
487 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
488 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
491 .globl system_call_pSeries
500 oris r12,r12,system_call_common@h
501 ori r12,r12,system_call_common@l
503 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
507 b . /* prevent speculative execution */
509 STD_EXCEPTION_PSERIES(0xd00, single_step)
510 STD_EXCEPTION_PSERIES(0xe00, trap_0e)
512 /* We need to deal with the Altivec unavailable exception
513 * here: it is at 0xf20 and thus lands in the middle of the
514 * prolog code of the performance monitor exception. A little
515 * trickery is thus necessary
518 b performance_monitor_pSeries
520 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
522 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
523 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
527 /*** pSeries interrupt support ***/
529 /* moved from 0xf00 */
530 STD_EXCEPTION_PSERIES(., performance_monitor)
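/*
 * The performance monitor prolog was moved here because its vector at
 * 0xf00 is only 0x20 bytes away from the Altivec unavailable vector at
 * 0xf20: 0xf00 itself holds just a branch to this relocated prolog
 * (see the "trickery" comment above).
 */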
533 _GLOBAL(do_stab_bolted_pSeries)
536 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
539 * We have some room here; we use it to put
540 * the pSeries SLB miss user trampoline code so it's reasonably
541 * far away from slb_miss_user_common to avoid problems with rfid
543 * This is used when the SLB miss handler has to go virtual,
544 * which doesn't happen at the moment but will once we re-implement
545 * dynamic VSIDs for shared page tables
548 slb_miss_user_pseries:
549 std r10,PACA_EXGEN+EX_R10(r13)
550 std r11,PACA_EXGEN+EX_R11(r13)
551 std r12,PACA_EXGEN+EX_R12(r13)
553 ld r11,PACA_EXSLB+EX_R9(r13)
554 ld r12,PACA_EXSLB+EX_R3(r13)
555 std r10,PACA_EXGEN+EX_R13(r13)
556 std r11,PACA_EXGEN+EX_R9(r13)
557 std r12,PACA_EXGEN+EX_R3(r13)
560 mfspr r11,SPRN_SRR0 /* save SRR0 */
561 ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
562 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
564 mfspr r12,SPRN_SRR1 /* and SRR1 */
567 b . /* prevent spec. execution */
568 #endif /* __DISABLED__ */
571 * Vectors for the FWNMI option. Share common code.
573 .globl system_reset_fwnmi
577 mtspr SPRN_SPRG1,r13 /* save r13 */
579 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
581 .globl machine_check_fwnmi
585 mtspr SPRN_SPRG1,r13 /* save r13 */
587 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
589 #ifdef CONFIG_PPC_ISERIES
590 /*** ISeries-LPAR interrupt handlers ***/
592 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
594 .globl data_access_iSeries
602 rlwimi r13,r12,16,0x20
605 beq .do_stab_bolted_iSeries
608 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
609 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
610 EXCEPTION_PROLOG_ISERIES_2
613 .do_stab_bolted_iSeries:
616 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
617 EXCEPTION_PROLOG_ISERIES_2
620 .globl data_access_slb_iSeries
621 data_access_slb_iSeries:
622 mtspr SPRN_SPRG1,r13 /* save r13 */
623 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
624 std r3,PACA_EXSLB+EX_R3(r13)
626 std r9,PACA_EXSLB+EX_R9(r13)
630 bge slb_miss_user_iseries
632 std r10,PACA_EXSLB+EX_R10(r13)
633 std r11,PACA_EXSLB+EX_R11(r13)
634 std r12,PACA_EXSLB+EX_R12(r13)
636 std r10,PACA_EXSLB+EX_R13(r13)
637 ld r12,PACALPPACA+LPPACASRR1(r13)
640 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
642 .globl instruction_access_slb_iSeries
643 instruction_access_slb_iSeries:
644 mtspr SPRN_SPRG1,r13 /* save r13 */
645 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
646 std r3,PACA_EXSLB+EX_R3(r13)
647 ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
648 std r9,PACA_EXSLB+EX_R9(r13)
652 bge .slb_miss_user_iseries
654 std r10,PACA_EXSLB+EX_R10(r13)
655 std r11,PACA_EXSLB+EX_R11(r13)
656 std r12,PACA_EXSLB+EX_R12(r13)
658 std r10,PACA_EXSLB+EX_R13(r13)
659 ld r12,PACALPPACA+LPPACASRR1(r13)
663 slb_miss_user_iseries:
664 std r10,PACA_EXGEN+EX_R10(r13)
665 std r11,PACA_EXGEN+EX_R11(r13)
666 std r12,PACA_EXGEN+EX_R12(r13)
668 ld r11,PACA_EXSLB+EX_R9(r13)
669 ld r12,PACA_EXSLB+EX_R3(r13)
670 std r10,PACA_EXGEN+EX_R13(r13)
671 std r11,PACA_EXGEN+EX_R9(r13)
672 std r12,PACA_EXGEN+EX_R3(r13)
673 EXCEPTION_PROLOG_ISERIES_2
674 b slb_miss_user_common
677 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
678 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
679 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
680 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
681 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
682 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
683 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
685 .globl system_call_iSeries
689 EXCEPTION_PROLOG_ISERIES_2
692 STD_EXCEPTION_ISERIES(0xd00, single_step, PACA_EXGEN)
693 STD_EXCEPTION_ISERIES(0xe00, trap_0e, PACA_EXGEN)
694 STD_EXCEPTION_ISERIES(0xf00, performance_monitor, PACA_EXGEN)
696 .globl system_reset_iSeries
697 system_reset_iSeries:
698 mfspr r13,SPRN_SPRG3 /* Get paca address */
701 mtmsrd r24 /* RI on */
702 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
703 cmpwi 0,r24,0 /* Are we processor 0? */
704 beq .__start_initialization_iSeries /* Start up the first processor */
706 li r5,CTRL_RUNLATCH /* Turn off the run light */
713 lbz r23,PACAPROCSTART(r13) /* Test if this processor should start */
716 LOADADDR(r3,current_set)
717 sldi r28,r24,3 /* get current_set[cpu#] */
719 addi r1,r3,THREAD_SIZE
720 subi r1,r1,STACK_FRAME_OVERHEAD
723 beq iSeries_secondary_smp_loop /* Loop until told to go */
724 bne .__secondary_start /* Loop until told to go */
725 iSeries_secondary_smp_loop:
726 /* Let the Hypervisor know we are alive */
727 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
729 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
730 #else /* CONFIG_SMP */
731 /* Yield the processor. This is required for non-SMP kernels
732 which are running on multi-threaded machines. */
734 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
735 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
736 li r4,0 /* "yield timed" */
737 li r5,-1 /* "yield forever" */
738 #endif /* CONFIG_SMP */
739 li r0,-1 /* r0=-1 indicates a Hypervisor call */
740 sc /* Invoke the hypervisor via a system call */
741 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
742 b 1b /* If SMP not configured, secondaries loop forever */
745 .globl decrementer_iSeries_masked
746 decrementer_iSeries_masked:
748 stb r11,PACALPPACA+LPPACADECRINT(r13)
749 LOADBASE(r12,tb_ticks_per_jiffy)
750 lwz r12,OFF(tb_ticks_per_jiffy)(r12)
754 .globl hardware_interrupt_iSeries_masked
755 hardware_interrupt_iSeries_masked:
756 mtcrf 0x80,r9 /* Restore regs */
757 ld r11,PACALPPACA+LPPACASRR0(r13)
758 ld r12,PACALPPACA+LPPACASRR1(r13)
761 ld r9,PACA_EXGEN+EX_R9(r13)
762 ld r10,PACA_EXGEN+EX_R10(r13)
763 ld r11,PACA_EXGEN+EX_R11(r13)
764 ld r12,PACA_EXGEN+EX_R12(r13)
765 ld r13,PACA_EXGEN+EX_R13(r13)
767 b . /* prevent speculative execution */
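/*
 * Masked handling: when PACAPROCENABLED shows interrupts soft-disabled,
 * the maskable iSeries vectors (decrementer, external interrupt) come
 * here instead of running their handlers. A masked decrementer is noted
 * in the lppaca (LPPACADECRINT) and the decrementer is reloaded from
 * tb_ticks_per_jiffy; both paths then restore the scratch registers and
 * return, leaving the event to be replayed once interrupts are
 * soft-enabled again.
 */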
768 #endif /* CONFIG_PPC_ISERIES */
770 /*** Common interrupt handlers ***/
772 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
775 * Machine check is different because we use a different
776 * save area: PACA_EXMC instead of PACA_EXGEN.
779 .globl machine_check_common
780 machine_check_common:
781 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
784 addi r3,r1,STACK_FRAME_OVERHEAD
785 bl .machine_check_exception
788 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
789 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
790 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
791 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
792 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
793 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
794 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
795 #ifdef CONFIG_ALTIVEC
796 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
798 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
802 * Here we have detected that the kernel stack pointer is bad.
803 * R9 contains the saved CR, r13 points to the paca,
804 * r10 contains the (bad) kernel stack pointer,
805 * r11 and r12 contain the saved SRR0 and SRR1.
806 * We switch to using an emergency stack, save the registers there,
807 * and call kernel_bad_stack(), which panics.
810 ld r1,PACAEMERGSP(r13)
811 subi r1,r1,64+INT_FRAME_SIZE
832 addi r11,r1,INT_FRAME_SIZE
837 1: addi r3,r1,STACK_FRAME_OVERHEAD
842 * Return from an exception with minimal checks.
843 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
844 * If interrupts have been enabled, or anything has been
845 * done that might have changed the scheduling status of
846 * any task or sent any task a signal, you should use
847 * ret_from_except or ret_from_except_lite instead of this.
849 .globl fast_exception_return
850 fast_exception_return:
853 andi. r3,r12,MSR_RI /* check if RI is set */
867 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
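/* RI is cleared here because SRR0/SRR1 are about to be loaded with the
 * return state; an exception taken while RI is clear is known to be
 * unrecoverable (see the MSR_RI checks in the handlers) rather than
 * silently corrupting the saved state. */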
875 b . /* prevent speculative execution */
879 1: addi r3,r1,STACK_FRAME_OVERHEAD
880 bl .unrecoverable_exception
884 * Here r13 points to the paca, r9 contains the saved CR,
885 * SRR0 and SRR1 are saved in r11 and r12,
886 * r9 - r13 are saved in paca->exgen.
889 .globl data_access_common
891 RUNLATCH_ON(r10) /* It won't fit in the 0x300 handler */
893 std r10,PACA_EXGEN+EX_DAR(r13)
895 stw r10,PACA_EXGEN+EX_DSISR(r13)
896 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
897 ld r3,PACA_EXGEN+EX_DAR(r13)
898 lwz r4,PACA_EXGEN+EX_DSISR(r13)
900 b .do_hash_page /* Try to handle as hpte fault */
903 .globl instruction_access_common
904 instruction_access_common:
905 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
909 b .do_hash_page /* Try to handle as hpte fault */
912 * Here is the common SLB miss user handler that is used when going to
913 * virtual mode for SLB misses; it is currently not used
917 .globl slb_miss_user_common
918 slb_miss_user_common:
920 std r3,PACA_EXGEN+EX_DAR(r13)
921 stw r9,PACA_EXGEN+EX_CCR(r13)
922 std r10,PACA_EXGEN+EX_LR(r13)
923 std r11,PACA_EXGEN+EX_SRR0(r13)
924 bl .slb_allocate_user
926 ld r10,PACA_EXGEN+EX_LR(r13)
927 ld r3,PACA_EXGEN+EX_R3(r13)
928 lwz r9,PACA_EXGEN+EX_CCR(r13)
929 ld r11,PACA_EXGEN+EX_SRR0(r13)
933 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
934 beq- unrecov_user_slb
942 clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
948 ld r9,PACA_EXGEN+EX_R9(r13)
949 ld r10,PACA_EXGEN+EX_R10(r13)
950 ld r11,PACA_EXGEN+EX_R11(r13)
951 ld r12,PACA_EXGEN+EX_R12(r13)
952 ld r13,PACA_EXGEN+EX_R13(r13)
957 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
958 ld r4,PACA_EXGEN+EX_DAR(r13)
965 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
968 1: addi r3,r1,STACK_FRAME_OVERHEAD
969 bl .unrecoverable_exception
972 #endif /* __DISABLED__ */
976 * r13 points to the PACA, r9 contains the saved CR,
977 * r12 contains the saved SRR1, SRR0 is still ready for return
978 * r3 has the faulting address
979 * r9 - r13 are saved in paca->exslb.
980 * r3 is saved in paca->slb_r3
981 * We assume we aren't going to take any exceptions during this procedure.
983 _GLOBAL(slb_miss_realmode)
986 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
987 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
989 bl .slb_allocate_realmode
991 /* All done -- return from exception. */
993 ld r10,PACA_EXSLB+EX_LR(r13)
994 ld r3,PACA_EXSLB+EX_R3(r13)
995 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
996 #ifdef CONFIG_PPC_ISERIES
997 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
998 #endif /* CONFIG_PPC_ISERIES */
1002 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1008 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1011 #ifdef CONFIG_PPC_ISERIES
1014 #endif /* CONFIG_PPC_ISERIES */
1015 ld r9,PACA_EXSLB+EX_R9(r13)
1016 ld r10,PACA_EXSLB+EX_R10(r13)
1017 ld r11,PACA_EXSLB+EX_R11(r13)
1018 ld r12,PACA_EXSLB+EX_R12(r13)
1019 ld r13,PACA_EXSLB+EX_R13(r13)
1021 b . /* prevent speculative execution */
1024 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1027 1: addi r3,r1,STACK_FRAME_OVERHEAD
1028 bl .unrecoverable_exception
1032 .globl hardware_interrupt_common
1033 .globl hardware_interrupt_entry
1034 hardware_interrupt_common:
1035 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
1036 hardware_interrupt_entry:
1038 addi r3,r1,STACK_FRAME_OVERHEAD
1040 b .ret_from_except_lite
1043 .globl alignment_common
1046 std r10,PACA_EXGEN+EX_DAR(r13)
1047 mfspr r10,SPRN_DSISR
1048 stw r10,PACA_EXGEN+EX_DSISR(r13)
1049 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1050 ld r3,PACA_EXGEN+EX_DAR(r13)
1051 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1055 addi r3,r1,STACK_FRAME_OVERHEAD
1057 bl .alignment_exception
1061 .globl program_check_common
1062 program_check_common:
1063 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1065 addi r3,r1,STACK_FRAME_OVERHEAD
1067 bl .program_check_exception
1071 .globl fp_unavailable_common
1072 fp_unavailable_common:
1073 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1074 bne .load_up_fpu /* if from user, just load it up */
1076 addi r3,r1,STACK_FRAME_OVERHEAD
1078 bl .kernel_fp_unavailable_exception
1082 .globl altivec_unavailable_common
1083 altivec_unavailable_common:
1084 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1085 #ifdef CONFIG_ALTIVEC
1087 bne .load_up_altivec /* if from user, just load it up */
1088 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1091 addi r3,r1,STACK_FRAME_OVERHEAD
1093 bl .altivec_unavailable_exception
1096 #ifdef CONFIG_ALTIVEC
1098 * load_up_altivec(unused, unused, tsk)
1099 * Disable VMX for the task which had it previously,
1100 * and save its vector registers in its thread_struct.
1101 * Enables the VMX for use in the kernel on return.
1102 * On SMP we know the VMX is free, since we give it up every
1103 * switch (ie, no lazy save of the vector registers).
1104 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
1106 _STATIC(load_up_altivec)
1107 mfmsr r5 /* grab the current MSR */
1108 oris r5,r5,MSR_VEC@h
1109 mtmsrd r5 /* enable use of VMX now */
1113 * For SMP, we don't do lazy VMX switching because it just gets too
1114 * horrendously complex, especially when a task switches from one CPU
1115 * to another. Instead we call giveup_altivec in switch_to.
1116 * VRSAVE isn't dealt with here, that is done in the normal context
1117 * switch code. Note that we could rely on vrsave value to eventually
1118 * avoid saving all of the VREGs here...
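 *
 * UP lazy switch: if another task still owns the VMX unit, we save its
 * vector state into that task's thread_struct and clear MSR_VEC in the
 * MSR image saved on its kernel stack, so it will trap and reload the
 * next time it touches VMX.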
1121 ld r3,last_task_used_altivec@got(r2)
1125 /* Save VMX state to last_task_used_altivec's THREAD struct */
1131 /* Disable VMX for last_task_used_altivec */
1133 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1136 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1138 #endif /* CONFIG_SMP */
1139 /* Hack: if we get an altivec unavailable trap with VRSAVE
1140 * set to all zeros, we assume this is a broken application
1141 * that fails to set it properly, and thus we switch it to all 1's */
1144 mfspr r4,SPRN_VRSAVE
1148 mtspr SPRN_VRSAVE,r4
1150 /* enable use of VMX after return */
1151 ld r4,PACACURRENT(r13)
1152 addi r5,r4,THREAD /* Get THREAD */
1153 oris r12,r12,MSR_VEC@h
1157 stw r4,THREAD_USED_VR(r5)
1162 /* Update last_task_used_altivec to 'current' */
1163 subi r4,r5,THREAD /* Back to 'current' */
1165 #endif /* CONFIG_SMP */
1166 /* restore registers and return */
1167 b fast_exception_return
1168 #endif /* CONFIG_ALTIVEC */
1174 _GLOBAL(do_hash_page)
1178 andis. r0,r4,0xa450 /* weird error? */
1179 bne- .handle_page_fault /* if not, try to insert a HPTE */
1181 andis. r0,r4,0x0020 /* Is it a segment table fault? */
1182 bne- .do_ste_alloc /* If so handle it */
1183 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1186 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1187 * accessing a userspace segment (even from the kernel). We assume
1188 * kernel addresses always have the high bit set.
1190 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1191 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
1192 orc r0,r12,r0 /* MSR_PR | ~high_bit */
1193 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
1194 ori r4,r4,1 /* add _PAGE_PRESENT */
1195 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
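/* r4 now holds the access mask passed to hash_page below:
 * _PAGE_PRESENT, plus _PAGE_RW for a store fault, _PAGE_USER for an
 * access made with MSR_PR set or to a user segment, and _PAGE_EXEC
 * when we came in through the 0x400 (instruction access) vector. */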
1198 * On iSeries, we soft-disable interrupts here, then
1199 * hard-enable interrupts so that the hash_page code can spin on
1200 * the hash_table_lock without problems on a shared processor.
1205 * r3 contains the faulting address
1206 * r4 contains the required access permissions
1207 * r5 contains the trap number
1209 * at return r3 = 0 for success
1211 bl .hash_page /* build HPTE if possible */
1212 cmpdi r3,0 /* see if hash_page succeeded */
1214 #ifdef DO_SOFT_DISABLE
1216 * If we had interrupts soft-enabled at the point where the
1217 * DSI/ISI occurred, and an interrupt came in during hash_page,
1219 * We jump to ret_from_except_lite rather than fast_exception_return
1220 * because ret_from_except_lite will check for and handle pending
1221 * interrupts if necessary.
1223 beq .ret_from_except_lite
1224 /* For a hash failure, we don't bother re-enabling interrupts */
1228 * hash_page couldn't handle it, set soft interrupt enable back
1229 * to what it was before the trap. Note that .local_irq_restore
1230 * handles any interrupts pending at this point.
1233 bl .local_irq_restore
1236 beq fast_exception_return /* Return from exception on success */
1237 ble- 12f /* Failure return from hash_page */
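/* hash_page returns 0 when the HPTE went in, so we go straight back to
 * the interrupted code; a negative result means the insertion was
 * refused (handled at 12: below); anything else falls through to
 * handle_page_fault. */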
1242 /* Here we have a page fault that hash_page can't handle. */
1243 _GLOBAL(handle_page_fault)
1247 addi r3,r1,STACK_FRAME_OVERHEAD
1250 beq+ .ret_from_except_lite
1253 addi r3,r1,STACK_FRAME_OVERHEAD
1258 /* We have a page fault that hash_page could handle but HV refused the PTE insertion */
1262 addi r3,r1,STACK_FRAME_OVERHEAD
1267 /* here we have a segment miss */
1268 _GLOBAL(do_ste_alloc)
1269 bl .ste_allocate /* try to insert stab entry */
1271 beq+ fast_exception_return
1272 b .handle_page_fault
1275 * r13 points to the PACA, r9 contains the saved CR,
1276 * r11 and r12 contain the saved SRR0 and SRR1.
1277 * r9 - r13 are saved in paca->exslb.
1278 * We assume we aren't going to take any exceptions during this procedure.
1279 * We assume (DAR >> 60) == 0xc.
1282 _GLOBAL(do_stab_bolted)
1283 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1284 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1286 /* Hash to the primary group */
1287 ld r10,PACASTABVIRT(r13)
1290 rldimi r10,r11,7,52 /* r10 = first ste of the group */
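/* A segment table entry is 16 bytes and a group holds 8 of them, so
 * groups are 128 bytes apart; ORing the hashed ESID bits in at a
 * 128-byte granularity selects one of the 32 groups of the 4k segment
 * table. */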
1292 /* Calculate VSID */
1293 /* This is a kernel address, so protovsid = ESID */
1294 ASM_VSID_SCRAMBLE(r11, r9)
1295 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1297 /* Search the primary group for a free entry */
1298 1: ld r11,0(r10) /* Test valid bit of the current ste */
1305 /* Stick to searching only the primary group for now. */
1306 /* At least for now, we use a very simple random castout scheme */
1307 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1309 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1312 /* r10 currently points to an ste one past the group of interest */
1313 /* make it point to the randomly selected entry */
1315 or r10,r10,r11 /* r10 is the entry to invalidate */
1317 isync /* mark the entry invalid */
1319 rldicl r11,r11,56,1 /* clear the valid bit */
1324 clrrdi r11,r11,28 /* Get the esid part of the ste */
1327 2: std r9,8(r10) /* Store the vsid part of the ste */
1330 mfspr r11,SPRN_DAR /* Get the new esid */
1331 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1332 ori r11,r11,0x90 /* Turn on valid and kp */
1333 std r11,0(r10) /* Put new entry back into the stab */
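/* The vsid doubleword is stored above before the esid word that
 * carries the valid bit, so the MMU never picks up a valid STE paired
 * with a stale vsid. */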
1337 /* All done -- return from exception. */
1338 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1339 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1341 andi. r10,r12,MSR_RI
1344 mtcrf 0x80,r9 /* restore CR */
1352 ld r9,PACA_EXSLB+EX_R9(r13)
1353 ld r10,PACA_EXSLB+EX_R10(r13)
1354 ld r11,PACA_EXSLB+EX_R11(r13)
1355 ld r12,PACA_EXSLB+EX_R12(r13)
1356 ld r13,PACA_EXSLB+EX_R13(r13)
1358 b . /* prevent speculative execution */
1361 * Space for CPU0's segment table.
1363 * On iSeries, the hypervisor must fill in at least one entry before
1364 * we get control (with relocation on). The address is given to the hv
1365 * as a page number (see xLparMap in lpardata.c), so this must be at a
1366 * fixed address (the linker can't compute (u64)&initial_stab >> PAGE_SHIFT).
1369 . = STAB0_OFFSET /* 0x6000 */
1375 * Data area reserved for FWNMI option.
1376 * This address (0x7000) is fixed by the RPA.
1379 .globl fwnmi_data_area
1382 /* iSeries does not use the FWNMI stuff, so it is safe to put
1383 * this here, even if we later allow kernels that will boot on
1384 * both pSeries and iSeries */
1385 #ifdef CONFIG_PPC_ISERIES
1387 #include "lparmap.s"
1389 * This ".text" is here for old compilers that generate a trailing
1390 * .note section when compiling .c files to .s
1393 #endif /* CONFIG_PPC_ISERIES */
1398 * On pSeries, secondary processors spin in the following code.
1399 * At entry, r3 = this processor's number (physical cpu id)
1401 _GLOBAL(pSeries_secondary_smp_init)
1404 /* turn on 64-bit mode */
1408 /* Copy some CPU settings from CPU 0 */
1409 bl .__restore_cpu_setup
1411 /* Set up a paca value for this processor. Since we have the
1412 * physical cpu id in r24, we need to search the pacas to find
1413 * which logical id maps to our physical one.
1415 LOADADDR(r13, paca) /* Get base vaddr of paca array */
1416 li r5,0 /* logical cpu id */
1417 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1418 cmpw r6,r24 /* Compare to our id */
1420 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1425 mr r3,r24 /* not found, copy phys to r3 */
1426 b .kexec_wait /* next kernel might do better */
1428 2: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1429 /* From now on, r24 is expected to be logical cpuid */
1432 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1436 /* Create a temp kernel stack for use before relocation is on. */
1437 ld r1,PACAEMERGSP(r13)
1438 subi r1,r1,STACK_FRAME_OVERHEAD
1442 bne .__secondary_start
1444 b 3b /* Loop until told to go */
1446 #ifdef CONFIG_PPC_ISERIES
1447 _STATIC(__start_initialization_iSeries)
1448 /* Clear out the BSS */
1449 LOADADDR(r11,__bss_stop)
1450 LOADADDR(r8,__bss_start)
1451 sub r11,r11,r8 /* bss size */
1452 addi r11,r11,7 /* round up to an even double word */
1453 rldicl. r11,r11,61,3 /* shift right by 3 */
1457 mtctr r11 /* zero this many doublewords */
1461 LOADADDR(r1,init_thread_union)
1462 addi r1,r1,THREAD_SIZE
1464 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1466 LOADADDR(r3,cpu_specs)
1467 LOADADDR(r4,cur_cpu_spec)
1471 LOADADDR(r2,__toc_start)
1475 bl .iSeries_early_setup
1478 /* relocation is on at this point */
1480 b .start_here_common
1481 #endif /* CONFIG_PPC_ISERIES */
1483 #ifdef CONFIG_PPC_MULTIPLATFORM
1487 andi. r0,r3,MSR_IR|MSR_DR
1494 b . /* prevent speculative execution */
1498 * Here is our main kernel entry point. We currently support two kinds of
1499 * entry, depending on the value of r5.
1501 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1504 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1505 * DT block, r4 is a physical pointer to the kernel itself
1508 _GLOBAL(__start_initialization_multiplatform)
1509 #ifdef CONFIG_PPC_MULTIPLATFORM
1511 * Are we booted from a PROM OF-type client interface?
1514 bne .__boot_from_prom /* yes -> prom */
1517 /* Save parameters */
1521 /* Make sure we are running in 64-bit mode */
1524 /* Setup some critical 970 SPRs before switching MMU off */
1525 bl .__970_cpu_preinit
1530 /* Switch off MMU if not already */
1531 LOADADDR(r4, .__after_prom_start - KERNELBASE)
1534 b .__after_prom_start
1536 #ifdef CONFIG_PPC_MULTIPLATFORM
1537 _STATIC(__boot_from_prom)
1538 /* Save parameters */
1545 /* Make sure we are running in 64-bit mode */
1548 /* put a relocation offset into r3 */
1551 LOADADDR(r2,__toc_start)
1555 /* Relocate the TOC from a virt addr to a real addr */
1558 /* Restore parameters */
1565 /* Do all of the interaction with OF client interface */
1567 /* We never return */
1572 * At this point, r3 contains the physical address we are running at,
1573 * returned by prom_init()
1575 _STATIC(__after_prom_start)
1578 * We need to run with __start at physical address PHYSICAL_START.
1579 * This will leave some code in the first 256B of
1580 * real memory, which is reserved for software use.
1581 * The remainder of the first page is loaded with the fixed
1582 * interrupt vectors. The next two pages are filled with
1583 * unknown exception placeholders.
1585 * Note: This process overwrites the OF exception vectors.
1586 * r26 == relocation offset
1591 SET_REG_TO_CONST(r27,KERNELBASE)
1593 LOADADDR(r3, PHYSICAL_START) /* target addr */
1595 // XXX FIXME: Use phys returned by OF (r30)
1596 add r4,r27,r26 /* source addr */
1597 /* current address of _start */
1598 /* i.e. where we are running */
1599 /* the source addr */
1601 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
1604 li r6,0x100 /* Start offset, the first 0x100 */
1605 /* bytes were copied earlier. */
1607 bl .copy_and_flush /* copy the first n bytes */
1608 /* this includes the code being */
1609 /* executed here. */
1611 LOADADDR(r0, 4f) /* Jump to the copy of this code */
1612 mtctr r0 /* that we just made/relocated */
1615 4: LOADADDR(r5,klimit)
1617 ld r5,0(r5) /* get the value of klimit */
1619 bl .copy_and_flush /* copy the rest */
1620 b .start_here_multiplatform
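/*
 * The copy is done in two stages because copy_and_flush is itself part
 * of the image being copied: everything up to copy_to_here (including
 * the code executing right now) is copied to PHYSICAL_START first,
 * execution jumps to that copy, and only then is the remainder up to
 * klimit copied, so we never overwrite instructions that are still
 * being executed.
 */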
1622 #endif /* CONFIG_PPC_MULTIPLATFORM */
1625 * Copy routine used to copy the kernel to start at physical address 0
1626 * and flush and invalidate the caches as needed.
1627 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1628 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1630 * Note: this routine *only* clobbers r0, r6 and lr
1632 _GLOBAL(copy_and_flush)
1635 4: li r0,16 /* Use the least common */
1636 /* denominator cache line */
1637 /* size. This results in */
1638 /* extra cache line flushes */
1639 /* but operation is correct. */
1640 /* Can't get cache line size */
1641 /* from NACA as it is being moved too. */
1644 mtctr r0 /* put # words/line in ctr */
1645 3: addi r6,r6,8 /* copy a cache line */
1649 dcbst r6,r3 /* write it to memory */
1651 icbi r6,r3 /* flush the icache line */
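/* dcbst pushes the freshly copied line out to memory and icbi throws
 * away any stale copy in the instruction cache; both are needed
 * because the destination is about to be executed. */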
1663 #ifdef CONFIG_PPC_PMAC
1665 * On PowerMac, secondary processors start from the reset vector, which
1666 * is temporarily turned into a call to one of the functions below.
1671 .globl __secondary_start_pmac_0
1672 __secondary_start_pmac_0:
1673 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1683 _GLOBAL(pmac_secondary_start)
1684 /* turn on 64-bit mode */
1688 /* Copy some CPU settings from CPU 0 */
1689 bl .__restore_cpu_setup
1691 /* pSeries does that early, though I don't think we really need it */
1694 mtmsrd r3 /* RI on */
1696 /* Set up a paca value for this processor. */
1697 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1698 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1699 add r13,r13,r4 /* for this processor. */
1700 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1702 /* Create a temp kernel stack for use before relocation is on. */
1703 ld r1,PACAEMERGSP(r13)
1704 subi r1,r1,STACK_FRAME_OVERHEAD
1706 b .__secondary_start
1708 #endif /* CONFIG_PPC_PMAC */
1711 * This function is called after the master CPU has released the
1712 * secondary processors. The execution environment is relocation off.
1713 * The paca for this processor has the following fields initialized at
1715 * 1. Processor number
1716 * 2. Segment table pointer (virtual address)
1717 * On entry the following are set:
1718 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1719 * r24 = cpu# (in Linux terms)
1720 * r13 = paca virtual address
1721 * SPRG3 = paca virtual address
1723 _GLOBAL(__secondary_start)
1724 /* Set thread priority to MEDIUM */
1730 /* Do early setup for that CPU (stab, slb, hash table pointer) */
1731 bl .early_setup_secondary
1733 /* Initialize the kernel stack. Just a repeat for iSeries. */
1734 LOADADDR(r3,current_set)
1735 sldi r28,r24,3 /* get current_set[cpu#] */
1737 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1738 std r1,PACAKSAVE(r13)
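/* PACAKSAVE is what EXCEPTION_PROLOG_COMMON loads into r1 when an
 * exception arrives from user mode, so it must point at this CPU's
 * kernel stack before this CPU can take exceptions from userspace. */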
1740 /* Clear backchain so we get nice backtraces */
1744 /* enable MMU and jump to start_secondary */
1745 LOADADDR(r3,.start_secondary_prolog)
1746 SET_REG_TO_CONST(r4, MSR_KERNEL)
1747 #ifdef DO_SOFT_DISABLE
1753 b . /* prevent speculative execution */
1756 * Running with relocation on at this point. All we want to do is
1757 * zero the stack back-chain pointer before going into C code.
1759 _GLOBAL(start_secondary_prolog)
1761 std r3,0(r1) /* Zero the stack frame pointer */
1767 * This subroutine clobbers r11 and r12
1769 _GLOBAL(enable_64b_mode)
1770 mfmsr r11 /* grab the current MSR */
1772 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1775 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1781 #ifdef CONFIG_PPC_MULTIPLATFORM
1783 * This is where the main kernel code starts.
1785 _STATIC(start_here_multiplatform)
1786 /* get a new offset, now that the kernel has moved. */
1790 /* Clear out the BSS. It may already have been done in prom_init,
1791 * but that's irrelevant since prom_init will soon
1792 * be detached from the kernel completely. Besides, we need
1793 * to clear it now for kexec-style entry. */
1795 LOADADDR(r11,__bss_stop)
1796 LOADADDR(r8,__bss_start)
1797 sub r11,r11,r8 /* bss size */
1798 addi r11,r11,7 /* round up to an even double word */
1799 rldicl. r11,r11,61,3 /* shift right by 3 */
1803 mtctr r11 /* zero this many doublewords */
1810 mtmsrd r6 /* RI on */
1813 /* Start up the second thread on cpu 0 */
1816 cmpwi r3,0x34 /* Pulsar */
1818 cmpwi r3,0x36 /* Icestar */
1820 cmpwi r3,0x37 /* SStar */
1822 b 91f /* HMT not supported */
1824 bl .hmt_start_secondary
1828 /* The following gets the stack and TOC set up with the regs */
1829 /* pointing to the real addr of the kernel stack. This is */
1830 /* all done to support the C function call below which sets */
1831 /* up the htab. This is done because we have relocated the */
1832 /* kernel but are still running in real mode. */
1834 LOADADDR(r3,init_thread_union)
1837 /* set up a stack pointer (physical address) */
1838 addi r1,r3,THREAD_SIZE
1840 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1842 /* set up the TOC (physical address) */
1843 LOADADDR(r2,__toc_start)
1848 LOADADDR(r3,cpu_specs)
1850 LOADADDR(r4,cur_cpu_spec)
1855 /* Save some low level config HIDs of CPU0 to be copied to
1856 * other CPUs later on, or used for suspend/resume
1858 bl .__save_cpu_setup
1861 /* Setup a valid physical PACA pointer in SPRG3 for early_setup
1862 * note that boot_cpuid can always be 0 nowadays since there is
1863 * nowhere it can be initialized differently before we reach this code. */
1866 LOADADDR(r27, boot_cpuid)
1870 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1871 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1872 add r13,r13,r24 /* for this processor. */
1873 add r13,r13,r26 /* convert to physical addr */
1874 mtspr SPRN_SPRG3,r13
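/* Note that the paca pointer stashed in SPRG3 here is a physical
 * address (r26, the relocation offset, was added above) because
 * early_setup runs with the MMU off; start_here_common later replaces
 * it with the virtual address. */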
1876 /* Do very early kernel initializations, including initial hash table,
1877 * stab and slb setup before we turn on relocation. */
1879 /* Restore parameters passed from prom_init/kexec */
1883 LOADADDR(r3,.start_here_common)
1884 SET_REG_TO_CONST(r4, MSR_KERNEL)
1888 b . /* prevent speculative execution */
1889 #endif /* CONFIG_PPC_MULTIPLATFORM */
1891 /* This is where all platforms converge execution */
1892 _STATIC(start_here_common)
1893 /* relocation is on at this point */
1895 /* The following code sets up the SP and TOC now that we are */
1896 /* running with translation enabled. */
1898 LOADADDR(r3,init_thread_union)
1900 /* set up the stack */
1901 addi r1,r3,THREAD_SIZE
1903 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1905 /* Apply the CPU-specific fixups (nop out sections not relevant to this CPU) */
1909 bl .do_cpu_ftr_fixups
1911 LOADADDR(r26, boot_cpuid)
1914 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1915 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1916 add r13,r13,r24 /* for this processor. */
1917 mtspr SPRN_SPRG3,r13
1919 /* ptr to current */
1920 LOADADDR(r4,init_task)
1921 std r4,PACACURRENT(r13)
1925 std r1,PACAKSAVE(r13)
1929 /* Load up the kernel context */
1931 #ifdef DO_SOFT_DISABLE
1933 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
1935 ori r5,r5,MSR_EE /* Hard Enabled */
1943 LOADADDR(r5, hmt_thread_data)
1946 cmpwi r7,0x34 /* Pulsar */
1948 cmpwi r7,0x36 /* Icestar */
1950 cmpwi r7,0x37 /* SStar */
1953 90: mfspr r6,SPRN_PIR
1956 91: mfspr r6,SPRN_PIR
1960 bl .hmt_start_secondary
1963 __hmt_secondary_hold:
1964 LOADADDR(r5, hmt_thread_data)
1974 93: andi. r6,r6,0x3f
1988 b .pSeries_secondary_smp_init
1991 _GLOBAL(hmt_start_secondary)
1992 LOADADDR(r4,__hmt_secondary_hold)
1994 mtspr SPRN_NIADORM, r4
1995 mfspr r4, SPRN_MSRDORM
1998 mtspr SPRN_MSRDORM, r4
2007 mfspr r4, SPRN_CTRLF
2009 mtspr SPRN_CTRLT, r4
2014 * We put a few things here that have to be page-aligned.
2015 * This stuff goes at the beginning of the bss, which is page-aligned.
2021 .globl empty_zero_page
2025 .globl swapper_pg_dir
2030 * This space gets a copy of optional info passed to us by the bootstrap.
2031 * It is used to pass parameters into the kernel like root=/dev/sda1, etc.
2035 .space COMMAND_LINE_SIZE