/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
	.tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
	.globl system_call_common
system_call_common:
	addi	r1,r1,-INT_FRAME_SIZE
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
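	/*
	 * Hedged C sketch (not code from this file) of why the marker
	 * matters: a stack unwinder can recognise a frame that holds a
	 * struct pt_regs by checking this slot, roughly:
	 *
	 *	if (sp[marker_slot] == STACK_FRAME_REGS_MARKER)
	 *		regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
	 *
	 * "marker_slot" is an illustrative name for the -16(r9) location
	 * written above.
	 */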
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
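	/*
	 * In C terms, the two stb instructions above amount to (sketch,
	 * using the paca fields these asm-offsets resolve to):
	 *
	 *	local_paca->soft_enabled = 1;	/* PACASOFTIRQEN */
	 *	local_paca->hard_enabled = 1;	/* PACAHARDIRQEN */
	 *
	 * i.e. the syscall body runs with the lazy-disable interrupt
	 * state marked fully enabled.
	 */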
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* coming from kernel? */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	b	hardware_interrupt_entry
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
	addi	r9,r1,STACK_FRAME_OVERHEAD

	clrrdi	r11,r1,THREAD_SHIFT
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A

syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls

system_call:			/* label this so stack traces look sane */
/*
 * We need to vector to the 32-bit or the default sys_call_table here,
 * based on the caller's run-mode / personality.
 */
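/*
 * Hedged C sketch of the dispatch below; the names and the paired
 * table layout are illustrative only (the "addi r11,r11,8" selects
 * the compat entry point within a table slot):
 *
 *	handler = is_32bit_task() ? sys_call_table[nr].compat
 *				  : sys_call_table[nr].native;
 *	ret = handler(r3, r4, r5, r6, r7, r8);
 */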
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	addi	r11,r11,8	/* use 32-bit syscall entries */
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

	bl	.do_show_syscall_exit

	clrrdi	r12,r1,THREAD_SHIFT
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work

	stdcx.	r0,0,r1			/* to clear the reservation */
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mtmsrd	r11,1		/* clear MSR.RI */

	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */

	b	.		/* prevent speculative execution */
	oris	r5,r5,0x1000	/* Set SO bit in CR */

/* Traced system call support */
syscall_dotrace:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	b	syscall_dotrace_cont
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 * If TIF_NOERROR is set, just save r3 as it is.
	 */
	andi.	r0,r9,_TIF_RESTOREALL
0:	cmpld	r3,r11		/* r11 holds -LAST_ERRNO */
	andi.	r0,r9,_TIF_NOERROR
	oris	r5,r5,0x1000	/* Set SO bit in CR */
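	/*
	 * The user-visible error convention, as a hedged C sketch:
	 *
	 *	if (ret >= (unsigned long)-LAST_ERRNO) {
	 *		regs->ccr |= 0x10000000;	/* CR0.SO */
	 *		ret = -ret;			/* positive errno */
	 *	}
	 *
	 * Userspace (e.g. glibc) tests the SO bit to tell an errno
	 * value apart from a legitimate large return value.
	 */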
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)

	/* Clear per-syscall TIF flags if any are set. */
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite
	/* Re-enable interrupts */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
/* Save non-volatile GPRs, if not already saved. */

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */
_GLOBAL(ppc32_swapcontext)
	bl	.compat_sys_swapcontext

_GLOBAL(ppc64_swapcontext)

_GLOBAL(ret_from_fork)
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork", which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
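/*
 * Roughly how this is reached from C (illustrative sketch; the real
 * caller is the scheduler's context-switch path):
 *
 *	last = _switch(&prev->thread, &next->thread);
 *
 * _switch() "returns" on the new task's kernel stack, with the return
 * value identifying the task that was switched away from.
 */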
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	mflr	r20		/* Return to switch caller */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable AltiVec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */
	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	beq	2f		/* if yes, don't slbie it */
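	/*
	 * The two tests above, as a hedged C sketch (esid() stands in
	 * for the clrrdi extractions):
	 *
	 *	if (esid(new_sp) == KERNEL_ESID ||	/* bolted segment */
	 *	    esid(new_sp) == esid(cur_sp))	/* already current */
	 *		goto skip_slbie;
	 */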
	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */
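	/*
	 * The store ordering above is deliberate; as a C sketch:
	 *
	 *	shadow->esid = 0;		/* invalidate entry first */
	 *	shadow->vsid = new_vsid;	/* safe while invalid */
	 *	shadow->esid = new_esid;	/* make valid again last */
	 *
	 * so the entry is never seen valid with a stale VSID.
	 */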
	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the erratum
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	 * because we don't need to leave the 288-byte ABI gap at the
	 * top of the kernel stack.
	 */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	/* r3-r13 are destroyed -- Cort */

	/* convert old thread to its task_struct for return value */
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	addi	r1,r1,SWITCH_FRAME_SIZE
_GLOBAL(ret_from_except)
	bne	.ret_from_except_lite
_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	mtmsrd	r9,1		/* Update machine state */
#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
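	/*
	 * Equivalent logic of the rlwimi/and. pair (C sketch):
	 *
	 *	mask = _TIF_NEED_RESCHED;
	 *	if (regs->msr & MSR_PR)		/* returning to user? */
	 *		mask |= _TIF_SIGPENDING;
	 *	if (ti->flags & mask)
	 *		goto do_work;
	 */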
#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	andi.	r0,r4,_TIF_USER_WORK_MASK
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	beq+	4f		/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */

4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
	stb	r5,PACASOFTIRQEN(r13)

	/* extract EE bit and use it to restore paca->hard_enabled */
	rldicl	r4,r3,49,63	/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)
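	/*
	 * In C terms (sketch): copy the interrupted context's MSR.EE
	 * bit into the paca so the lazy-disable state matches it:
	 *
	 *	local_paca->hard_enabled = (regs->msr >> MSR_EE_LG) & 1;
	 *
	 * where MSR_EE_LG is bit position 15, matching the rldicl above.
	 */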
	stdcx.	r0,0,r1		/* to clear the reservation */

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	andc	r4,r4,r0	/* r0 contains MSR_RI here */

	/*
	 * r13 is our per-CPU area; only restore it if we are returning
	 * to userspace.
	 */
	ACCOUNT_CPU_USER_EXIT(r2, r4)

	b	.		/* prevent speculative execution */
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */

	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	crandc	eq,cr1*4+eq,eq
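	/*
	 * The checks above combine to (hedged C sketch):
	 *
	 *	if (preempt_count() == 0 && irqs_soft_enabled(regs))
	 *		preempt_schedule();	/* else fall through to restore */
	 *
	 * irqs_soft_enabled() is an illustrative name for the saved
	 * soft-enable test.
	 */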
	/* Here we are preempting the current task */
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	mtmsrd	r10,1		/* re-enable interrupts */
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	andi.	r0,r4,_TIF_NEED_RESCHED
	/* Enable interrupts */
	andi.	r0,r4,_TIF_NEED_RESCHED
	b	.ret_from_except_lite
	addi	r4,r1,STACK_FRAME_OVERHEAD

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32-bit mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32-bit mode, it clobbers the high
	 * order half of all registers that it saves.  We therefore save
	 * those registers RTAS might touch to the stack.  (r0, r3-r13
	 * are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */
	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */

	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
	/* Hard-disable interrupts */

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA, which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)
	/* Set up our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
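	/*
	 * Real-mode conversion sketch: clearing the top two bits turns
	 * a kernel linear-map address (0xC000...) into the real address
	 * usable with the MMU off:
	 *
	 *	real = virt & 0x3fffffffffffffffUL;
	 */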
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI

	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP

	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	b	.	/* prevent speculative execution */
_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	b	.	/* prevent speculative execution */
_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32-bit mode, it clobbers the high
	 * order half of all registers that it saves.  We therefore save
	 * those registers PROM might touch to the stack.  (r0, r3-r13
	 * are caller saved)
	 */

	/* Get the PROM entrypoint */
	/* Switch the MSR to 32-bit mode */
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
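	/*
	 * In C terms (sketch): drop the 64-bit execution bits before
	 * entering PROM:
	 *
	 *	msr &= ~(MSR_SF | MSR_ISF);	/* 32-bit mode + interrupts */
	 */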
	/* Restore arguments & enter PROM here... */

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF.
	 */

	/* Restore the MSR (back to 64 bits) */

	/* Restore other registers */

	addi	r1,r1,PROM_FRAME_SIZE