2 * linux/arch/arm/kernel/entry-common.S
4 * Copyright (C) 2000 Russell King
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 #include <linux/config.h>
12 #include <asm/unistd.h>
14 #include "entry-header.S"
19 * This is the fast syscall return path. We do as little as
20 * possible here, and this includes saving r0 back into the SVC
@ NOTE(review): IRQs must remain disabled from here until the final
@ `movs pc, lr`, so no new work can be flagged after the TI_FLAGS test.
24 disable_irq @ disable interrupts
25 ldr r1, [tsk, #TI_FLAGS]
26 tst r1, #_TIF_WORK_MASK @ any pending work (resched/signal/etc)?
29 @ fast_restore_user_regs
30 ldr r1, [sp, #S_OFF + S_PSR] @ get calling cpsr
31 ldr lr, [sp, #S_OFF + S_PC]! @ get pc; writeback leaves sp -> S_PC slot
32 msr spsr_cxsf, r1 @ save in spsr_svc
33 ldmdb sp, {r1 - lr}^ @ get calling r1 - lr (user bank; r0 already holds return value)
35 add sp, sp, #S_FRAME_SIZE - S_PC @ discard the rest of the saved frame
36 movs pc, lr @ return & move spsr_svc into cpsr
39 * Ok, we need to do extra processing, enter the slow path.
@ Save the syscall return value back into the frame before any C code
@ (scheduler / signal delivery) can clobber r0; writeback repoints sp.
42 str r0, [sp, #S_R0+S_OFF]! @ returned r0
44 tst r1, #_TIF_NEED_RESCHED @ reschedule requested?
46 tst r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING @ signal/resume notification pending?
49 mov r2, why @ 'syscall'
51 b ret_slow_syscall @ Check work again
56 * "slow" syscall return path. "why" tells us if this was a real syscall.
60 disable_irq @ disable interrupts
61 ldr r1, [tsk, #TI_FLAGS]
62 tst r1, #_TIF_WORK_MASK @ more work pending? re-enter work path if so
65 @ slow_restore_user_regs
66 ldr r1, [sp, #S_PSR] @ get calling cpsr
67 ldr lr, [sp, #S_PC]! @ get pc; writeback leaves sp -> S_PC slot
68 msr spsr_cxsf, r1 @ save in spsr_svc
69 ldmdb sp, {r0 - lr}^ @ get calling r0 - lr (user bank; r0 restored here too)
71 add sp, sp, #S_FRAME_SIZE - S_PC @ discard the rest of the saved frame
72 movs pc, lr @ return & move spsr_svc into cpsr
75 * This is how we return from a fork.
@ A traced child must report syscall exit to its tracer before
@ returning to user space.
80 ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
82 tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
85 mov r0, #1 @ trace exit [IP = 1]
92 /*=============================================================================
94 *-----------------------------------------------------------------------------
97 /* If we're optimising for StrongARM the resulting code won't
98 run on an ARM7 and we can save a couple of instructions.
100 #ifdef CONFIG_CPU_ARM710
101 #define A710(code...) code
@ ARM710 bug path: restore the user registers and unwind the frame.
103 ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
105 add sp, sp, #S_FRAME_SIZE
108 #define A710(code...)
@ SWI entry: build a pt_regs-style frame on the SVC stack.
113 sub sp, sp, #S_FRAME_SIZE
114 stmia sp, {r0 - r12} @ Calling r0 - r12
116 stmdb r8, {sp, lr}^ @ Calling sp, lr (user-mode banked registers)
117 mrs r8, spsr @ called from non-FIQ mode, so ok.
118 str lr, [sp, #S_PC] @ Save calling PC
119 str r8, [sp, #S_PSR] @ Save CPSR
120 str r0, [sp, #S_OLD_R0] @ Save OLD_R0 (kept for syscall restart)
124 * Get the system call number.
126 #if defined(CONFIG_AEABI)
128 @ syscall number is in scno (r7) already.
@ A710: re-read and validate the SWI encoding (prefetch-bug workaround).
130 A710( ldr ip, [lr, #-4] @ get SWI instruction )
131 A710( and ip, ip, #0x0f000000 @ check for SWI )
132 A710( teq ip, #0x0f000000 )
133 A710( bne .Larm710bug )
134 #elif defined(CONFIG_ARM_THUMB)
@ Thumb callers pass the number in r7; ARM callers embed it in the
@ SWI instruction, so re-read the instruction word at lr - 4.
135 tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs
136 addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in
137 ldreq scno, [lr, #-4]
139 ldr scno, [lr, #-4] @ get SWI instruction
140 A710( and ip, scno, #0x0f000000 @ check for SWI )
141 A710( teq ip, #0x0f000000 )
142 A710( bne .Larm710bug )
145 #ifdef CONFIG_ALIGNMENT_TRAP
@ Restore the kernel's alignment-trap setting in CP15 c1 on entry.
146 ldr ip, __cr_alignment
148 mcr p15, 0, ip, c1, c0 @ update control register
153 ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
155 bic scno, scno, #0xff000000 @ mask off SWI op-code
156 eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
158 adr tbl, sys_call_table @ load syscall table pointer
159 stmdb sp!, {r4, r5} @ push fifth and sixth args
160 tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
163 cmp scno, #NR_syscalls @ check upper syscall limit
164 adr lr, ret_fast_syscall @ return address
165 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
@ Out-of-range number: either an ARM-private call or ENOSYS.
168 2: mov why, #0 @ no longer a real syscall
169 cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
170 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
172 b sys_ni_syscall @ not private func
175 * This is the really slow path. We're going to be doing
176 * context switches, and waiting for our parent to respond.
180 mov r0, #0 @ trace entry [IP = 0]
183 adr lr, __sys_trace_return @ return address
184 add r1, sp, #S_R0 + S_OFF @ pointer to regs
185 cmp scno, #NR_syscalls @ check upper syscall limit
186 ldmccia r1, {r0 - r3} @ have to reload r0 - r3 (tracer may have changed them)
187 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
@ Trace-return path: stash r0, then report syscall exit to the tracer.
191 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
193 mov r0, #1 @ trace exit [IP = 1]
198 #ifdef CONFIG_ALIGNMENT_TRAP
@ __cr_alignment: saved CP15 control-register value used on SWI entry.
199 .type __cr_alignment, #object
@ sys_call_table: array of sys_* handler pointers indexed by scno.
204 .type sys_call_table, #object
205 ENTRY(sys_call_table)
208 /*============================================================================
209 * Special system call wrappers
211 @ r0 = syscall number
@ sys_syscall: indirect syscall -- r0 carries the real syscall number
@ and r1-r6 its arguments, so every argument shuffles down one slot.
213 .type sys_syscall, #function
216 eor scno, r0, #__NR_SYSCALL_BASE
217 cmp scno, #__NR_syscall - __NR_SYSCALL_BASE @ refuse a recursive sys_syscall
218 cmpne scno, #NR_syscalls @ check range
219 stmloia sp, {r5, r6} @ shuffle args (r5/r6 become the stacked fifth/sixth)
224 ldrlo pc, [tbl, scno, lsl #2]
@ Wrappers for handlers that need extra context from the saved frame
@ (signal delivery, statfs64 size fixups, mmap2 offset conversion).
@ NOTE(review): wrapper bodies are elided in this chunk -- only the
@ labels and a few interior instructions are visible here.
245 sys_sigsuspend_wrapper:
249 sys_rt_sigsuspend_wrapper:
253 sys_sigreturn_wrapper:
257 sys_rt_sigreturn_wrapper:
261 sys_sigaltstack_wrapper:
262 ldr r2, [sp, #S_OFF + S_SP] @ pass saved user sp as third argument
265 sys_statfs64_wrapper:
270 sys_fstatfs64_wrapper:
276 * Note: off_4k (r5) is always units of 4K. If we can't do the requested
277 * offset, we return EINVAL.
282 moveq r5, r5, lsr #PAGE_SHIFT - 12 @ convert 4K units into page units
292 #ifdef CONFIG_OABI_COMPAT
294 * These are syscalls with argument register differences
@ NOTE(review): the body of sys_oabi_ftruncate64 (and the matching
@ #endif) continues beyond this chunk -- definition incomplete here.
310 sys_oabi_ftruncate64: