#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)
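
/*
 * Illustrative sketch: the classic pattern for letting a kernel buffer
 * pass the user-access checks is to widen the segment limit around the
 * access and restore it afterwards.  The function name is hypothetical.
 */
static inline void example_kernel_access(void)
{
	mm_segment_t old_fs = get_fs();	/* save the current limit */

	set_fs(KERNEL_DS);	/* lift the limit: kernel addresses now pass */
	/* ... get_user()/copy_from_user() on a kernel pointer is legal ... */
	set_fs(old_fs);		/* always restore the saved limit */
}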
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif
#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
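
/*
 * Illustrative sketch: validate a user range once with access_ok(), after
 * which the unchecked __get_user()/__copy_from_user() variants may be used
 * on that range.  The function name is hypothetical.
 */
static inline int example_check_range(const void __user *uptr,
		unsigned long len)
{
	if (!access_ok(VERIFY_READ, uptr, len))
		return -EFAULT;	/* range crosses the segment limit */
	/* safe to use the __xxx accessors on uptr .. uptr+len-1 now */
	return 0;
}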
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn;	/* address allowed to fault */
	unsigned long fixup;	/* address to continue at */
};
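
/*
 * Illustrative sketch of how the fault handler conceptually consumes this
 * table: find the entry whose insn matches the faulting address and resume
 * at its fixup.  The real kernel keeps the table sorted and binary-searches
 * it; this linear scan and the function name are hypothetical.
 */
static inline unsigned long example_search_fixup(
		const struct exception_table_entry *first,
		const struct exception_table_entry *last,
		unsigned long faulting_insn)
{
	const struct exception_table_entry *e;

	for (e = first; e <= last; e++)
		if (e->insn == faulting_insn)
			return e->fixup;	/* continue execution here */
	return 0;				/* no fixup: a genuine oops */
}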
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * The "user64" versions of the user access functions are versions that
 * allow access of 64-bit data.  The "get_user" functions do not
 * properly handle 64-bit data because the value gets downcast to a long.
 * The "put_user" functions already handle 64-bit data properly, but we
 * add "user64" versions for completeness.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#ifndef __powerpc64__
#define __get_user64(x, ptr) \
	__get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user64(x, ptr) __put_user(x, ptr)
#endif
#ifdef __powerpc64__
#define __get_user_unaligned	__get_user
#define __put_user_unaligned	__put_user
#endif
extern long __put_user_bad(void);

#ifdef __powerpc64__
#define __EX_TABLE_ALIGN	"3"
#define __EX_TABLE_TYPE		"llong"
#else
#define __EX_TABLE_ALIGN	"2"
#define __EX_TABLE_TYPE		"long"
#endif
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align " __EX_TABLE_ALIGN "\n"		\
		"	." __EX_TABLE_TYPE " 1b,3b\n"		\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align " __EX_TABLE_ALIGN "\n"		\
		"	." __EX_TABLE_TYPE " 1b,4b\n"		\
		"	." __EX_TABLE_TYPE " 2b,4b\n"		\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)
#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	might_sleep();						\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), (ptr), (size), __pu_err);		\
	__pu_err;						\
})
#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_sleep();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})
extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	" op " %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.align " __EX_TABLE_ALIGN "\n"	\
		"	." __EX_TABLE_TYPE " 1b,3b\n"	\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.align " __EX_TABLE_ALIGN "\n"	\
		"	." __EX_TABLE_TYPE " 1b,4b\n"	\
		"	." __EX_TABLE_TYPE " 2b,4b\n"	\
		".previous"				\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)
#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__chk_user_ptr(ptr);					\
	might_sleep();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	long long __gu_val;					\
	__chk_user_ptr(ptr);					\
	might_sleep();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#endif /* __powerpc64__ */
#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_sleep();							\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})
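
/*
 * Illustrative sketch: typical use of the checked single-value accessors.
 * Both return 0 on success and -EFAULT on a bad user address; the access
 * size is picked from the pointer type.  The function name is hypothetical.
 */
static inline int example_bump_word(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* 4-byte load, checked */
		return -EFAULT;
	if (put_user(val + 1, uptr))	/* 4-byte store, checked */
		return -EFAULT;
	return 0;
}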
/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);
#ifndef __powerpc64__

extern inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_READ, from, n))
		return __copy_tofrom_user((__force void __user *)to, from, n);
	if ((unsigned long)from < TASK_SIZE) {
		/* copy what lies below the segment limit, report the rest */
		over = (unsigned long)from + n - TASK_SIZE;
		return __copy_tofrom_user((__force void __user *)to, from,
				n - over) + over;
	}
	return n;
}

extern inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	if ((unsigned long)to < TASK_SIZE) {
		over = (unsigned long)to + n - TASK_SIZE;
		return __copy_tofrom_user(to, (__force void __user *)from,
				n - over) + over;
	}
	return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);
static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
		/* fall through to the full copy on a fault or on an
		 * unhandled constant size (3, 5, 6, 7) */
	}
	return __copy_tofrom_user((__force void __user *)to, from, n);
}
static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
		/* fall through on a fault or an unhandled constant size */
	}
	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

#endif /* __powerpc64__ */
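
/*
 * Illustrative sketch: copying a whole structure from user space.  The
 * copy routines return the number of bytes they could NOT copy, so any
 * nonzero result means failure.  The struct and function are hypothetical.
 */
struct example_request {
	unsigned long addr;
	unsigned long len;
};

static inline int example_fetch_request(struct example_request *kreq,
		const struct example_request __user *ureq)
{
	if (copy_from_user(kreq, ureq, sizeof(*kreq)))
		return -EFAULT;	/* partial or no copy */
	return 0;
}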
static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_sleep();
#ifndef __powerpc64__
	return __copy_tofrom_user((__force void __user *)to, from, size);
#else /* __powerpc64__ */
	return __copy_from_user_inatomic(to, from, size);
#endif /* __powerpc64__ */
}
static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_sleep();
#ifndef __powerpc64__
	return __copy_tofrom_user(to, (__force void __user *)from, size);
#else /* __powerpc64__ */
	return __copy_to_user_inatomic(to, from, size);
#endif /* __powerpc64__ */
}
extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_sleep();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
#ifndef __powerpc64__
	if ((unsigned long)addr < TASK_SIZE) {
		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
		return __clear_user(addr, size - over) + over;
	}
#endif /* __powerpc64__ */
	return size;
}
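
/*
 * Illustrative sketch: zeroing the uncopied tail of a user buffer, e.g.
 * when a read produced less data than was asked for.  clear_user() also
 * returns the number of bytes left untouched.  Hypothetical name.
 */
static inline int example_zero_tail(void __user *buf, unsigned long copied,
		unsigned long total)
{
	if (clear_user((char __user *)buf + copied, total - copied))
		return -EFAULT;
	return 0;
}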
extern int __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long strncpy_from_user(char *dst, const char __user *src,
		long count)
{
	might_sleep();
	if (likely(access_ok(VERIFY_READ, src, 1)))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}
/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 for error
 */
extern int __strnlen_user(const char __user *str, long len, unsigned long top);
/*
 * Returns the length of the string at str (including the null byte),
 * or 0 if we hit a page we can't access,
 * or something > len if we didn't find a null byte.
 *
 * The `top' parameter to __strnlen_user is to make sure that
 * we can never overflow from the user area into kernel space.
 */
static inline int strnlen_user(const char __user *str, long len)
{
	unsigned long top = current->thread.fs.seg;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len, top);
}

#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)
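
/*
 * Illustrative sketch: a bounded fetch of a NUL-terminated user string
 * into a fixed kernel buffer, sizing it with strnlen_user() first.  The
 * function name and the 64-byte limit are hypothetical.
 */
static inline long example_fetch_name(char *dst, const char __user *src)
{
	long len = strnlen_user(src, 64);	/* includes the NUL */

	if (len == 0 || len > 64)
		return -EFAULT;	/* faulted, or no NUL within the bound */
	return strncpy_from_user(dst, src, len);
}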
#endif	/* __ASSEMBLY__ */
#endif	/* __KERNEL__ */

#endif	/* _ARCH_POWERPC_UACCESS_H */