include/asm-x86/uaccess_32.h (linux-2.6)
x86: use k modifier for 4-byte access.
#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax, and %ebx
 * is clobbered (as the asm statements below declare).
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

#define __put_user_x(size, x, ptr)                              \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#define __put_user_8(x, ptr)                                    \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

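/*
 * For reference, __put_user_x(4, __pu_val, ptr) expands to roughly:
 *
 *      asm volatile("call __put_user_4" : "=a" (__ret_pu)
 *                   : "0" ((typeof(*(ptr)))(__pu_val)), "c" (ptr) : "ebx");
 *
 * i.e. the value is passed in %eax (tied to the output operand), the
 * user pointer in %ecx, and the error code comes back in %eax,
 * matching the convention noted above.
 */
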
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#ifdef CONFIG_X86_WP_WORKS_OK

#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr);                 \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_val, ptr);                 \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_val, ptr);                 \
                break;                                          \
        case 8:                                                 \
                __put_user_8(__pu_val, ptr);                    \
                break;                                          \
        default:                                                \
                /* __put_user_X is never defined: link error */ \
                __put_user_x(X, __pu_val, ptr);                 \
                break;                                          \
        }                                                       \
        __ret_pu;                                               \
})

#else
#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pus_tmp = x;                       \
        __ret_pu = 0;                                           \
        if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,         \
                                       sizeof(*(ptr))) != 0))   \
                __ret_pu = -EFAULT;                             \
        __ret_pu;                                               \
})

#endif

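/*
 * Example (illustrative sketch, not part of this header; the handler
 * and its argument layout are hypothetical): returning a single u32
 * to user space.
 *
 *      static int foo_get_version(u32 __user *arg)
 *      {
 *              u32 version = 42;
 *
 *              return put_user(version, arg);
 *      }
 *
 * put_user() returns 0 on success and -EFAULT on a faulting store, so
 * the return value can be handed straight back to the caller.
 */
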
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

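/*
 * Example sketch (hypothetical caller): the double-underscore variant
 * is only safe after an explicit access_ok() check, as required above.
 *
 *      static int fetch_val(const int __user *uptr, int *out)
 *      {
 *              if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *                      return -EFAULT;
 *              return __get_user(*out, uptr);
 *      }
 */
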
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

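/*
 * Example sketch (hypothetical caller): one access_ok() check can be
 * amortized over several __put_user() stores to adjacent fields.
 *
 *      static int store_pair(u32 __user *uptr, u32 a, u32 b)
 *      {
 *              if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(u32)))
 *                      return -EFAULT;
 *              if (__put_user(a, uptr) || __put_user(b, uptr + 1))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */
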
#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __pu_err;                                          \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __pu_err;                                               \
})

#define __put_user_u64(x, addr, err)                                    \
        asm volatile("1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
                     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))

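/*
 * How the fixup above works: each _ASM_EXTABLE(1b, 4b) entry records a
 * (faulting instruction, fixup address) pair in the exception table.
 * If the store at label 1 or 2 faults, the page fault handler looks up
 * the faulting address, finds the entry, and resumes at label 4, which
 * loads -EFAULT into the error variable and jumps back past the stores.
 */
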
#ifdef CONFIG_X86_WP_WORKS_OK

#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break;                                                  \
        case 8:                                                         \
                __put_user_u64((__typeof__(*ptr))(x), ptr, retval);     \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

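/*
 * The "k" operand modifier in the 4-byte case is what the commit
 * subject refers to.  With __put_user_asm(x, ptr, retval, "l", "k",
 * "ir", errret), the template "mov"itype" %"rtype"1,%2" becomes
 *
 *      movl %k1,%2
 *
 * and %k1 makes gcc print the 32-bit name of the register holding
 * operand 1, so the register width always matches "movl".
 */
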
#else

#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        __typeof__(*(ptr)) __pus_tmp = x;                               \
        retval = 0;                                                     \
                                                                        \
        if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))    \
                retval = errret;                                        \
} while (0)

#endif

/*
 * Treat the user pointer as pointing to a large structure, so that the
 * "m" constraints below are taken to cover the bytes actually accessed.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        movl %3,%0\n"                           \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err)                                       \
                     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        long __gu_err;                                                  \
        unsigned long __gu_val;                                         \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
        __gu_err;                                                       \
})

/* Never defined: a bad size in __get_user_size() fails at link time. */
extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "", "=r", errret);  \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        movl %3,%0\n"                           \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype (x)                            \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

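/*
 * Note the fixup path in __get_user_asm: besides loading the error
 * code, it zeroes the destination register (the xor of operand 1 with
 * itself), which is what guarantees the documented "@x is set to zero
 * on error" behaviour of __get_user().
 */
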
unsigned long __must_check __copy_to_user_ll
                (void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
                (void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
                (void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
                (void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
                (void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user pages are pinned, so that
 * we do not take a page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __put_user_size(*(u8 *)from, (u8 __user *)to,
                                        1, ret, 1);
                        return ret;
                case 2:
                        __put_user_size(*(u16 *)from, (u16 __user *)to,
                                        2, ret, 2);
                        return ret;
                case 4:
                        __put_user_size(*(u32 *)from, (u32 __user *)to,
                                        4, ret, 4);
                        return ret;
                }
        }
        return __copy_to_user_ll(to, from, n);
}

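/*
 * Note that the errret argument above is the byte count (1, 2 or 4)
 * rather than -EFAULT: these copy helpers return the number of bytes
 * NOT copied, so on a fault the whole request is reported as uncopied,
 * exactly as the comment above describes.
 */
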
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_sleep();
        return __copy_to_user_inatomic(to, from, n);
}

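/*
 * Example sketch (hypothetical caller and struct): copying a small
 * kernel structure out to user space after a single access_ok() check.
 *
 *      struct foo_info { u32 a; u32 b; };
 *
 *      static int foo_read_info(void __user *ubuf)
 *      {
 *              struct foo_info info = { .a = 1, .b = 2 };
 *
 *              if (!access_ok(VERIFY_WRITE, ubuf, sizeof(info)))
 *                      return -EFAULT;
 *              if (__copy_to_user(ubuf, &info, sizeof(info)))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */
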
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        /*
         * Avoid zeroing the tail if the copy fails.
         * If 'n' is constant and 1, 2, or 4, we still zero on failure,
         * but as the zeroing behaviour is only significant when n is
         * not constant, that shouldn't be a problem.
         */
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        return ret;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        return ret;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        return ret;
                }
        }
        return __copy_from_user_ll_nozero(to, from, n);
}

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_sleep();
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        return ret;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        return ret;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        return ret;
                }
        }
        return __copy_from_user_ll(to, from, n);
}

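/*
 * Example sketch (hypothetical caller; foo_req and do_foo are made up):
 * pulling a fixed-size request from user space.  On a partial fault the
 * tail of "req" is zero-padded and a nonzero uncopied count is returned.
 *
 *      struct foo_req { u32 op; u32 arg; };
 *
 *      static int foo_parse(const void __user *ubuf)
 *      {
 *              struct foo_req req;
 *
 *              if (!access_ok(VERIFY_READ, ubuf, sizeof(req)))
 *                      return -EFAULT;
 *              if (__copy_from_user(&req, ubuf, sizeof(req)))
 *                      return -EFAULT;
 *              return do_foo(req.op, req.arg);
 *      }
 */
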
#define ARCH_HAS_NOCACHE_UACCESS

/*
 * The _nocache variants below try to avoid polluting the CPU caches
 * with data that will only be read once, using non-temporal stores
 * where the CPU supports them.
 */
static __always_inline unsigned long __copy_from_user_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        might_sleep();
        if (__builtin_constant_p(n)) {
                unsigned long ret;

                switch (n) {
                case 1:
                        __get_user_size(*(u8 *)to, from, 1, ret, 1);
                        return ret;
                case 2:
                        __get_user_size(*(u16 *)to, from, 2, ret, 2);
                        return ret;
                case 4:
                        __get_user_size(*(u32 *)to, from, 4, ret, 4);
                        return ret;
                }
        }
        return __copy_from_user_ll_nocache(to, from, n);
}

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
                                  unsigned long n)
{
        return __copy_from_user_ll_nocache_nozero(to, from, n);
}

unsigned long __must_check copy_to_user(void __user *to,
                                        const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
                                          const void __user *from,
                                          unsigned long n);
long __must_check strncpy_from_user(char *dst, const char __user *src,
                                    long count);
long __must_check __strncpy_from_user(char *dst,
                                      const char __user *src, long count);

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, LONG_MAX)

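/*
 * Example sketch (hypothetical caller): bounding the length check with
 * strnlen_user(), as the comment above recommends, before copying the
 * string in with strncpy_from_user().  The returned length includes
 * the terminating NUL, and 0 signals a fault.
 *
 *      static int foo_set_name(char *dst, const char __user *uname)
 *      {
 *              long len = strnlen_user(uname, 64);
 *
 *              if (len == 0)
 *                      return -EFAULT;
 *              if (len > 64)
 *                      return -EINVAL;
 *              if (strncpy_from_user(dst, uname, len) < 0)
 *                      return -EFAULT;
 *              return 0;
 *      }
 */
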
long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#endif /* __i386_UACCESS_H */