[ARM] nommu: uaccess tweaks
author     Russell King <rmk@dyn-67.arm.linux.org.uk>
           Wed, 21 Jun 2006 19:38:17 +0000 (20:38 +0100)
committer  Russell King <rmk+kernel@arm.linux.org.uk>
           Wed, 28 Jun 2006 16:59:46 +0000 (17:59 +0100)
MMU-less systems have only one address space for all threads, so
neither the usual access_ok() checks nor the exception handling
makes much sense.

Hence, discard the fixup and exception tables at link time, use
memcpy/memset for the user copy/clearing functions, and define
the permission check macros to be constants.

Some of this patch was derived from the equivalent patch by
Hyok S. Choi.

Signed-off-by: Hyok S. Choi <hyok.choi@samsung.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/kernel/armksyms.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/lib/Makefile
include/asm-arm/uaccess.h
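
For illustration only (this sketch is not part of the patch; the
function and buffer names are hypothetical): once the permission
check macros become constants under !CONFIG_MMU, a caller like the
one below still compiles unchanged, but access_ok() is always true
and __copy_from_user() reduces to a plain memcpy() that returns 0.

    #include <linux/fs.h>
    #include <asm/uaccess.h>

    /* Hypothetical example, not from the patch. */
    static ssize_t example_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
    {
            char kbuf[64];

            if (count > sizeof(kbuf))
                    return -EINVAL;
            if (!access_ok(VERIFY_READ, buf, count))    /* nommu: constant-true */
                    return -EFAULT;
            if (__copy_from_user(kbuf, buf, count))     /* nommu: memcpy(), returns 0 */
                    return -EFAULT;
            /* ... consume kbuf ... */
            return count;
    }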

diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index f8bb7abd3e9bda4196593fbaaed8f09bbe37411a..da69e660574bf1510f8f1457d07c7368b7de12e2 100644
@@ -109,11 +109,13 @@ EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 
        /* user mem (segment) */
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+#ifdef CONFIG_MMU
 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
@@ -123,6 +125,7 @@ EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
 EXPORT_SYMBOL(__put_user_4);
 EXPORT_SYMBOL(__put_user_8);
+#endif
 
        /* crypto hash */
 EXPORT_SYMBOL(sha_transform);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 2b254e88595c76e15c233290e0f1339f04e5a621..2df9688a70282fca86ac0fb854a64da305961111 100644
@@ -80,6 +80,10 @@ SECTIONS
                *(.exit.text)
                *(.exit.data)
                *(.exitcall.exit)
+#ifndef CONFIG_MMU
+               *(.fixup)
+               *(__ex_table)
+#endif
        }
 
        .text : {                       /* Real text segment            */
@@ -87,7 +91,9 @@ SECTIONS
                        *(.text)
                        SCHED_TEXT
                        LOCK_TEXT
+#ifdef CONFIG_MMU
                        *(.fixup)
+#endif
                        *(.gnu.warning)
                        *(.rodata)
                        *(.rodata.*)
@@ -142,7 +148,9 @@ SECTIONS
                 */
                . = ALIGN(32);
                __start___ex_table = .;
+#ifdef CONFIG_MMU
                *(__ex_table)
+#endif
                __stop___ex_table = .;
 
                /*
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 7b726b627ea5ee38d85090774ab5d7f555d0c84c..30351cd4560dfcc7052a8be4eaa84620643878a3 100644
@@ -6,28 +6,31 @@
 
 lib-y          := backtrace.o changebit.o csumipv6.o csumpartial.o   \
                   csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
-                  copy_page.o delay.o findbit.o memchr.o memcpy.o    \
+                  delay.o findbit.o memchr.o memcpy.o                \
                   memmove.o memset.o memzero.o setbit.o              \
                   strncpy_from_user.o strnlen_user.o                 \
                   strchr.o strrchr.o                                 \
                   testchangebit.o testclearbit.o testsetbit.o        \
-                  getuser.o putuser.o clear_user.o                   \
                   ashldi3.o ashrdi3.o lshrdi3.o muldi3.o             \
                   ucmpdi2.o lib1funcs.o div64.o sha1.o               \
                   io-readsb.o io-writesb.o io-readsl.o io-writesl.o
 
+mmu-y  := clear_user.o copy_page.o getuser.o putuser.o
+
 # the code in uaccess.S is not preemption safe and
 # probably faster on ARMv3 only
 ifeq ($(CONFIG_PREEMPT),y)
-  lib-y        += copy_from_user.o copy_to_user.o
+  mmu-y        += copy_from_user.o copy_to_user.o
 else
 ifneq ($(CONFIG_CPU_32v3),y)
-  lib-y        += copy_from_user.o copy_to_user.o
+  mmu-y        += copy_from_user.o copy_to_user.o
 else
-  lib-y        += uaccess.o
+  mmu-y        += uaccess.o
 endif
 endif
 
+lib-$(CONFIG_MMU) += $(mmu-y)
+
 ifeq ($(CONFIG_CPU_32v3),y)
   lib-y        += io-readsw-armv3.o io-writesw-armv3.o
 else
diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h
index f909dc75301a15b336395dee77a5f2d763d1951d..87aba57a66c40d5b0f10fe199c3159fa5710a9e3 100644
@@ -40,16 +40,25 @@ struct exception_table_entry
 
 extern int fixup_exception(struct pt_regs *regs);
 
+/*
+ * These two are intentionally not defined anywhere - if the kernel
+ * code generates any references to them, that's a bug.
+ */
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
 /*
  * Note that this is actually 0x1,0000,0000
  */
 #define KERNEL_DS      0x00000000
-#define USER_DS                TASK_SIZE
-
 #define get_ds()       (KERNEL_DS)
+
+#ifdef CONFIG_MMU
+
+#define USER_DS                TASK_SIZE
 #define get_fs()       (current_thread_info()->addr_limit)
 
-static inline void set_fs (mm_segment_t fs)
+static inline void set_fs(mm_segment_t fs)
 {
        current_thread_info()->addr_limit = fs;
        modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
@@ -75,8 +84,6 @@ static inline void set_fs (mm_segment_t fs)
                : "cc"); \
        flag; })
 
-#define access_ok(type,addr,size)      (__range_ok(addr,size) == 0)
-
 /*
  * Single-value transfer routines.  They automatically use the right
  * size if we just have the right pointer type.  Note that the functions
@@ -87,20 +94,10 @@ static inline void set_fs (mm_segment_t fs)
  * fixup code, but there are a few places where it intrudes on the
  * main code path.  When we only write to user space, there is no
  * problem.
- *
- * The "__xxx" versions of the user access functions do not verify the
- * address space - it must have been done previously with a separate
- * "access_ok()" call.
- *
- * The "xxx_error" versions set the third argument to EFAULT if an
- * error occurs, and leave it unchanged on success.  Note that these
- * versions are void (ie, don't return a value as such).
  */
-
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_bad(void);
 
 #define __get_user_x(__r2,__p,__e,__s,__i...)                          \
           __asm__ __volatile__ (                                       \
@@ -131,6 +128,74 @@ extern int __get_user_bad(void);
                __e;                                                    \
        })
 
+extern int __put_user_1(void *, unsigned int);
+extern int __put_user_2(void *, unsigned int);
+extern int __put_user_4(void *, unsigned int);
+extern int __put_user_8(void *, unsigned long long);
+
+#define __put_user_x(__r2,__p,__e,__s)                                 \
+          __asm__ __volatile__ (                                       \
+               __asmeq("%0", "r0") __asmeq("%2", "r2")                 \
+               "bl     __put_user_" #__s                               \
+               : "=&r" (__e)                                           \
+               : "0" (__p), "r" (__r2)                                 \
+               : "ip", "lr", "cc")
+
+#define put_user(x,p)                                                  \
+       ({                                                              \
+               const register typeof(*(p)) __r2 asm("r2") = (x);       \
+               const register typeof(*(p)) __user *__p asm("r0") = (p);\
+               register int __e asm("r0");                             \
+               switch (sizeof(*(__p))) {                               \
+               case 1:                                                 \
+                       __put_user_x(__r2, __p, __e, 1);                \
+                       break;                                          \
+               case 2:                                                 \
+                       __put_user_x(__r2, __p, __e, 2);                \
+                       break;                                          \
+               case 4:                                                 \
+                       __put_user_x(__r2, __p, __e, 4);                \
+                       break;                                          \
+               case 8:                                                 \
+                       __put_user_x(__r2, __p, __e, 8);                \
+                       break;                                          \
+               default: __e = __put_user_bad(); break;                 \
+               }                                                       \
+               __e;                                                    \
+       })
+
+#else /* CONFIG_MMU */
+
+/*
+ * uClinux has only one addr space, so has simplified address limits.
+ */
+#define USER_DS                        KERNEL_DS
+
+#define segment_eq(a,b)                (1)
+#define __addr_ok(addr)                (1)
+#define __range_ok(addr,size)  (0)
+#define get_fs()               (KERNEL_DS)
+
+static inline void set_fs(mm_segment_t fs)
+{
+}
+
+#define get_user(x,p)  __get_user(x,p)
+#define put_user(x,p)  __put_user(x,p)
+
+#endif /* CONFIG_MMU */
+
+#define access_ok(type,addr,size)      (__range_ok(addr,size) == 0)
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+ * "access_ok()" call.
+ *
+ * The "xxx_error" versions set the third argument to EFAULT if an
+ * error occurs, and leave it unchanged on success.  Note that these
+ * versions are void (ie, don't return a value as such).
+ */
 #define __get_user(x,ptr)                                              \
 ({                                                                     \
        long __gu_err = 0;                                              \
@@ -212,43 +277,6 @@ do {                                                                       \
        : "r" (addr), "i" (-EFAULT)                             \
        : "cc")
 
-extern int __put_user_1(void *, unsigned int);
-extern int __put_user_2(void *, unsigned int);
-extern int __put_user_4(void *, unsigned int);
-extern int __put_user_8(void *, unsigned long long);
-extern int __put_user_bad(void);
-
-#define __put_user_x(__r2,__p,__e,__s)                                 \
-          __asm__ __volatile__ (                                       \
-               __asmeq("%0", "r0") __asmeq("%2", "r2")                 \
-               "bl     __put_user_" #__s                               \
-               : "=&r" (__e)                                           \
-               : "0" (__p), "r" (__r2)                                 \
-               : "ip", "lr", "cc")
-
-#define put_user(x,p)                                                  \
-       ({                                                              \
-               const register typeof(*(p)) __r2 asm("r2") = (x);       \
-               const register typeof(*(p)) __user *__p asm("r0") = (p);\
-               register int __e asm("r0");                             \
-               switch (sizeof(*(__p))) {                               \
-               case 1:                                                 \
-                       __put_user_x(__r2, __p, __e, 1);                \
-                       break;                                          \
-               case 2:                                                 \
-                       __put_user_x(__r2, __p, __e, 2);                \
-                       break;                                          \
-               case 4:                                                 \
-                       __put_user_x(__r2, __p, __e, 4);                \
-                       break;                                          \
-               case 8:                                                 \
-                       __put_user_x(__r2, __p, __e, 8);                \
-                       break;                                          \
-               default: __e = __put_user_bad(); break;                 \
-               }                                                       \
-               __e;                                                    \
-       })
-
 #define __put_user(x,ptr)                                              \
 ({                                                                     \
        long __pu_err = 0;                                              \
@@ -354,9 +382,16 @@ do {                                                                       \
        : "cc")
 
 
+#ifdef CONFIG_MMU
 extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n);
 extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __clear_user(void __user *addr, unsigned long n);
+#else
+#define __copy_from_user(to,from,n)    (memcpy(to, (void __force *)from, n), 0)
+#define __copy_to_user(to,from,n)      (memcpy((void __force *)to, from, n), 0)
+#define __clear_user(addr,n)           (memset((void __force *)addr, 0, n), 0)
+#endif
+
 extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count);
 extern unsigned long __strnlen_user(const char __user *s, long n);
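
As a closing illustration (not part of the patch; the helper name is
hypothetical): with the !CONFIG_MMU fallbacks above, the __xxx
copy/clear helpers can no longer report a fault, so error branches
like the one below become dead code on nommu builds.

    /* Hypothetical example, not from the patch. */
    static int example_zero_user(void __user *p, unsigned long n)
    {
            /* On !CONFIG_MMU this is memset(p, 0, n) and always returns 0,
             * so the -EFAULT path below is unreachable on nommu builds. */
            if (__clear_user(p, n))
                    return -EFAULT;
            return 0;
    }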