[S390] Introduce follow_table in uaccess_pt.c
/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

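/*
 * Walk the page tables for a user address and return a pointer to the
 * mapped pte, or NULL if no valid table entry exists at any level.
 * All callers hold mm->page_table_lock, which keeps the returned pte
 * stable until the lock is dropped.
 */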
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return NULL;

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                return NULL;

        return pte_offset_map(pmd, addr);
}

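/*
 * Resolve a fault on a user address by hand: look up the vma, check
 * the access rights and call handle_mm_fault(), mirroring what the
 * hardware fault handler would do. Returns 0 on success and -EFAULT
 * if the access cannot be resolved, including in atomic context,
 * where sleeping on mmap_sem is not allowed.
 */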
static int __handle_fault(struct mm_struct *mm, unsigned long address,
                          int write_access)
{
        struct vm_area_struct *vma;
        int ret = -EFAULT;
        int fault;

        if (in_atomic())
                return ret;
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (unlikely(!vma))
                goto out;
        if (unlikely(vma->vm_start > address)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
                if (expand_stack(vma, address))
                        goto out;
        }

        if (!write_access) {
                /* page not present, check vm flags */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto out;
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto out;
        }

survive:
        fault = handle_mm_fault(mm, vma, address, write_access);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto out_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;
        ret = 0;
out:
        up_read(&mm->mmap_sem);
        return ret;

out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_global_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", current->comm);
        return ret;

out_sigbus:
        up_read(&mm->mmap_sem);
        current->thread.prot_addr = address;
        current->thread.trap_no = 0x11;
        force_sig(SIGBUS, current);
        return ret;
}

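/*
 * Copy n bytes between a user address and a kernel buffer by page
 * table walk. The copy runs page by page under mm->page_table_lock;
 * if the walk fails, the lock is dropped, __handle_fault() resolves
 * the fault and the walk is retried. Returns the number of bytes
 * that could not be copied.
 */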
static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                             size_t n, int write_user)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, size;
        pte_t *pte;
        void *from, *to;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if (!pte || !pte_present(*pte) ||
                    (write_user && !pte_write(*pte)))
                        goto fault;

                pfn = pte_pfn(*pte);
                if (!pfn_valid(pfn))
                        goto out;

                offset = uaddr & (PAGE_SIZE - 1);
                size = min(n - done, PAGE_SIZE - offset);
                if (write_user) {
                        to = (void *)((pfn << PAGE_SHIFT) + offset);
                        from = kptr + done;
                } else {
                        from = (void *)((pfn << PAGE_SHIFT) + offset);
                        to = kptr + done;
                }
                memcpy(to, from, size);
                done += size;
                uaddr += size;
        } while (done < n);
out:
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, write_user))
                return n - done;
        goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static unsigned long __dat_user_addr(unsigned long uaddr)
{
        struct mm_struct *mm = current->mm;
        unsigned long pfn, ret;
        pte_t *pte;
        int rc;

        ret = 0;
retry:
        pte = follow_table(mm, uaddr);
        if (!pte || !pte_present(*pte))
                goto fault;

        pfn = pte_pfn(*pte);
        if (!pfn_valid(pfn))
                goto out;

        ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
        return ret;
fault:
        spin_unlock(&mm->page_table_lock);
        rc = __handle_fault(mm, uaddr, 0);
        spin_lock(&mm->page_table_lock);
        if (rc)
                goto out;
        goto retry;
}

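/*
 * The exported copy routines: with KERNEL_DS the "user" pointer is
 * really a kernel address and a plain memcpy() suffices; otherwise
 * fall back to the page table walk. copy_from_user_pt() clears the
 * uncopied tail of the kernel buffer on failure, as the
 * copy_from_user() contract requires.
 */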
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
        size_t rc;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(to, (void __kernel __force *) from, n);
                return 0;
        }
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
        return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy((void __kernel __force *) to, from, n);
                return 0;
        }
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

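/*
 * Clear n bytes of user memory by copying from empty_zero_page, at
 * most one page per iteration. Returns the number of bytes that
 * could not be cleared.
 */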
static size_t clear_user_pt(size_t n, void __user *to)
{
        long done, size, ret;

        if (segment_eq(get_fs(), KERNEL_DS)) {
                memset((void __kernel __force *) to, 0, n);
                return 0;
        }
        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
                ret = __user_copy_pt((unsigned long) to + done,
                                      &empty_zero_page, size, 1);
                done += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
        return 0;
}

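/*
 * Determine the length of a user string, including the terminating
 * zero byte, scanning at most count bytes one page at a time.
 * Returns 0 if a fault on the string cannot be resolved.
 */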
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
        char *addr;
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, pfn, done, len;
        pte_t *pte;
        size_t len_str;

        if (segment_eq(get_fs(), KERNEL_DS))
                return strnlen((const char __kernel __force *) src, count) + 1;
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte = follow_table(mm, uaddr);
                if (!pte || !pte_present(*pte))
                        goto fault;

                pfn = pte_pfn(*pte);
                if (!pfn_valid(pfn)) {
                        done = -1;
                        goto out;
                }

                offset = uaddr & (PAGE_SIZE-1);
                addr = (char *)(pfn << PAGE_SHIFT) + offset;
                len = min(count - done, PAGE_SIZE - offset);
                len_str = strnlen(addr, len);
                done += len_str;
                uaddr += len_str;
        } while ((len_str == len) && (done < count));
out:
        spin_unlock(&mm->page_table_lock);
        return done + 1;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, 0))
                return 0;
        goto retry;
}

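/*
 * Copy a string from user space, at most count bytes. Returns the
 * length of the copied string (without the terminating zero byte,
 * if one was copied) or -EFAULT.
 */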
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                   char *dst)
{
        size_t n = strnlen_user_pt(count, src);

        if (!n)
                return -EFAULT;
        if (n > count)
                n = count;
        if (segment_eq(get_fs(), KERNEL_DS)) {
                memcpy(dst, (const char __kernel __force *) src, n);
                if (dst[n-1] == '\0')
                        return n-1;
                else
                        return n;
        }
        if (__user_copy_pt((unsigned long) src, dst, n, 0))
                return -EFAULT;
        if (dst[n-1] == '\0')
                return n-1;
        else
                return n;
}

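/*
 * Copy n bytes from one user address to another. Both addresses are
 * translated by page table walk, so each iteration copies at most up
 * to the nearer of the two page boundaries. Returns the number of
 * bytes that could not be copied.
 */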
static size_t copy_in_user_pt(size_t n, void __user *to,
                              const void __user *from)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
                      uaddr, done, size;
        unsigned long uaddr_from = (unsigned long) from;
        unsigned long uaddr_to = (unsigned long) to;
        pte_t *pte_from, *pte_to;
        int write_user;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                pte_from = follow_table(mm, uaddr_from);
                if (!pte_from || !pte_present(*pte_from)) {
                        uaddr = uaddr_from;
                        write_user = 0;
                        goto fault;
                }

                pte_to = follow_table(mm, uaddr_to);
                if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
                        uaddr = uaddr_to;
                        write_user = 1;
                        goto fault;
                }

                pfn_from = pte_pfn(*pte_from);
                if (!pfn_valid(pfn_from))
                        goto out;
                pfn_to = pte_pfn(*pte_to);
                if (!pfn_valid(pfn_to))
                        goto out;

                offset_from = uaddr_from & (PAGE_SIZE-1);
                offset_to = uaddr_to & (PAGE_SIZE-1);
                offset_max = max(offset_from, offset_to);
                size = min(n - done, PAGE_SIZE - offset_max);

                memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
                       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
                done += size;
                uaddr_from += size;
                uaddr_to += size;
        } while (done < n);
out:
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(mm, uaddr, write_user))
                return n - done;
        goto retry;
}

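/*
 * Futex operations: load the old value, apply the operation and store
 * the result with compare-and-swap, retrying until the swap succeeds.
 * The EX_TABLE entries make a fault at any of the marked instructions
 * return -EFAULT (the initial value of ret) instead of oopsing.
 */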
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)      \
        asm volatile("0: l   %1,0(%6)\n"                                \
                     "1: " insn                                         \
                     "2: cs  %1,%2,0(%6)\n"                             \
                     "3: jl  1b\n"                                      \
                     "   lhi %0,0\n"                                    \
                     "4:\n"                                             \
                     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)    \
                     : "=d" (ret), "=&d" (oldval), "=&d" (newval),      \
                       "=m" (*uaddr)                                    \
                     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),         \
                       "m" (*uaddr) : "cc" );

int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
        int oldval = 0, newval, ret;

        spin_lock(&current->mm->page_table_lock);
        uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op("lr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
                __futex_atomic_op("lr %2,%1\nar %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op("lr %2,%1\nor %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
        }
        put_page(virt_to_page(uaddr));
        *old = oldval;
        return ret;
}

int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
        int ret;

        spin_lock(&current->mm->page_table_lock);
        uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        asm volatile("   cs   %1,%4,0(%5)\n"
                     "0: lr   %0,%1\n"
                     "1:\n"
                     EX_TABLE(0b,1b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory" );
        put_page(virt_to_page(uaddr));
        return ret;
}

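/*
 * The uaccess operations vector that gets installed when user copy
 * must be done by page table walk, i.e. when the machine provides no
 * hardware support for accessing the user address space directly
 * (see the file header comment).
 */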
struct uaccess_ops uaccess_pt = {
        .copy_from_user         = copy_from_user_pt,
        .copy_from_user_small   = copy_from_user_pt,
        .copy_to_user           = copy_to_user_pt,
        .copy_to_user_small     = copy_to_user_pt,
        .copy_in_user           = copy_in_user_pt,
        .clear_user             = clear_user_pt,
        .strnlen_user           = strnlen_user_pt,
        .strncpy_from_user      = strncpy_from_user_pt,
        .futex_atomic_op        = futex_atomic_op_pt,
        .futex_atomic_cmpxchg   = futex_atomic_cmpxchg_pt,
};