/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"
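
/*
 * Walk the page tables of the given mm in software and return a
 * pointer to the pte mapping addr, or NULL if no usable entry is
 * present at one of the upper levels.
 */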
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return NULL;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return NULL;

	return pte_offset_map(pmd, addr);
}
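
/*
 * Resolve a fault on a user address in software, mirroring what the
 * hardware fault handler would do: look up the vma, check the access
 * rights and let handle_mm_fault() establish the page. Returns 0 on
 * success and -EFAULT (after signalling where appropriate) on failure.
 */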
static int __handle_fault(struct mm_struct *mm, unsigned long address,
			  int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;
	int fault;

	if (in_atomic())
		return ret;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

survive:
	fault = handle_mm_fault(mm, vma, address, write_access);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto out_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}
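
/*
 * Copy between a kernel buffer and user space one page at a time,
 * walking the page tables under mm->page_table_lock. When a pte is
 * missing or not writable, the lock is dropped, __handle_fault()
 * pages the address in, and the copy is retried. Returns the number
 * of bytes that could NOT be copied (0 on success).
 */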
static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
			     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static unsigned long __dat_user_addr(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long pfn, ret;
	pte_t *pte;
	int rc;

	ret = 0;
retry:
	pte = follow_table(mm, uaddr);
	if (!pte || !pte_present(*pte))
		goto fault;

	pfn = pte_pfn(*pte);
	if (!pfn_valid(pfn))
		goto out;

	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
	return ret;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(mm, uaddr, 0);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	goto out;
}
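
/*
 * copy_from_user by page table walk. With KERNEL_DS both addresses
 * are kernel addresses and a plain memcpy suffices. On a partial copy
 * the uncopied tail of the kernel buffer is zeroed, as the
 * copy_from_user contract requires.
 */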
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
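
/*
 * Clear user memory by "copying" from empty_zero_page, at most one
 * page per iteration. Returns the number of bytes not cleared.
 */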
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}
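
/*
 * String length with the user-space convention: the returned count
 * includes the terminating '\0'. Returns 0 if the string cannot be
 * read at all.
 */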
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if (!pte || !pte_present(*pte))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn)) {
			done = -1;
			goto out;
		}

		offset = uaddr & (PAGE_SIZE-1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
out:
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, 0))
		return 0;
	goto retry;
}
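
/*
 * Determine the string length first via strnlen_user_pt(), then copy
 * in one go. Returns the length of the copied string without the
 * terminating '\0' if one was copied, n otherwise, or -EFAULT.
 */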
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}
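
/*
 * Copy directly from one user address to another. Both page tables
 * are walked in each iteration; the chunk size is limited by the
 * larger of the two page offsets so that neither side crosses a page
 * boundary within a single memcpy.
 */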
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pte_t *pte_from, *pte_to;
	int write_user;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte_from = follow_table(mm, uaddr_from);
		if (!pte_from || !pte_present(*pte_from)) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}
		pte_to = follow_table(mm, uaddr_to);
		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		if (!pfn_valid(pfn_from))
			goto out;
		pfn_to = pte_pfn(*pte_to);
		if (!pfn_valid(pfn_to))
			goto out;

		offset_from = uaddr_from & (PAGE_SIZE-1);
		offset_to = uaddr_to & (PAGE_SIZE-1);
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}
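
/*
 * Load the old value, apply the operation and try to compare-and-swap
 * the result back, looping until the cs succeeds. On an exception the
 * EX_TABLE entries divert to label 4, leaving ret at the -EFAULT it
 * was primed with.
 */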
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	put_page(virt_to_page(uaddr));
	*old = oldval;
	return ret;
}
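
/*
 * cmpxchg variant: translate and pin the page as above, then do a
 * single compare-and-swap on the kernel mapping. Returns the value
 * found in memory, or -EFAULT if the address could not be translated.
 */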
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
	int ret;

	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	asm volatile("   cs   %1,%4,0(%5)\n"
		     "0: lr   %0,%1\n"
		     "1:\n"
		     EX_TABLE(0b,1b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	put_page(virt_to_page(uaddr));
	return ret;
}
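
/*
 * Operation vector for page table based user access, intended to be
 * selected by the s390 uaccess setup code when user space cannot be
 * reached directly by the hardware.
 */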
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};