/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long __user * fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}
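/*
 * Illustrative user-space sketch (not part of this file): with this
 * calling convention the caller just hands over a pointer to an int[2]
 * array, exactly as portable C code expects:
 *
 *	int fds[2];
 *	if (pipe(fds) == 0) {
 *		... fds[0] is the read end, fds[1] the write end ...
 *	}
 *
 * The traditional unix convention instead returned the two descriptors
 * in two registers, with no memory argument at all.
 */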
/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file * file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
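/*
 * Hedged sketch (names illustrative, not from this file): sys_mmap2()
 * takes its file offset in page units, since do_mmap2() passes pgoff
 * straight through to do_mmap_pgoff(). A wrapper converting a byte
 * offset would therefore shift first:
 *
 *	void *map_bytes(int fd, unsigned long byte_off, size_t len)
 *	{
 *		return (void *)syscall(__NR_mmap2, 0, len, PROT_READ,
 *				       MAP_SHARED, fd,
 *				       byte_off >> PAGE_SHIFT);
 *	}
 */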
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing..
 */

struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};
asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
	return error;
}
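/*
 * Hedged user-space sketch of the memory-block convention described
 * above (illustrative, not from this file): the caller fills one
 * struct and passes a single pointer, sidestepping the old 4-register
 * argument limit:
 *
 *	struct mmap_arg_struct a = {
 *		.addr = 0, .len = 8192,
 *		.prot = PROT_READ | PROT_WRITE,
 *		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
 *		.fd = -1, .offset = 0,
 *	};
 *	void *p = (void *)syscall(__NR_mmap, &a);
 *
 * where __NR_mmap is assumed to route to old_mmap() on this port.
 */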
struct mmap_arg_struct64 {
	__u32 addr;
	__u32 len;
	__u32 prot;
	__u32 flags;
	__u64 offset;	/* 64 bits */
	__u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
	int error = -EFAULT;
	struct file * file = NULL;
	struct mmap_arg_struct64 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if ((long)a.offset & ~PAGE_MASK)
		return -EINVAL;

	pgoff = a.offset >> PAGE_SHIFT;
	if ((a.offset >> PAGE_SHIFT) != pgoff)
		return -EINVAL;

	if (!(a.flags & MAP_ANONYMOUS)) {
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
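/*
 * Hedged sketch of how a libc wrapper is expected to drive this
 * multiplexer (illustrative; the constants come from the IPC headers,
 * not from this file). semop(semid, sops, nsops) becomes:
 *
 *	syscall(__NR_ipc, SEMOP, semid, nsops, 0, sops, 0);
 *
 * The backward-compatibility hack packs an ABI revision into the upper
 * 16 bits of 'call': e.g. MSGRCV | (1 << 16) selects the layout where
 * ptr is the message buffer itself and 'fifth' carries msgtyp (the
 * default branch above), while plain MSGRCV (version 0) expects the
 * struct ipc_kludge indirection.
 */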
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr) \
({ \
  unsigned long _mmusr, _paddr; \
 \
  __asm__ __volatile__ (".chip 68040\n\t" \
			"ptestr (%1)\n\t" \
			"movec %%mmusr,%0\n\t" \
			".chip 68k" \
			: "=r" (_mmusr) \
			: "a" (vaddr)); \
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
  _paddr; \
})
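/*
 * Note (added for clarity): PTESTR makes the 68040 MMU do a table
 * search for a read access and latch the result in the MMUSR register.
 * The resident bit (MMU_R_040) says the translation exists, in which
 * case the upper MMUSR bits hold the physical page frame. An unmapped
 * address therefore evaluates to 0 here, which the flush loops below
 * treat as "skip this page".
 */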
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;) {
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
#define virt_to_phys_060(vaddr) \
({ \
  unsigned long paddr; \
  __asm__ __volatile__ (".chip 68060\n\t" \
			"plpar (%0)\n\t" \
			".chip 68k" \
			: "=a" (paddr) \
			: "0" (vaddr)); \
  (paddr); \
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;) {
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow. */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3*PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10*PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
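/*
 * Hedged user-space sketch (illustrative, not from this file): a JIT
 * that has just written instructions would push them out of the data
 * cache and invalidate the stale instruction-cache lines before
 * jumping to them:
 *
 *	cacheflush((unsigned long)code, FLUSH_SCOPE_LINE,
 *		   FLUSH_CACHE_BOTH, code_len);
 *
 * Whether the libc exposes a cacheflush() wrapper varies; the raw
 * syscall above can always be invoked via its m68k syscall number.
 */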
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}