4 * Copyright (C) 1991, 1992 Linus Torvalds
7 #include <linux/config.h>
8 #include <linux/module.h>
10 #include <linux/utsname.h>
11 #include <linux/mman.h>
12 #include <linux/smp_lock.h>
13 #include <linux/notifier.h>
14 #include <linux/reboot.h>
15 #include <linux/prctl.h>
16 #include <linux/init.h>
17 #include <linux/highuid.h>
19 #include <linux/kernel.h>
20 #include <linux/kexec.h>
21 #include <linux/workqueue.h>
22 #include <linux/device.h>
23 #include <linux/key.h>
24 #include <linux/times.h>
25 #include <linux/posix-timers.h>
26 #include <linux/security.h>
27 #include <linux/dcookies.h>
28 #include <linux/suspend.h>
29 #include <linux/tty.h>
30 #include <linux/signal.h>
31 #include <linux/cn_proc.h>
33 #include <linux/compat.h>
34 #include <linux/syscalls.h>
36 #include <asm/uaccess.h>
38 #include <asm/unistd.h>
40 #ifndef SET_UNALIGN_CTL
41 # define SET_UNALIGN_CTL(a,b) (-EINVAL)
43 #ifndef GET_UNALIGN_CTL
44 # define GET_UNALIGN_CTL(a,b) (-EINVAL)
47 # define SET_FPEMU_CTL(a,b) (-EINVAL)
50 # define GET_FPEMU_CTL(a,b) (-EINVAL)
53 # define SET_FPEXC_CTL(a,b) (-EINVAL)
56 # define GET_FPEXC_CTL(a,b) (-EINVAL)
60 * this is where the system-wide overflow UID and GID are defined, for
61 * architectures that now have 32-bit UID/GID but didn't in the past
64 int overflowuid = DEFAULT_OVERFLOWUID;
65 int overflowgid = DEFAULT_OVERFLOWGID;
68 EXPORT_SYMBOL(overflowuid);
69 EXPORT_SYMBOL(overflowgid);
73 * the same as above, but for filesystems which can only store a 16-bit
74 * UID and GID. As such, this is needed on all architectures
77 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
78 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
80 EXPORT_SYMBOL(fs_overflowuid);
81 EXPORT_SYMBOL(fs_overflowgid);
84 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
91 * Notifier list for kernel code which wants to be called
92 * at shutdown. This is used to stop any idling DMA operations
96 static struct notifier_block *reboot_notifier_list;
97 static DEFINE_RWLOCK(notifier_lock);
100 * notifier_chain_register - Add notifier to a notifier chain
101 * @list: Pointer to root list pointer
102 * @n: New entry in notifier chain
104 * Adds a notifier to a notifier chain.
106 * Currently always returns zero.
109 int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
111 write_lock(&notifier_lock);
114 if (n->priority > (*list)->priority)
116 list = &((*list)->next);
120 write_unlock(&notifier_lock);
124 EXPORT_SYMBOL(notifier_chain_register);
127 * notifier_chain_unregister - Remove notifier from a notifier chain
128 * @nl: Pointer to root list pointer
129 * @n: Entry to remove from notifier chain
131 * Removes a notifier from a notifier chain.
133 * Returns zero on success, or %-ENOENT on failure.
136 int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
138 write_lock(&notifier_lock);
144 write_unlock(&notifier_lock);
149 write_unlock(&notifier_lock);
153 EXPORT_SYMBOL(notifier_chain_unregister);
156 * notifier_call_chain - Call functions in a notifier chain
157 * @n: Pointer to root pointer of notifier chain
158 * @val: Value passed unmodified to notifier function
159 * @v: Pointer passed unmodified to notifier function
161 * Calls each function in a notifier chain in turn.
163 * If the return value of the notifier can be and'd
164 * with %NOTIFY_STOP_MASK, then notifier_call_chain
165 * will return immediately, with the return value of
166 * the notifier function which halted execution.
167 * Otherwise, the return value is the return value
168 * of the last notifier function called.
171 int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
174 struct notifier_block *nb = *n;
178 ret = nb->notifier_call(nb, val, v);
179 if (ret & NOTIFY_STOP_MASK)
188 EXPORT_SYMBOL(notifier_call_chain);
191 * register_reboot_notifier - Register function to be called at reboot time
192 * @nb: Info about notifier function to be called
194 * Registers a function with the list of functions
195 * to be called at reboot time.
197 * Currently always returns zero, as notifier_chain_register
198 * always returns zero.
201 int register_reboot_notifier(struct notifier_block * nb)
203 return notifier_chain_register(&reboot_notifier_list, nb);
206 EXPORT_SYMBOL(register_reboot_notifier);
209 * unregister_reboot_notifier - Unregister previously registered reboot notifier
210 * @nb: Hook to be unregistered
212 * Unregisters a previously registered reboot notifier.
215 * Returns zero on success, or %-ENOENT on failure.
218 int unregister_reboot_notifier(struct notifier_block * nb)
220 return notifier_chain_unregister(&reboot_notifier_list, nb);
223 EXPORT_SYMBOL(unregister_reboot_notifier);
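/*
 * Example usage (illustrative sketch only, not part of this file): a module
 * that needs to run code at shutdown fills in a struct notifier_block and
 * hands it to register_reboot_notifier().  The names below are hypothetical.
 * Returning NOTIFY_DONE lets notifier_call_chain() continue down the chain;
 * a return value with NOTIFY_STOP_MASK set would halt it early.
 */
#if 0
static int example_reboot_event(struct notifier_block *nb,
				unsigned long event, void *unused)
{
	/* event is SYS_RESTART, SYS_HALT or SYS_POWER_OFF */
	printk(KERN_INFO "example: system going down (event %lu)\n", event);
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call	= example_reboot_event,
	.priority	= 0,	/* higher priority callbacks run earlier */
};

static int __init example_init(void)
{
	return register_reboot_notifier(&example_reboot_nb);
}

static void __exit example_exit(void)
{
	unregister_reboot_notifier(&example_reboot_nb);
}
#endif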
225 static int set_one_prio(struct task_struct *p, int niceval, int error)
229 if (p->uid != current->euid &&
230 p->euid != current->euid && !capable(CAP_SYS_NICE)) {
234 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
238 no_nice = security_task_setnice(p, niceval);
245 set_user_nice(p, niceval);
250 asmlinkage long sys_setpriority(int which, int who, int niceval)
252 struct task_struct *g, *p;
253 struct user_struct *user;
256 if (which > 2 || which < 0)
259 /* normalize: avoid signed division (rounding problems) */
266 read_lock(&tasklist_lock);
271 p = find_task_by_pid(who);
273 error = set_one_prio(p, niceval, error);
277 who = process_group(current);
278 do_each_task_pid(who, PIDTYPE_PGID, p) {
279 error = set_one_prio(p, niceval, error);
280 } while_each_task_pid(who, PIDTYPE_PGID, p);
283 user = current->user;
287 if ((who != current->uid) && !(user = find_user(who)))
288 goto out_unlock; /* No processes for this user */
292 error = set_one_prio(p, niceval, error);
293 while_each_thread(g, p);
294 if (who != current->uid)
295 free_uid(user); /* For find_user() */
299 read_unlock(&tasklist_lock);
305 * Ugh. To avoid negative return values, "getpriority()" will
306 * not return the normal nice-value, but a negated value that
307 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
308 * to stay compatible.
310 asmlinkage long sys_getpriority(int which, int who)
312 struct task_struct *g, *p;
313 struct user_struct *user;
314 long niceval, retval = -ESRCH;
316 if (which > 2 || which < 0)
319 read_lock(&tasklist_lock);
324 p = find_task_by_pid(who);
326 niceval = 20 - task_nice(p);
327 if (niceval > retval)
333 who = process_group(current);
334 do_each_task_pid(who, PIDTYPE_PGID, p) {
335 niceval = 20 - task_nice(p);
336 if (niceval > retval)
338 } while_each_task_pid(who, PIDTYPE_PGID, p);
341 user = current->user;
345 if ((who != current->uid) && !(user = find_user(who)))
346 goto out_unlock; /* No processes for this user */
350 niceval = 20 - task_nice(p);
351 if (niceval > retval)
354 while_each_thread(g, p);
355 if (who != current->uid)
356 free_uid(user); /* for find_user() */
360 read_unlock(&tasklist_lock);
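/*
 * Illustrative userspace sketch (not kernel code, helper name hypothetical):
 * the raw getpriority() syscall returns the nice value biased by 20 as
 * described above, so a libc-style wrapper undoes the offset.  errno must be
 * cleared first because every successful biased value is positive.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>

static int example_get_nice(int which, int who)
{
	long ret;

	errno = 0;
	ret = syscall(SYS_getpriority, which, who);
	if (ret == -1 && errno != 0)
		return -1;		/* real error */
	return 20 - (int)ret;		/* back to the usual -20..19 range */
}
#endif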
366 * emergency_restart - reboot the system
368 * Without shutting down any hardware or taking any locks
369 * reboot the system. This is called when we know we are in
370 * trouble so this is our best effort to reboot. This is
371 * safe to call in interrupt context.
373 void emergency_restart(void)
375 machine_emergency_restart();
377 EXPORT_SYMBOL_GPL(emergency_restart);
380 * kernel_restart - reboot the system
382 * Shutdown everything and perform a clean reboot.
383 * This is not safe to call in interrupt context.
385 void kernel_restart_prepare(char *cmd)
387 notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
388 system_state = SYSTEM_RESTART;
391 void kernel_restart(char *cmd)
393 kernel_restart_prepare(cmd);
395 printk(KERN_EMERG "Restarting system.\n");
397 printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
400 machine_restart(cmd);
402 EXPORT_SYMBOL_GPL(kernel_restart);
405 * kernel_kexec - reboot the system
407 * Move into place and start executing a preloaded standalone
408 * executable. If nothing was preloaded return an error.
410 void kernel_kexec(void)
413 struct kimage *image;
414 image = xchg(&kexec_image, 0);
418 kernel_restart_prepare(NULL);
419 printk(KERN_EMERG "Starting new kernel\n");
421 machine_kexec(image);
424 EXPORT_SYMBOL_GPL(kernel_kexec);
427 * kernel_halt - halt the system
429 * Shutdown everything and perform a clean system halt.
431 void kernel_halt_prepare(void)
433 notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
434 system_state = SYSTEM_HALT;
437 void kernel_halt(void)
439 kernel_halt_prepare();
440 printk(KERN_EMERG "System halted.\n");
443 EXPORT_SYMBOL_GPL(kernel_halt);
446 * kernel_power_off - power_off the system
448 * Shutdown everything and perform a clean system power_off.
450 void kernel_power_off_prepare(void)
452 notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
453 system_state = SYSTEM_POWER_OFF;
456 void kernel_power_off(void)
458 kernel_power_off_prepare();
459 printk(KERN_EMERG "Power down.\n");
462 EXPORT_SYMBOL_GPL(kernel_power_off);
465 * Reboot system call: for obvious reasons only root may call it,
466 * and even root needs to set up some magic numbers in the registers
467 * so that some mistake won't make this reboot the whole machine.
468 * You can also set the meaning of the ctrl-alt-del-key here.
470 * reboot doesn't sync: do that yourself before calling this.
472 asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
476 /* We only trust the superuser with rebooting the system. */
477 if (!capable(CAP_SYS_BOOT))
480 /* For safety, we require "magic" arguments. */
481 if (magic1 != LINUX_REBOOT_MAGIC1 ||
482 (magic2 != LINUX_REBOOT_MAGIC2 &&
483 magic2 != LINUX_REBOOT_MAGIC2A &&
484 magic2 != LINUX_REBOOT_MAGIC2B &&
485 magic2 != LINUX_REBOOT_MAGIC2C))
490 case LINUX_REBOOT_CMD_RESTART:
491 kernel_restart(NULL);
494 case LINUX_REBOOT_CMD_CAD_ON:
498 case LINUX_REBOOT_CMD_CAD_OFF:
502 case LINUX_REBOOT_CMD_HALT:
508 case LINUX_REBOOT_CMD_POWER_OFF:
514 case LINUX_REBOOT_CMD_RESTART2:
515 if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
519 buffer[sizeof(buffer) - 1] = '\0';
521 kernel_restart(buffer);
524 case LINUX_REBOOT_CMD_KEXEC:
529 #ifdef CONFIG_SOFTWARE_SUSPEND
530 case LINUX_REBOOT_CMD_SW_SUSPEND:
532 int ret = software_suspend();
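/*
 * Illustrative userspace sketch (not kernel code, helper name hypothetical):
 * invoking the reboot syscall directly requires CAP_SYS_BOOT plus both magic
 * numbers checked above.  LINUX_REBOOT_CMD_RESTART2 also passes a command
 * string through to machine_restart().  sys_reboot() does not sync, so the
 * caller does it first.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

static int example_restart_with_cmd(const char *cmd)
{
	sync();
	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		       LINUX_REBOOT_CMD_RESTART2, cmd);
}
#endif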
546 static void deferred_cad(void *dummy)
548 kernel_restart(NULL);
552 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
553 * As it's called within an interrupt, it may NOT sync: the only choice
554 * is whether to reboot at once, or just ignore the ctrl-alt-del.
556 void ctrl_alt_del(void)
558 static DECLARE_WORK(cad_work, deferred_cad, NULL);
561 schedule_work(&cad_work);
563 kill_proc(cad_pid, SIGINT, 1);
568 * Unprivileged users may change the real gid to the effective gid
569 * or vice versa. (BSD-style)
571 * If you set the real gid at all, or set the effective gid to a value not
572 * equal to the real gid, then the saved gid is set to the new effective gid.
574 * This makes it possible for a setgid program to completely drop its
575 * privileges, which is often a useful assertion to make when you are doing
576 * a security audit over a program.
578 * The general idea is that a program which uses just setregid() will be
579 * 100% compatible with BSD. A program which uses just setgid() will be
580 * 100% compatible with POSIX with saved IDs.
582 * SMP: There are no races; the GIDs are checked only by filesystem
583 * operations (as far as semantic preservation is concerned).
585 asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
587 int old_rgid = current->gid;
588 int old_egid = current->egid;
589 int new_rgid = old_rgid;
590 int new_egid = old_egid;
593 retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
597 if (rgid != (gid_t) -1) {
598 if ((old_rgid == rgid) ||
599 (current->egid==rgid) ||
605 if (egid != (gid_t) -1) {
606 if ((old_rgid == egid) ||
607 (current->egid == egid) ||
608 (current->sgid == egid) ||
615 if (new_egid != old_egid)
617 current->mm->dumpable = suid_dumpable;
620 if (rgid != (gid_t) -1 ||
621 (egid != (gid_t) -1 && egid != old_rgid))
622 current->sgid = new_egid;
623 current->fsgid = new_egid;
624 current->egid = new_egid;
625 current->gid = new_rgid;
626 key_fsgid_changed(current);
627 proc_id_connector(current, PROC_EVENT_GID);
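/*
 * Illustrative userspace sketch (not kernel code, helper name hypothetical):
 * per the comment above sys_setregid(), a setgid program drops its elevated
 * group permanently by setting both the real and effective gid, which also
 * overwrites the saved gid.
 */
#if 0
#include <unistd.h>

static int example_drop_setgid_priv(void)
{
	gid_t rgid = getgid();	/* the invoking user's real group */

	/* afterwards real == effective == saved == rgid; cannot be undone */
	return setregid(rgid, rgid);
}
#endif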
632 * setgid() is implemented like SysV w/ SAVED_IDS
634 * SMP: Same implicit races as above.
636 asmlinkage long sys_setgid(gid_t gid)
638 int old_egid = current->egid;
641 retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
645 if (capable(CAP_SETGID))
649 current->mm->dumpable = suid_dumpable;
652 current->gid = current->egid = current->sgid = current->fsgid = gid;
654 else if ((gid == current->gid) || (gid == current->sgid))
658 current->mm->dumpable = suid_dumpable;
661 current->egid = current->fsgid = gid;
666 key_fsgid_changed(current);
667 proc_id_connector(current, PROC_EVENT_GID);
671 static int set_user(uid_t new_ruid, int dumpclear)
673 struct user_struct *new_user;
675 new_user = alloc_uid(new_ruid);
679 if (atomic_read(&new_user->processes) >=
680 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
681 new_user != &root_user) {
686 switch_uid(new_user);
690 current->mm->dumpable = suid_dumpable;
693 current->uid = new_ruid;
698 * Unprivileged users may change the real uid to the effective uid
699 * or vice versa. (BSD-style)
701 * If you set the real uid at all, or set the effective uid to a value not
702 * equal to the real uid, then the saved uid is set to the new effective uid.
704 * This makes it possible for a setuid program to completely drop its
705 * privileges, which is often a useful assertion to make when you are doing
706 * a security audit over a program.
708 * The general idea is that a program which uses just setreuid() will be
709 * 100% compatible with BSD. A program which uses just setuid() will be
710 * 100% compatible with POSIX with saved IDs.
712 asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
714 int old_ruid, old_euid, old_suid, new_ruid, new_euid;
717 retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
721 new_ruid = old_ruid = current->uid;
722 new_euid = old_euid = current->euid;
723 old_suid = current->suid;
725 if (ruid != (uid_t) -1) {
727 if ((old_ruid != ruid) &&
728 (current->euid != ruid) &&
729 !capable(CAP_SETUID))
733 if (euid != (uid_t) -1) {
735 if ((old_ruid != euid) &&
736 (current->euid != euid) &&
737 (current->suid != euid) &&
738 !capable(CAP_SETUID))
742 if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
745 if (new_euid != old_euid)
747 current->mm->dumpable = suid_dumpable;
750 current->fsuid = current->euid = new_euid;
751 if (ruid != (uid_t) -1 ||
752 (euid != (uid_t) -1 && euid != old_ruid))
753 current->suid = current->euid;
754 current->fsuid = current->euid;
756 key_fsuid_changed(current);
757 proc_id_connector(current, PROC_EVENT_UID);
759 return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
765 * setuid() is implemented like SysV with SAVED_IDS
767 * Note that SAVED_IDS is deficient in that a setuid root program
768 * like sendmail, for example, cannot set its uid to be a normal
769 * user and then switch back, because if you're root, setuid() sets
770 * the saved uid too. If you don't like this, blame the bright people
771 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
772 * will allow a root program to temporarily drop privileges and be able to
773 * regain them by swapping the real and effective uid.
775 asmlinkage long sys_setuid(uid_t uid)
777 int old_euid = current->euid;
778 int old_ruid, old_suid, new_ruid, new_suid;
781 retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
785 old_ruid = new_ruid = current->uid;
786 old_suid = current->suid;
789 if (capable(CAP_SETUID)) {
790 if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
793 } else if ((uid != current->uid) && (uid != new_suid))
798 current->mm->dumpable = suid_dumpable;
801 current->fsuid = current->euid = uid;
802 current->suid = new_suid;
804 key_fsuid_changed(current);
805 proc_id_connector(current, PROC_EVENT_UID);
807 return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
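/*
 * Illustrative userspace sketch (not kernel code, helper name hypothetical):
 * as the comment above sys_setuid() explains, a setuid-root program cannot
 * drop privileges temporarily with setuid(), but it can swap the real and
 * effective uids with setreuid() and swap them back later.
 */
#if 0
#include <unistd.h>

static int example_toggle_root_priv(void)
{
	/* swap real and effective uids; calling it again swaps them back */
	return setreuid(geteuid(), getuid());
}
#endif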
812 * This function implements a generic ability to update ruid, euid,
813 * and suid. This allows you to implement the 4.4 compatible seteuid().
815 asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
817 int old_ruid = current->uid;
818 int old_euid = current->euid;
819 int old_suid = current->suid;
822 retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
826 if (!capable(CAP_SETUID)) {
827 if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
828 (ruid != current->euid) && (ruid != current->suid))
830 if ((euid != (uid_t) -1) && (euid != current->uid) &&
831 (euid != current->euid) && (euid != current->suid))
833 if ((suid != (uid_t) -1) && (suid != current->uid) &&
834 (suid != current->euid) && (suid != current->suid))
837 if (ruid != (uid_t) -1) {
838 if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
841 if (euid != (uid_t) -1) {
842 if (euid != current->euid)
844 current->mm->dumpable = suid_dumpable;
847 current->euid = euid;
849 current->fsuid = current->euid;
850 if (suid != (uid_t) -1)
851 current->suid = suid;
853 key_fsuid_changed(current);
854 proc_id_connector(current, PROC_EVENT_UID);
856 return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
859 asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
863 if (!(retval = put_user(current->uid, ruid)) &&
864 !(retval = put_user(current->euid, euid)))
865 retval = put_user(current->suid, suid);
871 * Same as above, but for rgid, egid, sgid.
873 asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
877 retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
881 if (!capable(CAP_SETGID)) {
882 if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
883 (rgid != current->egid) && (rgid != current->sgid))
885 if ((egid != (gid_t) -1) && (egid != current->gid) &&
886 (egid != current->egid) && (egid != current->sgid))
888 if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
889 (sgid != current->egid) && (sgid != current->sgid))
892 if (egid != (gid_t) -1) {
893 if (egid != current->egid)
895 current->mm->dumpable = suid_dumpable;
898 current->egid = egid;
900 current->fsgid = current->egid;
901 if (rgid != (gid_t) -1)
903 if (sgid != (gid_t) -1)
904 current->sgid = sgid;
906 key_fsgid_changed(current);
907 proc_id_connector(current, PROC_EVENT_GID);
911 asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
915 if (!(retval = put_user(current->gid, rgid)) &&
916 !(retval = put_user(current->egid, egid)))
917 retval = put_user(current->sgid, sgid);
924 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
925 * is used for "access()" and for the NFS daemon (letting nfsd stay at
926 * whatever uid it wants to). It normally shadows "euid", except when
927 * explicitly set by setfsuid() or for access..
929 asmlinkage long sys_setfsuid(uid_t uid)
933 old_fsuid = current->fsuid;
934 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
937 if (uid == current->uid || uid == current->euid ||
938 uid == current->suid || uid == current->fsuid ||
941 if (uid != old_fsuid)
943 current->mm->dumpable = suid_dumpable;
946 current->fsuid = uid;
949 key_fsuid_changed(current);
950 proc_id_connector(current, PROC_EVENT_UID);
952 security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
958 * Samma på svenska.. ("the same, in Swedish" - setfsgid() mirrors setfsuid() above)
960 asmlinkage long sys_setfsgid(gid_t gid)
964 old_fsgid = current->fsgid;
965 if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
968 if (gid == current->gid || gid == current->egid ||
969 gid == current->sgid || gid == current->fsgid ||
972 if (gid != old_fsgid)
974 current->mm->dumpable = suid_dumpable;
977 current->fsgid = gid;
978 key_fsgid_changed(current);
979 proc_id_connector(current, PROC_EVENT_GID);
984 asmlinkage long sys_times(struct tms __user * tbuf)
987 * In the SMP world we might just be unlucky and have one of
988 * the times increment as we use it. Since the value is an
989 * atomically safe type this is just fine. Conceptually it's
990 * as if the syscall took an instant longer to occur.
994 cputime_t utime, stime, cutime, cstime;
997 if (thread_group_empty(current)) {
999 * Single thread case without the use of any locks.
1001 * We may race with release_task if two threads are
1002 * executing. However, release_task() first adds up the
1003 * counters (__exit_signal) before removing the task
1004 * from the process tasklist (__unhash_process).
1005 * __exit_signal also acquires and releases the
1006 * siglock which results in the proper memory ordering
1007 * so that the list modifications are always visible
1008 * after the counters have been updated.
1010 * If the counters have been updated by the second thread
1011 * but the thread has not yet been removed from the list
1012 * then the other branch will be executing which will
1013 * block on tasklist_lock until the exit handling of the
1014 * other task is finished.
1016 * This also implies that the sighand->siglock cannot
1017 * be held by another processor. So we can also
1018 * skip acquiring that lock.
1020 utime = cputime_add(current->signal->utime, current->utime);
1021 stime = cputime_add(current->signal->stime, current->stime);
1022 cutime = current->signal->cutime;
1023 cstime = current->signal->cstime;
1028 /* Process with multiple threads */
1029 struct task_struct *tsk = current;
1030 struct task_struct *t;
1032 read_lock(&tasklist_lock);
1033 utime = tsk->signal->utime;
1034 stime = tsk->signal->stime;
1037 utime = cputime_add(utime, t->utime);
1038 stime = cputime_add(stime, t->stime);
1043 * While we have tasklist_lock read-locked, no dying thread
1044 * can be updating current->signal->[us]time. Instead,
1045 * we got their counts included in the live thread loop.
1046 * However, another thread can come in right now and
1047 * do a wait call that updates current->signal->c[us]time.
1048 * To make sure we always see that pair updated atomically,
1049 * we take the siglock around fetching them.
1051 spin_lock_irq(&tsk->sighand->siglock);
1052 cutime = tsk->signal->cutime;
1053 cstime = tsk->signal->cstime;
1054 spin_unlock_irq(&tsk->sighand->siglock);
1055 read_unlock(&tasklist_lock);
1057 tmp.tms_utime = cputime_to_clock_t(utime);
1058 tmp.tms_stime = cputime_to_clock_t(stime);
1059 tmp.tms_cutime = cputime_to_clock_t(cutime);
1060 tmp.tms_cstime = cputime_to_clock_t(cstime);
1061 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
1064 return (long) jiffies_64_to_clock_t(get_jiffies_64());
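/*
 * Illustrative userspace sketch (not kernel code, helper name hypothetical):
 * sys_times() fills struct tms with clock ticks and returns the current tick
 * count, so elapsed wall-clock and CPU time come from differencing two
 * samples and dividing by sysconf(_SC_CLK_TCK).
 */
#if 0
#include <sys/times.h>
#include <unistd.h>
#include <stdio.h>

static void example_report_times(clock_t start_ticks, const struct tms *start)
{
	struct tms now;
	clock_t end_ticks = times(&now);
	long hz = sysconf(_SC_CLK_TCK);

	printf("real %.2fs user %.2fs sys %.2fs\n",
	       (double)(end_ticks - start_ticks) / hz,
	       (double)(now.tms_utime - start->tms_utime) / hz,
	       (double)(now.tms_stime - start->tms_stime) / hz);
}
#endif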
1068 * This needs some heavy checking ...
1069 * I just haven't the stomach for it. I also don't fully
1070 * understand sessions/pgrp etc. Let somebody who does explain it.
1072 * OK, I think I have the protection semantics right.... this is really
1073 * only important on a multi-user system anyway, to make sure one user
1074 * can't send a signal to a process owned by another. -TYT, 12/12/91
1076 * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX.
1080 asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
1082 struct task_struct *p;
1092 /* From this point forward we keep holding onto the tasklist lock
1093 * so that our parent does not change from under us. -DaveM
1095 write_lock_irq(&tasklist_lock);
1098 p = find_task_by_pid(pid);
1103 if (!thread_group_leader(p))
1106 if (p->parent == current || p->real_parent == current) {
1108 if (p->signal->session != current->signal->session)
1120 if (p->signal->leader)
1124 struct task_struct *p;
1126 do_each_task_pid(pgid, PIDTYPE_PGID, p) {
1127 if (p->signal->session == current->signal->session)
1129 } while_each_task_pid(pgid, PIDTYPE_PGID, p);
1134 err = security_task_setpgid(p, pgid);
1138 if (process_group(p) != pgid) {
1139 detach_pid(p, PIDTYPE_PGID);
1140 p->signal->pgrp = pgid;
1141 attach_pid(p, PIDTYPE_PGID, pgid);
1146 /* All paths lead to here, thus we are safe. -DaveM */
1147 write_unlock_irq(&tasklist_lock);
1151 asmlinkage long sys_getpgid(pid_t pid)
1154 return process_group(current);
1157 struct task_struct *p;
1159 read_lock(&tasklist_lock);
1160 p = find_task_by_pid(pid);
1164 retval = security_task_getpgid(p);
1166 retval = process_group(p);
1168 read_unlock(&tasklist_lock);
1173 #ifdef __ARCH_WANT_SYS_GETPGRP
1175 asmlinkage long sys_getpgrp(void)
1177 /* SMP - assuming writes are word atomic this is fine */
1178 return process_group(current);
1183 asmlinkage long sys_getsid(pid_t pid)
1186 return current->signal->session;
1189 struct task_struct *p;
1191 read_lock(&tasklist_lock);
1192 p = find_task_by_pid(pid);
1196 retval = security_task_getsid(p);
1198 retval = p->signal->session;
1200 read_unlock(&tasklist_lock);
1205 asmlinkage long sys_setsid(void)
1210 if (!thread_group_leader(current))
1214 write_lock_irq(&tasklist_lock);
1216 pid = find_pid(PIDTYPE_PGID, current->pid);
1220 current->signal->leader = 1;
1221 __set_special_pids(current->pid, current->pid);
1222 current->signal->tty = NULL;
1223 current->signal->tty_old_pgrp = 0;
1224 err = process_group(current);
1226 write_unlock_irq(&tasklist_lock);
1232 * Supplementary group IDs
1235 /* init to 2 - one for init_task, one to ensure it is never freed */
1236 struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
1238 struct group_info *groups_alloc(int gidsetsize)
1240 struct group_info *group_info;
1244 nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
1245 /* Make sure we always allocate at least one indirect block pointer */
1246 nblocks = nblocks ? : 1;
1247 group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
1250 group_info->ngroups = gidsetsize;
1251 group_info->nblocks = nblocks;
1252 atomic_set(&group_info->usage, 1);
1254 if (gidsetsize <= NGROUPS_SMALL) {
1255 group_info->blocks[0] = group_info->small_block;
1257 for (i = 0; i < nblocks; i++) {
1259 b = (void *)__get_free_page(GFP_USER);
1261 goto out_undo_partial_alloc;
1262 group_info->blocks[i] = b;
1267 out_undo_partial_alloc:
1269 free_page((unsigned long)group_info->blocks[i]);
1275 EXPORT_SYMBOL(groups_alloc);
1277 void groups_free(struct group_info *group_info)
1279 if (group_info->blocks[0] != group_info->small_block) {
1281 for (i = 0; i < group_info->nblocks; i++)
1282 free_page((unsigned long)group_info->blocks[i]);
1287 EXPORT_SYMBOL(groups_free);
1289 /* export the group_info to a user-space array */
1290 static int groups_to_user(gid_t __user *grouplist,
1291 struct group_info *group_info)
1294 int count = group_info->ngroups;
1296 for (i = 0; i < group_info->nblocks; i++) {
1297 int cp_count = min(NGROUPS_PER_BLOCK, count);
1298 int off = i * NGROUPS_PER_BLOCK;
1299 int len = cp_count * sizeof(*grouplist);
1301 if (copy_to_user(grouplist+off, group_info->blocks[i], len))
1309 /* fill a group_info from a user-space array - it must be allocated already */
1310 static int groups_from_user(struct group_info *group_info,
1311 gid_t __user *grouplist)
1314 int count = group_info->ngroups;
1316 for (i = 0; i < group_info->nblocks; i++) {
1317 int cp_count = min(NGROUPS_PER_BLOCK, count);
1318 int off = i * NGROUPS_PER_BLOCK;
1319 int len = cp_count * sizeof(*grouplist);
1321 if (copy_from_user(group_info->blocks[i], grouplist+off, len))
1329 /* a simple Shell sort */
1330 static void groups_sort(struct group_info *group_info)
1332 int base, max, stride;
1333 int gidsetsize = group_info->ngroups;
1335 for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
1340 max = gidsetsize - stride;
1341 for (base = 0; base < max; base++) {
1343 int right = left + stride;
1344 gid_t tmp = GROUP_AT(group_info, right);
1346 while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
1347 GROUP_AT(group_info, right) =
1348 GROUP_AT(group_info, left);
1352 GROUP_AT(group_info, right) = tmp;
1358 /* a simple bsearch */
1359 int groups_search(struct group_info *group_info, gid_t grp)
1367 right = group_info->ngroups;
1368 while (left < right) {
1369 int mid = (left+right)/2;
1370 int cmp = grp - GROUP_AT(group_info, mid);
1381 /* validate and set current->group_info */
1382 int set_current_groups(struct group_info *group_info)
1385 struct group_info *old_info;
1387 retval = security_task_setgroups(group_info);
1391 groups_sort(group_info);
1392 get_group_info(group_info);
1395 old_info = current->group_info;
1396 current->group_info = group_info;
1397 task_unlock(current);
1399 put_group_info(old_info);
1404 EXPORT_SYMBOL(set_current_groups);
1406 asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
1411 * SMP: Nobody else can change our grouplist. Thus we are
1418 /* no need to grab task_lock here; it cannot change */
1419 get_group_info(current->group_info);
1420 i = current->group_info->ngroups;
1422 if (i > gidsetsize) {
1426 if (groups_to_user(grouplist, current->group_info)) {
1432 put_group_info(current->group_info);
1437 * SMP: Our groups are copy-on-write. We can set them safely
1438 * without another task interfering.
1441 asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
1443 struct group_info *group_info;
1446 if (!capable(CAP_SETGID))
1448 if ((unsigned)gidsetsize > NGROUPS_MAX)
1451 group_info = groups_alloc(gidsetsize);
1454 retval = groups_from_user(group_info, grouplist);
1456 put_group_info(group_info);
1460 retval = set_current_groups(group_info);
1461 put_group_info(group_info);
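/*
 * Illustrative userspace sketch (not kernel code, helper name hypothetical):
 * a privileged daemon shedding its identity should clear the supplementary
 * group list while it still has CAP_SETGID - sys_setgroups() above rejects
 * the call afterwards - and only then change gid and uid.
 */
#if 0
#include <grp.h>
#include <unistd.h>

static int example_drop_all_ids(uid_t uid, gid_t gid)
{
	if (setgroups(0, NULL) < 0)	/* empty the supplementary group list */
		return -1;
	if (setgid(gid) < 0)
		return -1;
	return setuid(uid);
}
#endif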
1467 * Check whether we're fsgid/egid or in the supplemental group..
1469 int in_group_p(gid_t grp)
1472 if (grp != current->fsgid) {
1473 get_group_info(current->group_info);
1474 retval = groups_search(current->group_info, grp);
1475 put_group_info(current->group_info);
1480 EXPORT_SYMBOL(in_group_p);
1482 int in_egroup_p(gid_t grp)
1485 if (grp != current->egid) {
1486 get_group_info(current->group_info);
1487 retval = groups_search(current->group_info, grp);
1488 put_group_info(current->group_info);
1493 EXPORT_SYMBOL(in_egroup_p);
1495 DECLARE_RWSEM(uts_sem);
1497 EXPORT_SYMBOL(uts_sem);
1499 asmlinkage long sys_newuname(struct new_utsname __user * name)
1503 down_read(&uts_sem);
1504 if (copy_to_user(name, &system_utsname, sizeof *name))
1510 asmlinkage long sys_sethostname(char __user *name, int len)
1513 char tmp[__NEW_UTS_LEN];
1515 if (!capable(CAP_SYS_ADMIN))
1517 if (len < 0 || len > __NEW_UTS_LEN)
1519 down_write(&uts_sem);
1521 if (!copy_from_user(tmp, name, len)) {
1522 memcpy(system_utsname.nodename, tmp, len);
1523 system_utsname.nodename[len] = 0;
1530 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1532 asmlinkage long sys_gethostname(char __user *name, int len)
1538 down_read(&uts_sem);
1539 i = 1 + strlen(system_utsname.nodename);
1543 if (copy_to_user(name, system_utsname.nodename, i))
1552 * Only setdomainname; getdomainname can be implemented by calling
1555 asmlinkage long sys_setdomainname(char __user *name, int len)
1558 char tmp[__NEW_UTS_LEN];
1560 if (!capable(CAP_SYS_ADMIN))
1562 if (len < 0 || len > __NEW_UTS_LEN)
1565 down_write(&uts_sem);
1567 if (!copy_from_user(tmp, name, len)) {
1568 memcpy(system_utsname.domainname, tmp, len);
1569 system_utsname.domainname[len] = 0;
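/*
 * Illustrative userspace sketch (not kernel code, helper name hypothetical):
 * as noted above, there is no getdomainname() counterpart here - userspace
 * reads the domain name back out of the uname() result instead.  The
 * domainname field of struct utsname is a GNU extension.
 */
#if 0
#define _GNU_SOURCE
#include <sys/utsname.h>
#include <string.h>

static int example_get_domainname(char *buf, size_t len)
{
	struct utsname u;

	if (len == 0 || uname(&u) < 0)
		return -1;
	strncpy(buf, u.domainname, len - 1);
	buf[len - 1] = '\0';
	return 0;
}
#endif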
1576 asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1578 if (resource >= RLIM_NLIMITS)
1581 struct rlimit value;
1582 task_lock(current->group_leader);
1583 value = current->signal->rlim[resource];
1584 task_unlock(current->group_leader);
1585 return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1589 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1592 * Back compatibility for getrlimit. Needed for some apps.
1595 asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1598 if (resource >= RLIM_NLIMITS)
1601 task_lock(current->group_leader);
1602 x = current->signal->rlim[resource];
1603 task_unlock(current->group_leader);
1604 if (x.rlim_cur > 0x7FFFFFFF)
1605 x.rlim_cur = 0x7FFFFFFF;
1606 if (x.rlim_max > 0x7FFFFFFF)
1607 x.rlim_max = 0x7FFFFFFF;
1608 return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1613 asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
1615 struct rlimit new_rlim, *old_rlim;
1618 if (resource >= RLIM_NLIMITS)
1620 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1622 if (new_rlim.rlim_cur > new_rlim.rlim_max)
1624 old_rlim = current->signal->rlim + resource;
1625 if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
1626 !capable(CAP_SYS_RESOURCE))
1628 if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
1631 retval = security_task_setrlimit(resource, &new_rlim);
1635 task_lock(current->group_leader);
1636 *old_rlim = new_rlim;
1637 task_unlock(current->group_leader);
1639 if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
1640 (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
1641 new_rlim.rlim_cur <= cputime_to_secs(
1642 current->signal->it_prof_expires))) {
1643 cputime_t cputime = secs_to_cputime(new_rlim.rlim_cur);
1644 read_lock(&tasklist_lock);
1645 spin_lock_irq(&current->sighand->siglock);
1646 set_process_cpu_timer(current, CPUCLOCK_PROF,
1648 spin_unlock_irq(&current->sighand->siglock);
1649 read_unlock(&tasklist_lock);
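/*
 * Illustrative userspace sketch (not kernel code, helper name hypothetical):
 * lowering RLIMIT_CPU, as handled above, arms the per-process CPU timer; the
 * process gets SIGXCPU when the soft limit of CPU seconds is used up and is
 * killed at the hard limit.
 */
#if 0
#include <sys/resource.h>

static int example_limit_cpu_seconds(rlim_t soft, rlim_t hard)
{
	struct rlimit rl = { .rlim_cur = soft, .rlim_max = hard };

	return setrlimit(RLIMIT_CPU, &rl);
}
#endif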
1656 * It would make sense to put struct rusage in the task_struct,
1657 * except that would make the task_struct be *really big*. After
1658 * task_struct gets moved into malloc'ed memory, it would
1659 * make sense to do this. It will make moving the rest of the information
1660 * a lot simpler! (Which we're not doing right now because we're not
1661 * measuring them yet).
1663 * This expects to be called with tasklist_lock read-locked or better,
1664 * and the siglock not locked. It may momentarily take the siglock.
1666 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1667 * races with threads incrementing their own counters. But since word
1668 * reads are atomic, we either get new values or old values and we don't
1669 * care which for the sums. We always take the siglock to protect reading
1670 * the c* fields from p->signal from races with exit.c updating those
1671 * fields when reaping, so a sample either gets all the additions of a
1672 * given child after it's reaped, or none so this sample is before reaping.
1675 static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1677 struct task_struct *t;
1678 unsigned long flags;
1679 cputime_t utime, stime;
1681 memset((char *) r, 0, sizeof *r);
1683 if (unlikely(!p->signal))
1687 case RUSAGE_CHILDREN:
1688 spin_lock_irqsave(&p->sighand->siglock, flags);
1689 utime = p->signal->cutime;
1690 stime = p->signal->cstime;
1691 r->ru_nvcsw = p->signal->cnvcsw;
1692 r->ru_nivcsw = p->signal->cnivcsw;
1693 r->ru_minflt = p->signal->cmin_flt;
1694 r->ru_majflt = p->signal->cmaj_flt;
1695 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1696 cputime_to_timeval(utime, &r->ru_utime);
1697 cputime_to_timeval(stime, &r->ru_stime);
1700 spin_lock_irqsave(&p->sighand->siglock, flags);
1701 utime = stime = cputime_zero;
1704 spin_lock_irqsave(&p->sighand->siglock, flags);
1705 utime = p->signal->cutime;
1706 stime = p->signal->cstime;
1707 r->ru_nvcsw = p->signal->cnvcsw;
1708 r->ru_nivcsw = p->signal->cnivcsw;
1709 r->ru_minflt = p->signal->cmin_flt;
1710 r->ru_majflt = p->signal->cmaj_flt;
1712 utime = cputime_add(utime, p->signal->utime);
1713 stime = cputime_add(stime, p->signal->stime);
1714 r->ru_nvcsw += p->signal->nvcsw;
1715 r->ru_nivcsw += p->signal->nivcsw;
1716 r->ru_minflt += p->signal->min_flt;
1717 r->ru_majflt += p->signal->maj_flt;
1720 utime = cputime_add(utime, t->utime);
1721 stime = cputime_add(stime, t->stime);
1722 r->ru_nvcsw += t->nvcsw;
1723 r->ru_nivcsw += t->nivcsw;
1724 r->ru_minflt += t->min_flt;
1725 r->ru_majflt += t->maj_flt;
1728 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1729 cputime_to_timeval(utime, &r->ru_utime);
1730 cputime_to_timeval(stime, &r->ru_stime);
1737 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1740 read_lock(&tasklist_lock);
1741 k_getrusage(p, who, &r);
1742 read_unlock(&tasklist_lock);
1743 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1746 asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
1748 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
1750 return getrusage(current, who, ru);
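/*
 * Illustrative userspace sketch (not kernel code, helper name hypothetical):
 * sampling a process's own accumulated CPU time and fault counts, which
 * k_getrusage() above gathers by summing the thread group under
 * tasklist_lock.
 */
#if 0
#include <sys/resource.h>
#include <stdio.h>

static void example_print_rusage(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == 0)
		printf("user %ld.%06lds, minor faults %ld\n",
		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
		       ru.ru_minflt);
}
#endif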
1753 asmlinkage long sys_umask(int mask)
1755 mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1759 asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
1760 unsigned long arg4, unsigned long arg5)
1764 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
1769 case PR_SET_PDEATHSIG:
1770 if (!valid_signal(arg2)) {
1774 current->pdeath_signal = arg2;
1776 case PR_GET_PDEATHSIG:
1777 error = put_user(current->pdeath_signal, (int __user *)arg2);
1779 case PR_GET_DUMPABLE:
1780 error = current->mm->dumpable;
1782 case PR_SET_DUMPABLE:
1783 if (arg2 < 0 || arg2 > 2) {
1787 current->mm->dumpable = arg2;
1790 case PR_SET_UNALIGN:
1791 error = SET_UNALIGN_CTL(current, arg2);
1793 case PR_GET_UNALIGN:
1794 error = GET_UNALIGN_CTL(current, arg2);
1797 error = SET_FPEMU_CTL(current, arg2);
1800 error = GET_FPEMU_CTL(current, arg2);
1803 error = SET_FPEXC_CTL(current, arg2);
1806 error = GET_FPEXC_CTL(current, arg2);
1809 error = PR_TIMING_STATISTICAL;
1812 if (arg2 == PR_TIMING_STATISTICAL)
1818 case PR_GET_KEEPCAPS:
1819 if (current->keep_capabilities)
1822 case PR_SET_KEEPCAPS:
1823 if (arg2 != 0 && arg2 != 1) {
1827 current->keep_capabilities = arg2;
1830 struct task_struct *me = current;
1831 unsigned char ncomm[sizeof(me->comm)];
1833 ncomm[sizeof(me->comm)-1] = 0;
1834 if (strncpy_from_user(ncomm, (char __user *)arg2,
1835 sizeof(me->comm)-1) < 0)
1837 set_task_comm(me, ncomm);
1841 struct task_struct *me = current;
1842 unsigned char tcomm[sizeof(me->comm)];
1844 get_task_comm(tcomm, me);
1845 if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))