X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=kernel%2Fsignal.c;h=6c0958e52ea7bdac212353c2b42bbd59e702ce45;hb=eb6d42ea17329745d7d712d3aa3bb84ec1da9c85;hp=0298bd3d431be0f33b59ff5eba603ad6168fdbeb;hpb=db51aeccd7097ce19a522a4c5ff91c320f870e2b;p=linux-2.6

diff --git a/kernel/signal.c b/kernel/signal.c
index 0298bd3d43..6c0958e52e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -231,6 +231,40 @@ void flush_signals(struct task_struct *t)
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 }
 
+static void __flush_itimer_signals(struct sigpending *pending)
+{
+	sigset_t signal, retain;
+	struct sigqueue *q, *n;
+
+	signal = pending->signal;
+	sigemptyset(&retain);
+
+	list_for_each_entry_safe(q, n, &pending->list, list) {
+		int sig = q->info.si_signo;
+
+		if (likely(q->info.si_code != SI_TIMER)) {
+			sigaddset(&retain, sig);
+		} else {
+			sigdelset(&signal, sig);
+			list_del_init(&q->list);
+			__sigqueue_free(q);
+		}
+	}
+
+	sigorsets(&pending->signal, &signal, &retain);
+}
+
+void flush_itimer_signals(void)
+{
+	struct task_struct *tsk = current;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tsk->sighand->siglock, flags);
+	__flush_itimer_signals(&tsk->pending);
+	__flush_itimer_signals(&tsk->signal->shared_pending);
+	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+}
+
 void ignore_signals(struct task_struct *t)
 {
 	int i;
@@ -533,6 +567,7 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 static int check_kill_permission(int sig, struct siginfo *info,
 				 struct task_struct *t)
 {
+	struct pid *sid;
 	int error;
 
 	if (!valid_signal(sig))
@@ -545,11 +580,22 @@ static int check_kill_permission(int sig, struct siginfo *info,
 	if (error)
 		return error;
 
-	if (((sig != SIGCONT) || (task_session_nr(current) != task_session_nr(t)))
-	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
-	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
-	    && !capable(CAP_KILL))
-		return -EPERM;
+	if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
+	    (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
+	    !capable(CAP_KILL)) {
+		switch (sig) {
+		case SIGCONT:
+			sid = task_session(t);
+			/*
+			 * We don't return the error if sid == NULL. The
+			 * task was unhashed, the caller must notice this.
+			 */
+			if (!sid || sid == task_session(current))
+				break;
+		default:
+			return -EPERM;
+		}
+	}
 
 	return security_task_kill(t, info, sig, 0);
 }
@@ -558,24 +604,25 @@ static int check_kill_permission(int sig, struct siginfo *info,
 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
 
 /*
- * Handle magic process-wide effects of stop/continue signals.
- * Unlike the signal actions, these happen immediately at signal-generation
+ * Handle magic process-wide effects of stop/continue signals. Unlike
+ * the signal actions, these happen immediately at signal-generation
  * time regardless of blocking, ignoring, or handling.  This does the
  * actual continuing for SIGCONT, but not the actual stopping for stop
- * signals.  The process stop is done as a signal action for SIG_DFL.
+ * signals. The process stop is done as a signal action for SIG_DFL.
+ *
+ * Returns true if the signal should be actually delivered, otherwise
+ * it should be dropped.
  */
-static void handle_stop_signal(int sig, struct task_struct *p)
+static int prepare_signal(int sig, struct task_struct *p)
 {
 	struct signal_struct *signal = p->signal;
 	struct task_struct *t;
 
-	if (signal->flags & SIGNAL_GROUP_EXIT)
+	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
 		/*
-		 * The process is in the middle of dying already.
+		 * The process is in the middle of dying, nothing to do.
 		 */
-		return;
-
-	if (sig_kernel_stop(sig)) {
+	} else if (sig_kernel_stop(sig)) {
 		/*
 		 * This is a stop signal.  Remove SIGCONT from all queues.
 		 */
@@ -632,6 +679,11 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			why |= SIGNAL_CLD_STOPPED;
 
 		if (why) {
+			/*
+			 * The first thread which returns from finish_stop()
+			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
+			 * notify its parent. See get_signal_to_deliver().
+			 */
 			signal->flags = why | SIGNAL_STOP_CONTINUED;
 			signal->group_stop_count = 0;
 			signal->group_exit_code = 0;
@@ -643,13 +695,105 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			 */
 			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
 		}
-	} else if (sig == SIGKILL) {
+	}
+
+	return !sig_ignored(p, sig);
+}
+
+/*
+ * Test if P wants to take SIG.  After we've checked all threads with this,
+ * it's equivalent to finding no threads not blocking SIG.  Any threads not
+ * blocking SIG were ruled out because they are not running and already
+ * have pending signals.  Such threads will dequeue from the shared queue
+ * as soon as they're available, so putting the signal on the shared queue
+ * will be equivalent to sending it to one such thread.
+ */
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+	if (sigismember(&p->blocked, sig))
+		return 0;
+	if (p->flags & PF_EXITING)
+		return 0;
+	if (sig == SIGKILL)
+		return 1;
+	if (task_is_stopped_or_traced(p))
+		return 0;
+	return task_curr(p) || !signal_pending(p);
+}
+
+static void complete_signal(int sig, struct task_struct *p, int group)
+{
+	struct signal_struct *signal = p->signal;
+	struct task_struct *t;
+
+	/*
+	 * Now find a thread we can wake up to take the signal off the queue.
+	 *
+	 * If the main thread wants the signal, it gets first crack.
+	 * Probably the least surprising to the average bear.
+	 */
+	if (wants_signal(sig, p))
+		t = p;
+	else if (!group || thread_group_empty(p))
+		/*
+		 * There is just one thread and it does not need to be woken.
+		 * It will dequeue unblocked signals before it runs again.
+		 */
+		return;
+	else {
+		/*
+		 * Otherwise try to find a suitable thread.
+		 */
+		t = signal->curr_target;
+		while (!wants_signal(sig, t)) {
+			t = next_thread(t);
+			if (t == signal->curr_target)
+				/*
+				 * No thread needs to be woken.
+				 * Any eligible threads will see
+				 * the signal in the queue soon.
+				 */
+				return;
+		}
+		signal->curr_target = t;
+	}
+
+	/*
+	 * Found a killable thread.  If the signal will be fatal,
+	 * then start taking the whole group down immediately.
+	 */
+	if (sig_fatal(p, sig) &&
+	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
+	    !sigismember(&t->real_blocked, sig) &&
+	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
 		/*
-		 * Make sure that any pending stop signal already dequeued
-		 * is undone by the wakeup for SIGKILL.
+		 * This signal will be fatal to the whole group.
 		 */
-		signal->flags &= ~SIGNAL_STOP_DEQUEUED;
+		if (!sig_kernel_coredump(sig)) {
+			/*
+			 * Start a group exit and wake everybody up.
+			 * This way we don't have other threads
+			 * running and doing things after a slower
+			 * thread has the fatal signal pending.
+			 */
+			signal->flags = SIGNAL_GROUP_EXIT;
+			signal->group_exit_code = sig;
+			signal->group_stop_count = 0;
+			t = p;
+			do {
+				sigaddset(&t->pending.signal, SIGKILL);
+				signal_wake_up(t, 1);
+			} while_each_thread(p, t);
+			return;
+		}
 	}
+
+	/*
+	 * The signal is already in the shared-pending queue.
+	 * Tell the chosen thread to wake up and dequeue it.
+	 */
+	signal_wake_up(t, sig == SIGKILL);
+	return;
 }
 
 static inline int legacy_queue(struct sigpending *signals, int sig)
@@ -658,26 +802,23 @@ static inline int legacy_queue(struct sigpending *signals, int sig)
 }
 
 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
-			struct sigpending *signals)
+			int group)
 {
+	struct sigpending *pending;
 	struct sigqueue *q;
 
 	assert_spin_locked(&t->sighand->siglock);
-	handle_stop_signal(sig, t);
+	if (!prepare_signal(sig, t))
+		return 0;
+
+	pending = group ? &t->signal->shared_pending : &t->pending;
 	/*
 	 * Short-circuit ignored signals and support queuing
 	 * exactly one non-rt signal, so that we can get more
 	 * detailed information about the cause of the signal.
 	 */
-	if (sig_ignored(t, sig) || legacy_queue(signals, sig))
+	if (legacy_queue(pending, sig))
 		return 0;
-
-	/*
-	 * Deliver the signal to listening signalfds. This must be called
-	 * with the sighand lock held.
-	 */
-	signalfd_notify(t, sig);
-
 	/*
 	 * fast-pathed signals for kernel-internal things like SIGSTOP
 	 * or SIGKILL.
@@ -697,7 +838,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 		    (is_si_special(info) ||
 		     info->si_code >= 0)));
 	if (q) {
-		list_add_tail(&q->list, &signals->list);
+		list_add_tail(&q->list, &pending->list);
 		switch ((unsigned long) info) {
 		case (unsigned long) SEND_SIG_NOINFO:
 			q->info.si_signo = sig;
@@ -727,8 +868,10 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	}
 
 out_set:
-	sigaddset(&signals->signal, sig);
-	return 1;
+	signalfd_notify(t, sig);
+	sigaddset(&pending->signal, sig);
+	complete_signal(sig, t, group);
+	return 0;
 }
 
 int print_fatal_signals;
@@ -763,18 +906,16 @@ static int __init setup_print_fatal_signals(char *str)
 
 __setup("print-fatal-signals=", setup_print_fatal_signals);
 
+int
+__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+{
+	return send_signal(sig, info, p, 1);
+}
+
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
-	int ret;
-
-	ret = send_signal(sig, info, t, &t->pending);
-	if (ret <= 0)
-		return ret;
-
-	if (!sigismember(&t->blocked, sig))
-		signal_wake_up(t, sig == SIGKILL);
-	return 0;
+	return send_signal(sig, info, t, 0);
 }
 
 /*
@@ -785,7 +926,8 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
  * since we do not want to have a signal handler that was blocked
  * be invoked when user space had explicitly blocked it.
  *
- * We don't want to have recursive SIGSEGV's etc, for example.
+ * We don't want to have recursive SIGSEGV's etc, for example,
+ * that is why we also clear SIGNAL_UNKILLABLE.
  */
 int
 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
@@ -805,6 +947,8 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 			recalc_sigpending_and_wake(t);
 		}
 	}
+	if (action->sa.sa_handler == SIG_DFL)
+		t->signal->flags &= ~SIGNAL_UNKILLABLE;
 	ret = specific_send_sig_info(sig, info, t);
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
@@ -817,120 +961,6 @@ force_sig_specific(int sig, struct task_struct *t)
 	force_sig_info(sig, SEND_SIG_FORCED, t);
 }
 
-/*
- * Test if P wants to take SIG.  After we've checked all threads with this,
- * it's equivalent to finding no threads not blocking SIG.  Any threads not
- * blocking SIG were ruled out because they are not running and already
- * have pending signals.  Such threads will dequeue from the shared queue
- * as soon as they're available, so putting the signal on the shared queue
- * will be equivalent to sending it to one such thread.
- */
-static inline int wants_signal(int sig, struct task_struct *p)
-{
-	if (sigismember(&p->blocked, sig))
-		return 0;
-	if (p->flags & PF_EXITING)
-		return 0;
-	if (sig == SIGKILL)
-		return 1;
-	if (task_is_stopped_or_traced(p))
-		return 0;
-	return task_curr(p) || !signal_pending(p);
-}
-
-static void
-__group_complete_signal(int sig, struct task_struct *p)
-{
-	struct signal_struct *signal = p->signal;
-	struct task_struct *t;
-
-	/*
-	 * Now find a thread we can wake up to take the signal off the queue.
-	 *
-	 * If the main thread wants the signal, it gets first crack.
-	 * Probably the least surprising to the average bear.
-	 */
-	if (wants_signal(sig, p))
-		t = p;
-	else if (thread_group_empty(p))
-		/*
-		 * There is just one thread and it does not need to be woken.
-		 * It will dequeue unblocked signals before it runs again.
-		 */
-		return;
-	else {
-		/*
-		 * Otherwise try to find a suitable thread.
-		 */
-		t = signal->curr_target;
-		while (!wants_signal(sig, t)) {
-			t = next_thread(t);
-			if (t == signal->curr_target)
-				/*
-				 * No thread needs to be woken.
-				 * Any eligible threads will see
-				 * the signal in the queue soon.
-				 */
-				return;
-		}
-		signal->curr_target = t;
-	}
-
-	/*
-	 * Found a killable thread.  If the signal will be fatal,
-	 * then start taking the whole group down immediately.
-	 */
-	if (sig_fatal(p, sig) && !(signal->flags & SIGNAL_GROUP_EXIT) &&
-	    !sigismember(&t->real_blocked, sig) &&
-	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
-		/*
-		 * This signal will be fatal to the whole group.
-		 */
-		if (!sig_kernel_coredump(sig)) {
-			/*
-			 * Start a group exit and wake everybody up.
-			 * This way we don't have other threads
-			 * running and doing things after a slower
-			 * thread has the fatal signal pending.
-			 */
-			signal->flags = SIGNAL_GROUP_EXIT;
-			signal->group_exit_code = sig;
-			signal->group_stop_count = 0;
-			t = p;
-			do {
-				sigaddset(&t->pending.signal, SIGKILL);
-				signal_wake_up(t, 1);
-			} while_each_thread(p, t);
-			return;
-		}
-	}
-
-	/*
-	 * The signal is already in the shared-pending queue.
-	 * Tell the chosen thread to wake up and dequeue it.
-	 */
-	signal_wake_up(t, sig == SIGKILL);
-	return;
-}
-
-int
-__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
-{
-	int ret;
-
-	/*
-	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
-	 * We always use the shared queue for process-wide signals,
-	 * to avoid several races.
-	 */
-	ret = send_signal(sig, info, p, &p->signal->shared_pending);
-	if (ret <= 0)
-		return ret;
-
-	__group_complete_signal(sig, p);
-	return 0;
-}
-
 /*
  * Nuke all other threads in the group.
  */
@@ -1244,89 +1274,60 @@ void sigqueue_free(struct sigqueue *q)
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 	/*
-	 * If the signal is still pending remove it from the
-	 * pending queue. We must hold ->siglock while testing
-	 * q->list to serialize with collect_signal().
+	 * We must hold ->siglock while testing q->list
+	 * to serialize with collect_signal() or with
+	 * __exit_signal()->flush_sigqueue().
 	 */
 	spin_lock_irqsave(lock, flags);
+	q->flags &= ~SIGQUEUE_PREALLOC;
+	/*
+	 * If it is queued it will be freed when dequeued,
+	 * like the "regular" sigqueue.
+	 */
 	if (!list_empty(&q->list))
-		list_del_init(&q->list);
+		q = NULL;
 	spin_unlock_irqrestore(lock, flags);
 
-	q->flags &= ~SIGQUEUE_PREALLOC;
-	__sigqueue_free(q);
+	if (q)
+		__sigqueue_free(q);
 }
 
-static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
-		struct sigpending *pending)
+int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
 {
-	handle_stop_signal(sig, t);
+	int sig = q->info.si_signo;
+	struct sigpending *pending;
+	unsigned long flags;
+	int ret;
+
+	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+	ret = -1;
+	if (!likely(lock_task_sighand(t, &flags)))
+		goto ret;
+
+	ret = 1; /* the signal is ignored */
+	if (!prepare_signal(sig, t))
+		goto out;
+
+	ret = 0;
 	if (unlikely(!list_empty(&q->list))) {
 		/*
 		 * If an SI_TIMER entry is already queue just increment
 		 * the overrun count.
 		 */
-		BUG_ON(q->info.si_code != SI_TIMER);
 		q->info.si_overrun++;
-		return 0;
+		goto out;
 	}
-	if (sig_ignored(t, sig))
-		return 1;
 
 	signalfd_notify(t, sig);
+	pending = group ? &t->signal->shared_pending : &t->pending;
 	list_add_tail(&q->list, &pending->list);
 	sigaddset(&pending->signal, sig);
-	return 0;
-}
-
-int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
-{
-	unsigned long flags;
-	int ret = -1;
-
-	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-
-	/*
-	 * The rcu based delayed sighand destroy makes it possible to
-	 * run this without tasklist lock held. The task struct itself
-	 * cannot go away as create_timer did get_task_struct().
-	 *
-	 * We return -1, when the task is marked exiting, so
-	 * posix_timer_event can redirect it to the group leader
-	 */
-	if (!likely(lock_task_sighand(p, &flags)))
-		goto out_err;
-
-	ret = do_send_sigqueue(sig, q, p, &p->pending);
-
-	if (!sigismember(&p->blocked, sig))
-		signal_wake_up(p, sig == SIGKILL);
-
-	unlock_task_sighand(p, &flags);
-out_err:
-	return ret;
-}
-
-int
-send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
-{
-	unsigned long flags;
-	int ret;
-
-	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-
-	/* Since it_lock is held, p->sighand cannot be NULL. */
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-
-	ret = do_send_sigqueue(sig, q, p, &p->signal->shared_pending);
-
-	__group_complete_signal(sig, p);
-
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
-
+	complete_signal(sig, t, group);
+out:
+	unlock_task_sighand(t, &flags);
+ret:
 	return ret;
 }
@@ -1657,7 +1658,8 @@ static int do_signal_stop(int signr)
 	} else {
 		struct task_struct *t;
 
-		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+		if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
+					!= SIGNAL_STOP_DEQUEUED) ||
 		    unlikely(signal_group_exit(sig)))
 			return 0;
 		/*
@@ -1747,7 +1749,11 @@ relock:
 	try_to_freeze();
 
 	spin_lock_irq(&sighand->siglock);
-
+	/*
+	 * Every stopped thread goes here after wakeup. Check to see if
+	 * we should notify the parent, prepare_signal(SIGCONT) encodes
+	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
+	 */
 	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
 		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
 				? CLD_CONTINUED : CLD_STOPPED;
@@ -1799,7 +1805,8 @@ relock:
 		/*
 		 * Global init gets no signals it doesn't want.
 		 */
-		if (is_global_init(current))
+		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
+		    !signal_group_exit(signal))
 			continue;
 
 		if (sig_kernel_stop(signr)) {
@@ -1842,9 +1849,10 @@ relock:
 		 * Anything else is fatal, maybe with a core dump.
 		 */
 		current->flags |= PF_SIGNALED;
-		if ((signr != SIGKILL) && print_fatal_signals)
-			print_fatal_signal(regs, signr);
+
 		if (sig_kernel_coredump(signr)) {
+			if (print_fatal_signals)
+				print_fatal_signal(regs, signr);
 			/*
 			 * If it was able to dump core, this kills all
 			 * other threads in the group and synchronizes with
@@ -2572,7 +2580,7 @@ asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
 		current->state = TASK_INTERRUPTIBLE;
 		schedule();
 
-		set_thread_flag(TIF_RESTORE_SIGMASK);
+		set_restore_sigmask();
 		return -ERESTARTNOHAND;
 	}
 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
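
The send_sigqueue() rework above keeps the preallocated SI_TIMER sigqueue entry queued and, when the timer fires again before the pending signal is dequeued, only increments q->info.si_overrun. That overrun accounting is directly observable from userspace through POSIX timers. A minimal sketch, not part of the patch: it assumes Linux with glibc, the SIGRTMIN choice and the 10 ms period are arbitrary, and older glibc needs -lrt at link time.

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <time.h>
	#include <unistd.h>

	int main(void)
	{
		struct sigevent sev;
		struct itimerspec its;
		sigset_t set;
		timer_t timerid;

		/* Block the timer signal so expirations queue up instead of
		 * being delivered; repeats then count as overruns. */
		sigemptyset(&set);
		sigaddset(&set, SIGRTMIN);
		sigprocmask(SIG_BLOCK, &set, NULL);

		memset(&sev, 0, sizeof(sev));
		sev.sigev_notify = SIGEV_SIGNAL;
		sev.sigev_signo = SIGRTMIN;
		if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) != 0)
			return 1;

		/* 10 ms period: many expirations pile up while we sleep. */
		memset(&its, 0, sizeof(its));
		its.it_value.tv_nsec = 10 * 1000 * 1000;
		its.it_interval.tv_nsec = 10 * 1000 * 1000;
		timer_settime(timerid, 0, &its, NULL);

		sleep(1);

		/* Dequeue the single queued instance; the expirations that
		 * could not queue again are reported as the overrun. */
		sigwaitinfo(&set, NULL);
		printf("overrun: %d\n", timer_getoverrun(timerid));

		timer_delete(timerid);
		return 0;
	}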
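
complete_signal() above escalates a fatal, non-coredumping signal into a group exit: every thread in the group gets SIGKILL added to its pending set and is woken, so no thread keeps running once one of them has the fatal signal pending. From userspace this is the familiar rule that one default-fatal signal takes down all threads of a process. A small sketch of that behaviour, again not part of the patch; compile with -pthread, and the thread count and sleep are arbitrary.

	#include <pthread.h>
	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static void *worker(void *arg)
	{
		(void)arg;
		for (;;)
			pause();	/* idle; dies with the group */
		return NULL;
	}

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			pthread_t tid;
			int i;

			/* Child: a few extra threads, then one fatal signal. */
			for (i = 0; i < 3; i++)
				pthread_create(&tid, NULL, worker, NULL);
			usleep(100000);		/* let the threads start */
			raise(SIGTERM);		/* default action: fatal, no coredump */
			pause();		/* never reached */
		} else {
			int status;

			/* Parent: the whole child group exits on SIGTERM. */
			waitpid(pid, &status, 0);
			if (WIFSIGNALED(status))
				printf("group killed by signal %d\n",
				       WTERMSIG(status));
		}
		return 0;
	}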