[PATCH] introduce sig_needs_tasklist() helper
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *              Changes to use preallocated sigqueue structures
10  *              to allow signals to be sent reliably.
11  */
12
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <linux/capability.h>
29 #include <asm/param.h>
30 #include <asm/uaccess.h>
31 #include <asm/unistd.h>
32 #include <asm/siginfo.h>
33
34 /*
35  * SLAB caches for signal bits.
36  */
37
38 static kmem_cache_t *sigqueue_cachep;
39
40 /*
41  * In POSIX a signal is sent either to a specific thread (Linux task)
42  * or to the process as a whole (Linux thread group).  How the signal
43  * is sent determines whether it's to one thread or the whole group,
44  * which determines which signal mask(s) are involved in blocking it
45  * from being delivered until later.  When the signal is delivered,
46  * either it's caught or ignored by a user handler or it has a default
47  * effect that applies to the whole thread group (POSIX process).
48  *
49  * The possible effects an unblocked signal set to SIG_DFL can have are:
50  *   ignore     - Nothing Happens
51  *   terminate  - kill the process, i.e. all threads in the group,
52  *                similar to exit_group.  The group leader (only) reports
53  *                WIFSIGNALED status to its parent.
54  *   coredump   - write a core dump file describing all threads using
55  *                the same mm and then kill all those threads
56  *   stop       - stop all the threads in the group, i.e. TASK_STOPPED state
57  *
58  * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
59  * Other signals, when not blocked and set to SIG_DFL, behave as follows.
60  * The job control signals also have other special effects.
61  *
62  *      +--------------------+------------------+
63  *      |  POSIX signal      |  default action  |
64  *      +--------------------+------------------+
65  *      |  SIGHUP            |  terminate       |
66  *      |  SIGINT            |  terminate       |
67  *      |  SIGQUIT           |  coredump        |
68  *      |  SIGILL            |  coredump        |
69  *      |  SIGTRAP           |  coredump        |
70  *      |  SIGABRT/SIGIOT    |  coredump        |
71  *      |  SIGBUS            |  coredump        |
72  *      |  SIGFPE            |  coredump        |
73  *      |  SIGKILL           |  terminate(+)    |
74  *      |  SIGUSR1           |  terminate       |
75  *      |  SIGSEGV           |  coredump        |
76  *      |  SIGUSR2           |  terminate       |
77  *      |  SIGPIPE           |  terminate       |
78  *      |  SIGALRM           |  terminate       |
79  *      |  SIGTERM           |  terminate       |
80  *      |  SIGCHLD           |  ignore          |
81  *      |  SIGCONT           |  ignore(*)       |
82  *      |  SIGSTOP           |  stop(*)(+)      |
83  *      |  SIGTSTP           |  stop(*)         |
84  *      |  SIGTTIN           |  stop(*)         |
85  *      |  SIGTTOU           |  stop(*)         |
86  *      |  SIGURG            |  ignore          |
87  *      |  SIGXCPU           |  coredump        |
88  *      |  SIGXFSZ           |  coredump        |
89  *      |  SIGVTALRM         |  terminate       |
90  *      |  SIGPROF           |  terminate       |
91  *      |  SIGPOLL/SIGIO     |  terminate       |
92  *      |  SIGSYS/SIGUNUSED  |  coredump        |
93  *      |  SIGSTKFLT         |  terminate       |
94  *      |  SIGWINCH          |  ignore          |
95  *      |  SIGPWR            |  terminate       |
96  *      |  SIGRTMIN-SIGRTMAX |  terminate       |
97  *      +--------------------+------------------+
98  *      |  non-POSIX signal  |  default action  |
99  *      +--------------------+------------------+
100  *      |  SIGEMT            |  coredump        |
101  *      +--------------------+------------------+
102  *
103  * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
104  * (*) Special job control effects:
105  * When SIGCONT is sent, it resumes the process (all threads in the group)
106  * from TASK_STOPPED state and also clears any pending/queued stop signals
107  * (any of those marked with "stop(*)").  This happens regardless of blocking,
108  * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
109  * any pending/queued SIGCONT signals; this happens regardless of blocking,
110  * catching, or ignoring the stop signal, though (except for SIGSTOP) the
111  * default action of stopping the process may happen later or never.
112  */
113
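
As an aside for readers of the table and the job-control notes above: the stop/continue behaviour they describe is observable from user space. The following is a minimal, self-contained sketch (not part of this file), assuming only the POSIX wait flags:

#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();

        if (child == 0) {                       /* child: idle until signalled */
                for (;;)
                        pause();
        }

        kill(child, SIGSTOP);                   /* stop(*)(+): cannot be caught, blocked or ignored */
        waitpid(child, NULL, WUNTRACED);        /* parent is told about the stop */

        kill(child, SIGCONT);                   /* resumes the group, clears pending stop signals */
        waitpid(child, NULL, WCONTINUED);       /* reported as CLD_CONTINUED */

        kill(child, SIGTERM);                   /* default action: terminate */
        waitpid(child, NULL, 0);
        return 0;
}
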
114 #ifdef SIGEMT
115 #define M_SIGEMT        M(SIGEMT)
116 #else
117 #define M_SIGEMT        0
118 #endif
119
120 #if SIGRTMIN > BITS_PER_LONG
121 #define M(sig) (1ULL << ((sig)-1))
122 #else
123 #define M(sig) (1UL << ((sig)-1))
124 #endif
125 #define T(sig, mask) (M(sig) & (mask))
126
127 #define SIG_KERNEL_ONLY_MASK (\
128         M(SIGKILL)   |  M(SIGSTOP)                                   )
129
130 #define SIG_KERNEL_STOP_MASK (\
131         M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
132
133 #define SIG_KERNEL_COREDUMP_MASK (\
134         M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
135         M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
136         M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
137
138 #define SIG_KERNEL_IGNORE_MASK (\
139         M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
140
141 #define sig_kernel_only(sig) \
142                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
143 #define sig_kernel_coredump(sig) \
144                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
145 #define sig_kernel_ignore(sig) \
146                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
147 #define sig_kernel_stop(sig) \
148                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
149
150 #define sig_needs_tasklist(sig) \
151                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK | M(SIGCONT)))
152
153 #define sig_user_defined(t, signr) \
154         (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&  \
155          ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
156
157 #define sig_fatal(t, signr) \
158         (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
159          (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
160
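
The classification above is a pure bitmask test. A user-space sketch of the same M()/T() technique (illustrative only, using the classic Linux signal numbers below SIGRTMIN):

#include <signal.h>
#include <stdio.h>

#define M(sig)        (1UL << ((sig) - 1))
#define T(sig, mask)  (M(sig) & (mask))

#define STOP_MASK (M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU))

int main(void)
{
        printf("SIGTSTP is a stop signal: %s\n", T(SIGTSTP, STOP_MASK) ? "yes" : "no");
        printf("SIGTERM is a stop signal: %s\n", T(SIGTERM, STOP_MASK) ? "yes" : "no");
        /* real-time signals never match: the kernel guards every test with sig < SIGRTMIN */
        return 0;
}
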
161 static int sig_ignored(struct task_struct *t, int sig)
162 {
163         void __user * handler;
164
165         /*
166          * Tracers always want to know about signals..
167          */
168         if (t->ptrace & PT_PTRACED)
169                 return 0;
170
171         /*
172          * Blocked signals are never ignored, since the
173          * signal handler may change by the time it is
174          * unblocked.
175          */
176         if (sigismember(&t->blocked, sig))
177                 return 0;
178
179         /* Is it explicitly or implicitly ignored? */
180         handler = t->sighand->action[sig-1].sa.sa_handler;
181         return   handler == SIG_IGN ||
182                 (handler == SIG_DFL && sig_kernel_ignore(sig));
183 }
184
185 /*
186  * Re-calculate pending state from the set of locally pending
187  * signals, globally pending signals, and blocked signals.
188  */
189 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
190 {
191         unsigned long ready;
192         long i;
193
194         switch (_NSIG_WORDS) {
195         default:
196                 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
197                         ready |= signal->sig[i] &~ blocked->sig[i];
198                 break;
199
200         case 4: ready  = signal->sig[3] &~ blocked->sig[3];
201                 ready |= signal->sig[2] &~ blocked->sig[2];
202                 ready |= signal->sig[1] &~ blocked->sig[1];
203                 ready |= signal->sig[0] &~ blocked->sig[0];
204                 break;
205
206         case 2: ready  = signal->sig[1] &~ blocked->sig[1];
207                 ready |= signal->sig[0] &~ blocked->sig[0];
208                 break;
209
210         case 1: ready  = signal->sig[0] &~ blocked->sig[0];
211         }
212         return ready != 0;
213 }
214
215 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
216
217 fastcall void recalc_sigpending_tsk(struct task_struct *t)
218 {
219         if (t->signal->group_stop_count > 0 ||
220             (freezing(t)) ||
221             PENDING(&t->pending, &t->blocked) ||
222             PENDING(&t->signal->shared_pending, &t->blocked))
223                 set_tsk_thread_flag(t, TIF_SIGPENDING);
224         else
225                 clear_tsk_thread_flag(t, TIF_SIGPENDING);
226 }
227
228 void recalc_sigpending(void)
229 {
230         recalc_sigpending_tsk(current);
231 }
232
233 /* Given the mask, find the first available signal that should be serviced. */
234
235 static int
236 next_signal(struct sigpending *pending, sigset_t *mask)
237 {
238         unsigned long i, *s, *m, x;
239         int sig = 0;
240         
241         s = pending->signal.sig;
242         m = mask->sig;
243         switch (_NSIG_WORDS) {
244         default:
245                 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
246                         if ((x = *s &~ *m) != 0) {
247                                 sig = ffz(~x) + i*_NSIG_BPW + 1;
248                                 break;
249                         }
250                 break;
251
252         case 2: if ((x = s[0] &~ m[0]) != 0)
253                         sig = 1;
254                 else if ((x = s[1] &~ m[1]) != 0)
255                         sig = _NSIG_BPW + 1;
256                 else
257                         break;
258                 sig += ffz(~x);
259                 break;
260
261         case 1: if ((x = *s &~ *m) != 0)
262                         sig = ffz(~x) + 1;
263                 break;
264         }
265         
266         return sig;
267 }
268
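
For a nonzero word, the kernel's ffz(~x) + 1 is the ordinary "find first set" operation, so the selection next_signal() performs can be sketched in user space with ffs() (illustrative only; a single 32-signal word is assumed):

#include <stdio.h>
#include <strings.h>            /* ffs() */

int main(void)
{
        unsigned int pending = (1U << (10 - 1)) | (1U << (2 - 1)); /* signals 10 and 2 pending */
        unsigned int blocked = (1U << (2 - 1));                    /* signal 2 is blocked */
        unsigned int ready   = pending & ~blocked;

        /* ffs() is 1-based and returns 0 when nothing is ready, like next_signal() */
        printf("next signal to deliver: %d\n", ffs((int)ready));   /* prints 10 */
        return 0;
}
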
269 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
270                                          int override_rlimit)
271 {
272         struct sigqueue *q = NULL;
273
274         atomic_inc(&t->user->sigpending);
275         if (override_rlimit ||
276             atomic_read(&t->user->sigpending) <=
277                         t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
278                 q = kmem_cache_alloc(sigqueue_cachep, flags);
279         if (unlikely(q == NULL)) {
280                 atomic_dec(&t->user->sigpending);
281         } else {
282                 INIT_LIST_HEAD(&q->list);
283                 q->flags = 0;
284                 q->user = get_uid(t->user);
285         }
286         return(q);
287 }
288
289 static void __sigqueue_free(struct sigqueue *q)
290 {
291         if (q->flags & SIGQUEUE_PREALLOC)
292                 return;
293         atomic_dec(&q->user->sigpending);
294         free_uid(q->user);
295         kmem_cache_free(sigqueue_cachep, q);
296 }
297
298 static void flush_sigqueue(struct sigpending *queue)
299 {
300         struct sigqueue *q;
301
302         sigemptyset(&queue->signal);
303         while (!list_empty(&queue->list)) {
304                 q = list_entry(queue->list.next, struct sigqueue , list);
305                 list_del_init(&q->list);
306                 __sigqueue_free(q);
307         }
308 }
309
310 /*
311  * Flush all pending signals for a task.
312  */
313
314 void
315 flush_signals(struct task_struct *t)
316 {
317         unsigned long flags;
318
319         spin_lock_irqsave(&t->sighand->siglock, flags);
320         clear_tsk_thread_flag(t,TIF_SIGPENDING);
321         flush_sigqueue(&t->pending);
322         flush_sigqueue(&t->signal->shared_pending);
323         spin_unlock_irqrestore(&t->sighand->siglock, flags);
324 }
325
326 /*
327  * This function expects the tasklist_lock write-locked.
328  */
329 void __exit_sighand(struct task_struct *tsk)
330 {
331         struct sighand_struct * sighand = tsk->sighand;
332
333         /* Ok, we're done with the signal handlers */
334         tsk->sighand = NULL;
335         if (atomic_dec_and_test(&sighand->count))
336                 kmem_cache_free(sighand_cachep, sighand);
337 }
338
339 void exit_sighand(struct task_struct *tsk)
340 {
341         write_lock_irq(&tasklist_lock);
342         rcu_read_lock();
343         if (tsk->sighand != NULL) {
344                 struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
345                 spin_lock(&sighand->siglock);
346                 __exit_sighand(tsk);
347                 spin_unlock(&sighand->siglock);
348         }
349         rcu_read_unlock();
350         write_unlock_irq(&tasklist_lock);
351 }
352
353 /*
354  * This function expects the tasklist_lock write-locked.
355  */
356 void __exit_signal(struct task_struct *tsk)
357 {
358         struct signal_struct * sig = tsk->signal;
359         struct sighand_struct * sighand;
360
361         if (!sig)
362                 BUG();
363         if (!atomic_read(&sig->count))
364                 BUG();
365         rcu_read_lock();
366         sighand = rcu_dereference(tsk->sighand);
367         spin_lock(&sighand->siglock);
368         posix_cpu_timers_exit(tsk);
369         if (atomic_dec_and_test(&sig->count)) {
370                 posix_cpu_timers_exit_group(tsk);
371                 tsk->signal = NULL;
372                 __exit_sighand(tsk);
373                 spin_unlock(&sighand->siglock);
374                 flush_sigqueue(&sig->shared_pending);
375         } else {
376                 /*
377                  * If there is any task waiting for the group exit
378                  * then notify it:
379                  */
380                 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
381                         wake_up_process(sig->group_exit_task);
382                         sig->group_exit_task = NULL;
383                 }
384                 if (tsk == sig->curr_target)
385                         sig->curr_target = next_thread(tsk);
386                 tsk->signal = NULL;
387                 /*
388                  * Accumulate here the counters for all threads but the
389                  * group leader as they die, so they can be added into
390                  * the process-wide totals when those are taken.
391                  * The group leader stays around as a zombie as long
392                  * as there are other threads.  When it gets reaped,
393                  * the exit.c code will add its counts into these totals.
394                  * We won't ever get here for the group leader, since it
395                  * will have been the last reference on the signal_struct.
396                  */
397                 sig->utime = cputime_add(sig->utime, tsk->utime);
398                 sig->stime = cputime_add(sig->stime, tsk->stime);
399                 sig->min_flt += tsk->min_flt;
400                 sig->maj_flt += tsk->maj_flt;
401                 sig->nvcsw += tsk->nvcsw;
402                 sig->nivcsw += tsk->nivcsw;
403                 sig->sched_time += tsk->sched_time;
404                 __exit_sighand(tsk);
405                 spin_unlock(&sighand->siglock);
406                 sig = NULL;     /* Marker for below.  */
407         }
408         rcu_read_unlock();
409         clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
410         flush_sigqueue(&tsk->pending);
411         if (sig) {
412                 /*
413                  * We are cleaning up the signal_struct here.
414                  */
415                 exit_thread_group_keys(sig);
416                 kmem_cache_free(signal_cachep, sig);
417         }
418 }
419
420 void exit_signal(struct task_struct *tsk)
421 {
422         atomic_dec(&tsk->signal->live);
423
424         write_lock_irq(&tasklist_lock);
425         __exit_signal(tsk);
426         write_unlock_irq(&tasklist_lock);
427 }
428
429 /*
430  * Flush all handlers for a task.
431  */
432
433 void
434 flush_signal_handlers(struct task_struct *t, int force_default)
435 {
436         int i;
437         struct k_sigaction *ka = &t->sighand->action[0];
438         for (i = _NSIG ; i != 0 ; i--) {
439                 if (force_default || ka->sa.sa_handler != SIG_IGN)
440                         ka->sa.sa_handler = SIG_DFL;
441                 ka->sa.sa_flags = 0;
442                 sigemptyset(&ka->sa.sa_mask);
443                 ka++;
444         }
445 }
446
447
448 /* Notify the system that a driver wants to block all signals for this
449  * process, and wants to be notified if any signals at all were to be
450  * sent/acted upon.  If the notifier routine returns non-zero, then the
451  * signal will be acted upon after all.  If the notifier routine returns 0,
452  * then the signal will be blocked.  Only one block per process is
453  * allowed.  priv is a pointer to private data that the notifier routine
454  * can use to determine if the signal should be blocked or not.  */
455
456 void
457 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
458 {
459         unsigned long flags;
460
461         spin_lock_irqsave(&current->sighand->siglock, flags);
462         current->notifier_mask = mask;
463         current->notifier_data = priv;
464         current->notifier = notifier;
465         spin_unlock_irqrestore(&current->sighand->siglock, flags);
466 }
467
468 /* Notify the system that blocking has ended. */
469
470 void
471 unblock_all_signals(void)
472 {
473         unsigned long flags;
474
475         spin_lock_irqsave(&current->sighand->siglock, flags);
476         current->notifier = NULL;
477         current->notifier_data = NULL;
478         recalc_sigpending();
479         spin_unlock_irqrestore(&current->sighand->siglock, flags);
480 }
481
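
A hedged sketch of how a driver might use this notifier interface; struct my_dev, my_signals_blockable() and the lock helpers are hypothetical names, not something this file or any particular in-tree driver defines:

/* hypothetical driver context -- illustrative only */
struct my_dev {
        int hw_lock_held;
};

static int my_signals_blockable(void *priv)
{
        struct my_dev *dev = priv;

        /* 0 keeps the signal blocked while the hardware lock is held,
         * non-zero lets it be acted upon after all */
        return !dev->hw_lock_held;
}

static void my_dev_take_hw_lock(struct my_dev *dev)
{
        sigset_t mask;

        sigemptyset(&mask);
        sigaddset(&mask, SIGTSTP);      /* defer job-control stops while the lock is held */
        sigaddset(&mask, SIGTTIN);
        sigaddset(&mask, SIGTTOU);

        dev->hw_lock_held = 1;
        block_all_signals(my_signals_blockable, dev, &mask);
}

static void my_dev_drop_hw_lock(struct my_dev *dev)
{
        dev->hw_lock_held = 0;
        unblock_all_signals();
}
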
482 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
483 {
484         struct sigqueue *q, *first = NULL;
485         int still_pending = 0;
486
487         if (unlikely(!sigismember(&list->signal, sig)))
488                 return 0;
489
490         /*
491          * Collect the siginfo appropriate to this signal.  Check if
492          * there is another siginfo for the same signal.
493         */
494         list_for_each_entry(q, &list->list, list) {
495                 if (q->info.si_signo == sig) {
496                         if (first) {
497                                 still_pending = 1;
498                                 break;
499                         }
500                         first = q;
501                 }
502         }
503         if (first) {
504                 list_del_init(&first->list);
505                 copy_siginfo(info, &first->info);
506                 __sigqueue_free(first);
507                 if (!still_pending)
508                         sigdelset(&list->signal, sig);
509         } else {
510
511                 /* Ok, it wasn't in the queue.  This must be
512                    a fast-pathed signal or we must have been
513                    out of queue space.  So zero out the info.
514                  */
515                 sigdelset(&list->signal, sig);
516                 info->si_signo = sig;
517                 info->si_errno = 0;
518                 info->si_code = 0;
519                 info->si_pid = 0;
520                 info->si_uid = 0;
521         }
522         return 1;
523 }
524
525 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
526                         siginfo_t *info)
527 {
528         int sig = 0;
529
530         sig = next_signal(pending, mask);
531         if (sig) {
532                 if (current->notifier) {
533                         if (sigismember(current->notifier_mask, sig)) {
534                                 if (!(current->notifier)(current->notifier_data)) {
535                                         clear_thread_flag(TIF_SIGPENDING);
536                                         return 0;
537                                 }
538                         }
539                 }
540
541                 if (!collect_signal(sig, pending, info))
542                         sig = 0;
543                                 
544         }
545         recalc_sigpending();
546
547         return sig;
548 }
549
550 /*
551  * Dequeue a signal and return the element to the caller, which is 
552  * expected to free it.
553  *
554  * All callers have to hold the siglock.
555  */
556 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
557 {
558         int signr = __dequeue_signal(&tsk->pending, mask, info);
559         if (!signr)
560                 signr = __dequeue_signal(&tsk->signal->shared_pending,
561                                          mask, info);
562         if (signr && unlikely(sig_kernel_stop(signr))) {
563                 /*
564                  * Set a marker that we have dequeued a stop signal.  Our
565                  * caller might release the siglock and then the pending
566                  * stop signal it is about to process is no longer in the
567                  * pending bitmasks, but must still be cleared by a SIGCONT
568                  * (and overruled by a SIGKILL).  So those cases clear this
569                  * shared flag after we've set it.  Note that this flag may
570                  * remain set after the signal we return is ignored or
571                  * handled.  That doesn't matter because its only purpose
572                  * is to alert stop-signal processing code when another
573                  * processor has come along and cleared the flag.
574                  */
575                 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
576                         tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
577         }
578         if ( signr &&
579              ((info->si_code & __SI_MASK) == __SI_TIMER) &&
580              info->si_sys_private){
581                 /*
582                  * Release the siglock to ensure proper locking order
583                  * of timer locks outside of siglocks.  Note, we leave
584                  * irqs disabled here, since the posix-timers code is
585                  * about to disable them again anyway.
586                  */
587                 spin_unlock(&tsk->sighand->siglock);
588                 do_schedule_next_timer(info);
589                 spin_lock(&tsk->sighand->siglock);
590         }
591         return signr;
592 }
593
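
Since all callers must hold the siglock, the expected calling pattern is the one an arch signal-delivery loop follows; a minimal sketch, with error handling omitted:

        siginfo_t info;
        int signr;

        spin_lock_irq(&current->sighand->siglock);
        signr = dequeue_signal(current, &current->blocked, &info);
        spin_unlock_irq(&current->sighand->siglock);
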
594 /*
595  * Tell a process that it has a new active signal..
596  *
597  * NOTE! we rely on the previous spin_lock to
598  * lock interrupts for us! We can only be called with
599  * "siglock" held, and the local interrupt must
600  * have been disabled when that got acquired!
601  *
602  * No need to set need_resched since signal event passing
603  * goes through ->blocked
604  */
605 void signal_wake_up(struct task_struct *t, int resume)
606 {
607         unsigned int mask;
608
609         set_tsk_thread_flag(t, TIF_SIGPENDING);
610
611         /*
612          * For SIGKILL, we want to wake it up in the stopped/traced case.
613          * We don't check t->state here because there is a race with it
614  * executing on another processor and just now entering stopped state.
615          * By using wake_up_state, we ensure the process will wake up and
616          * handle its death signal.
617          */
618         mask = TASK_INTERRUPTIBLE;
619         if (resume)
620                 mask |= TASK_STOPPED | TASK_TRACED;
621         if (!wake_up_state(t, mask))
622                 kick_process(t);
623 }
624
625 /*
626  * Remove signals in mask from the pending set and queue.
627  * Returns 1 if any signals were found.
628  *
629  * All callers must be holding the siglock.
630  *
631  * This version takes a sigset mask and looks at all signals,
632  * not just those in the first mask word.
633  */
634 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
635 {
636         struct sigqueue *q, *n;
637         sigset_t m;
638
639         sigandsets(&m, mask, &s->signal);
640         if (sigisemptyset(&m))
641                 return 0;
642
643         signandsets(&s->signal, &s->signal, mask);
644         list_for_each_entry_safe(q, n, &s->list, list) {
645                 if (sigismember(mask, q->info.si_signo)) {
646                         list_del_init(&q->list);
647                         __sigqueue_free(q);
648                 }
649         }
650         return 1;
651 }
652 /*
653  * Remove signals in mask from the pending set and queue.
654  * Returns 1 if any signals were found.
655  *
656  * All callers must be holding the siglock.
657  */
658 static int rm_from_queue(unsigned long mask, struct sigpending *s)
659 {
660         struct sigqueue *q, *n;
661
662         if (!sigtestsetmask(&s->signal, mask))
663                 return 0;
664
665         sigdelsetmask(&s->signal, mask);
666         list_for_each_entry_safe(q, n, &s->list, list) {
667                 if (q->info.si_signo < SIGRTMIN &&
668                     (mask & sigmask(q->info.si_signo))) {
669                         list_del_init(&q->list);
670                         __sigqueue_free(q);
671                 }
672         }
673         return 1;
674 }
675
676 /*
677  * Bad permissions for sending the signal
678  */
679 static int check_kill_permission(int sig, struct siginfo *info,
680                                  struct task_struct *t)
681 {
682         int error = -EINVAL;
683         if (!valid_signal(sig))
684                 return error;
685         error = -EPERM;
686         if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
687             && ((sig != SIGCONT) ||
688                 (current->signal->session != t->signal->session))
689             && (current->euid ^ t->suid) && (current->euid ^ t->uid)
690             && (current->uid ^ t->suid) && (current->uid ^ t->uid)
691             && !capable(CAP_KILL))
692                 return error;
693
694         error = security_task_kill(t, info, sig);
695         if (!error)
696                 audit_signal_info(sig, t); /* Let audit system see the signal */
697         return error;
698 }
699
700 /* forward decl */
701 static void do_notify_parent_cldstop(struct task_struct *tsk,
702                                      int to_self,
703                                      int why);
704
705 /*
706  * Handle magic process-wide effects of stop/continue signals.
707  * Unlike the signal actions, these happen immediately at signal-generation
708  * time regardless of blocking, ignoring, or handling.  This does the
709  * actual continuing for SIGCONT, but not the actual stopping for stop
710  * signals.  The process stop is done as a signal action for SIG_DFL.
711  */
712 static void handle_stop_signal(int sig, struct task_struct *p)
713 {
714         struct task_struct *t;
715
716         if (p->signal->flags & SIGNAL_GROUP_EXIT)
717                 /*
718                  * The process is in the middle of dying already.
719                  */
720                 return;
721
722         if (sig_kernel_stop(sig)) {
723                 /*
724                  * This is a stop signal.  Remove SIGCONT from all queues.
725                  */
726                 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
727                 t = p;
728                 do {
729                         rm_from_queue(sigmask(SIGCONT), &t->pending);
730                         t = next_thread(t);
731                 } while (t != p);
732         } else if (sig == SIGCONT) {
733                 /*
734                  * Remove all stop signals from all queues,
735                  * and wake all threads.
736                  */
737                 if (unlikely(p->signal->group_stop_count > 0)) {
738                         /*
739                          * There was a group stop in progress.  We'll
740                          * pretend it finished before we got here.  We are
741                          * obliged to report it to the parent: if the
742                          * SIGSTOP happened "after" this SIGCONT, then it
743                          * would have cleared this pending SIGCONT.  If it
744                          * happened "before" this SIGCONT, then the parent
745                          * got the SIGCHLD about the stop finishing before
746                          * the continue happened.  We do the notification
747                          * now, and it's as if the stop had finished and
748                          * the SIGCHLD was pending on entry to this kill.
749                          */
750                         p->signal->group_stop_count = 0;
751                         p->signal->flags = SIGNAL_STOP_CONTINUED;
752                         spin_unlock(&p->sighand->siglock);
753                         do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
754                         spin_lock(&p->sighand->siglock);
755                 }
756                 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
757                 t = p;
758                 do {
759                         unsigned int state;
760                         rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
761                         
762                         /*
763                          * If there is a handler for SIGCONT, we must make
764                          * sure that no thread returns to user mode before
765                          * we post the signal, in case it was the only
766                          * thread eligible to run the signal handler--then
767                          * it must not do anything between resuming and
768                          * running the handler.  With the TIF_SIGPENDING
769                          * flag set, the thread will pause and acquire the
770                          * siglock that we hold now and until we've queued
771                          * the pending signal. 
772                          *
773                          * Wake up the stopped thread _after_ setting
774                          * TIF_SIGPENDING
775                          */
776                         state = TASK_STOPPED;
777                         if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
778                                 set_tsk_thread_flag(t, TIF_SIGPENDING);
779                                 state |= TASK_INTERRUPTIBLE;
780                         }
781                         wake_up_state(t, state);
782
783                         t = next_thread(t);
784                 } while (t != p);
785
786                 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
787                         /*
788                          * We were in fact stopped, and are now continued.
789                          * Notify the parent with CLD_CONTINUED.
790                          */
791                         p->signal->flags = SIGNAL_STOP_CONTINUED;
792                         p->signal->group_exit_code = 0;
793                         spin_unlock(&p->sighand->siglock);
794                         do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
795                         spin_lock(&p->sighand->siglock);
796                 } else {
797                         /*
798                          * We are not stopped, but there could be a stop
799                          * signal in the middle of being processed after
800                          * being removed from the queue.  Clear that too.
801                          */
802                         p->signal->flags = 0;
803                 }
804         } else if (sig == SIGKILL) {
805                 /*
806                  * Make sure that any pending stop signal already dequeued
807                  * is undone by the wakeup for SIGKILL.
808                  */
809                 p->signal->flags = 0;
810         }
811 }
812
813 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
814                         struct sigpending *signals)
815 {
816         struct sigqueue * q = NULL;
817         int ret = 0;
818
819         /*
820          * fast-pathed signals for kernel-internal things like SIGSTOP
821          * or SIGKILL.
822          */
823         if (info == SEND_SIG_FORCED)
824                 goto out_set;
825
826         /* Real-time signals must be queued if sent by sigqueue, or
827            some other real-time mechanism.  It is implementation
828            defined whether kill() does so.  We attempt to do so, on
829            the principle of least surprise, but since kill is not
830            allowed to fail with EAGAIN when low on memory we just
831            make sure at least one signal gets delivered and don't
832            pass on the info struct.  */
833
834         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
835                                              (is_si_special(info) ||
836                                               info->si_code >= 0)));
837         if (q) {
838                 list_add_tail(&q->list, &signals->list);
839                 switch ((unsigned long) info) {
840                 case (unsigned long) SEND_SIG_NOINFO:
841                         q->info.si_signo = sig;
842                         q->info.si_errno = 0;
843                         q->info.si_code = SI_USER;
844                         q->info.si_pid = current->pid;
845                         q->info.si_uid = current->uid;
846                         break;
847                 case (unsigned long) SEND_SIG_PRIV:
848                         q->info.si_signo = sig;
849                         q->info.si_errno = 0;
850                         q->info.si_code = SI_KERNEL;
851                         q->info.si_pid = 0;
852                         q->info.si_uid = 0;
853                         break;
854                 default:
855                         copy_siginfo(&q->info, info);
856                         break;
857                 }
858         } else if (!is_si_special(info)) {
859                 if (sig >= SIGRTMIN && info->si_code != SI_USER)
860                 /*
861                  * Queue overflow, abort.  We may abort if the signal was rt
862                  * and sent by user using something other than kill().
863                  */
864                         return -EAGAIN;
865         }
866
867 out_set:
868         sigaddset(&signals->signal, sig);
869         return ret;
870 }
871
872 #define LEGACY_QUEUE(sigptr, sig) \
873         (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
874
875
876 static int
877 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
878 {
879         int ret = 0;
880
881         if (!irqs_disabled())
882                 BUG();
883         assert_spin_locked(&t->sighand->siglock);
884
885         /* Short-circuit ignored signals.  */
886         if (sig_ignored(t, sig))
887                 goto out;
888
889         /* Support queueing exactly one non-rt signal, so that we
890            can get more detailed information about the cause of
891            the signal. */
892         if (LEGACY_QUEUE(&t->pending, sig))
893                 goto out;
894
895         ret = send_signal(sig, info, t, &t->pending);
896         if (!ret && !sigismember(&t->blocked, sig))
897                 signal_wake_up(t, sig == SIGKILL);
898 out:
899         return ret;
900 }
901
902 /*
903  * Force a signal that the process can't ignore: if necessary
904  * we unblock the signal and change any SIG_IGN to SIG_DFL.
905  */
906
907 int
908 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
909 {
910         unsigned long int flags;
911         int ret;
912
913         spin_lock_irqsave(&t->sighand->siglock, flags);
914         if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
915                 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
916         }
917         if (sigismember(&t->blocked, sig)) {
918                 sigdelset(&t->blocked, sig);
919         }
920         recalc_sigpending_tsk(t);
921         ret = specific_send_sig_info(sig, info, t);
922         spin_unlock_irqrestore(&t->sighand->siglock, flags);
923
924         return ret;
925 }
926
927 void
928 force_sig_specific(int sig, struct task_struct *t)
929 {
930         force_sig_info(sig, SEND_SIG_FORCED, t);
931 }
932
933 /*
934  * Test if P wants to take SIG.  After we've checked all threads with this,
935  * it's equivalent to finding no threads not blocking SIG.  Any threads not
936  * blocking SIG were ruled out because they are not running and already
937  * have pending signals.  Such threads will dequeue from the shared queue
938  * as soon as they're available, so putting the signal on the shared queue
939  * will be equivalent to sending it to one such thread.
940  */
941 static inline int wants_signal(int sig, struct task_struct *p)
942 {
943         if (sigismember(&p->blocked, sig))
944                 return 0;
945         if (p->flags & PF_EXITING)
946                 return 0;
947         if (sig == SIGKILL)
948                 return 1;
949         if (p->state & (TASK_STOPPED | TASK_TRACED))
950                 return 0;
951         return task_curr(p) || !signal_pending(p);
952 }
953
954 static void
955 __group_complete_signal(int sig, struct task_struct *p)
956 {
957         struct task_struct *t;
958
959         /*
960          * Now find a thread we can wake up to take the signal off the queue.
961          *
962          * If the main thread wants the signal, it gets first crack.
963          * Probably the least surprising to the average bear.
964          */
965         if (wants_signal(sig, p))
966                 t = p;
967         else if (thread_group_empty(p))
968                 /*
969                  * There is just one thread and it does not need to be woken.
970                  * It will dequeue unblocked signals before it runs again.
971                  */
972                 return;
973         else {
974                 /*
975                  * Otherwise try to find a suitable thread.
976                  */
977                 t = p->signal->curr_target;
978                 if (t == NULL)
979                         /* restart balancing at this thread */
980                         t = p->signal->curr_target = p;
981                 BUG_ON(t->tgid != p->tgid);
982
983                 while (!wants_signal(sig, t)) {
984                         t = next_thread(t);
985                         if (t == p->signal->curr_target)
986                                 /*
987                                  * No thread needs to be woken.
988                                  * Any eligible threads will see
989                                  * the signal in the queue soon.
990                                  */
991                                 return;
992                 }
993                 p->signal->curr_target = t;
994         }
995
996         /*
997          * Found a killable thread.  If the signal will be fatal,
998          * then start taking the whole group down immediately.
999          */
1000         if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
1001             !sigismember(&t->real_blocked, sig) &&
1002             (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
1003                 /*
1004                  * This signal will be fatal to the whole group.
1005                  */
1006                 if (!sig_kernel_coredump(sig)) {
1007                         /*
1008                          * Start a group exit and wake everybody up.
1009                          * This way we don't have other threads
1010                          * running and doing things after a slower
1011                          * thread has the fatal signal pending.
1012                          */
1013                         p->signal->flags = SIGNAL_GROUP_EXIT;
1014                         p->signal->group_exit_code = sig;
1015                         p->signal->group_stop_count = 0;
1016                         t = p;
1017                         do {
1018                                 sigaddset(&t->pending.signal, SIGKILL);
1019                                 signal_wake_up(t, 1);
1020                                 t = next_thread(t);
1021                         } while (t != p);
1022                         return;
1023                 }
1024
1025                 /*
1026                  * There will be a core dump.  We make all threads other
1027                  * than the chosen one go into a group stop so that nothing
1028                  * happens until it gets scheduled, takes the signal off
1029                  * the shared queue, and does the core dump.  This is a
1030                  * little more complicated than strictly necessary, but it
1031                  * keeps the signal state that winds up in the core dump
1032                  * unchanged from the death state, e.g. which thread had
1033                  * the core-dump signal unblocked.
1034                  */
1035                 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1036                 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1037                 p->signal->group_stop_count = 0;
1038                 p->signal->group_exit_task = t;
1039                 t = p;
1040                 do {
1041                         p->signal->group_stop_count++;
1042                         signal_wake_up(t, 0);
1043                         t = next_thread(t);
1044                 } while (t != p);
1045                 wake_up_process(p->signal->group_exit_task);
1046                 return;
1047         }
1048
1049         /*
1050          * The signal is already in the shared-pending queue.
1051          * Tell the chosen thread to wake up and dequeue it.
1052          */
1053         signal_wake_up(t, sig == SIGKILL);
1054         return;
1055 }
1056
1057 int
1058 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1059 {
1060         int ret = 0;
1061
1062         assert_spin_locked(&p->sighand->siglock);
1063         handle_stop_signal(sig, p);
1064
1065         /* Short-circuit ignored signals.  */
1066         if (sig_ignored(p, sig))
1067                 return ret;
1068
1069         if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1070                 /* This is a non-RT signal and we already have one queued.  */
1071                 return ret;
1072
1073         /*
1074          * Put this signal on the shared-pending queue, or fail with EAGAIN.
1075          * We always use the shared queue for process-wide signals,
1076          * to avoid several races.
1077          */
1078         ret = send_signal(sig, info, p, &p->signal->shared_pending);
1079         if (unlikely(ret))
1080                 return ret;
1081
1082         __group_complete_signal(sig, p);
1083         return 0;
1084 }
1085
1086 /*
1087  * Nuke all other threads in the group.
1088  */
1089 void zap_other_threads(struct task_struct *p)
1090 {
1091         struct task_struct *t;
1092
1093         p->signal->flags = SIGNAL_GROUP_EXIT;
1094         p->signal->group_stop_count = 0;
1095
1096         if (thread_group_empty(p))
1097                 return;
1098
1099         for (t = next_thread(p); t != p; t = next_thread(t)) {
1100                 /*
1101                  * Don't bother with already dead threads
1102                  */
1103                 if (t->exit_state)
1104                         continue;
1105
1106                 /*
1107                  * We don't want to notify the parent, since we are
1108                  * killed as part of a thread group due to another
1109                  * thread doing an execve() or similar. So set the
1110                  * exit signal to -1 to allow immediate reaping of
1111                  * the process.  But don't detach the thread group
1112                  * leader.
1113                  */
1114                 if (t != p->group_leader)
1115                         t->exit_signal = -1;
1116
1117                 /* SIGKILL will be handled before any pending SIGSTOP */
1118                 sigaddset(&t->pending.signal, SIGKILL);
1119                 signal_wake_up(t, 1);
1120         }
1121 }
1122
1123 /*
1124  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1125  */
1126 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1127 {
1128         struct sighand_struct *sighand;
1129
1130         for (;;) {
1131                 sighand = rcu_dereference(tsk->sighand);
1132                 if (unlikely(sighand == NULL))
1133                         break;
1134
1135                 spin_lock_irqsave(&sighand->siglock, *flags);
1136                 if (likely(sighand == tsk->sighand))
1137                         break;
1138                 spin_unlock_irqrestore(&sighand->siglock, *flags);
1139         }
1140
1141         return sighand;
1142 }
1143
1144 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1145 {
1146         unsigned long flags;
1147         int ret;
1148
1149         ret = check_kill_permission(sig, info, p);
1150
1151         if (!ret && sig) {
1152                 ret = -ESRCH;
1153                 if (lock_task_sighand(p, &flags)) {
1154                         ret = __group_send_sig_info(sig, info, p);
1155                         unlock_task_sighand(p, &flags);
1156                 }
1157         }
1158
1159         return ret;
1160 }
1161
1162 /*
1163  * kill_pg_info() sends a signal to a process group: this is what the tty
1164  * control characters do (^C, ^Z etc)
1165  */
1166
1167 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1168 {
1169         struct task_struct *p = NULL;
1170         int retval, success;
1171
1172         if (pgrp <= 0)
1173                 return -EINVAL;
1174
1175         success = 0;
1176         retval = -ESRCH;
1177         do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1178                 int err = group_send_sig_info(sig, info, p);
1179                 success |= !err;
1180                 retval = err;
1181         } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1182         return success ? 0 : retval;
1183 }
1184
1185 int
1186 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1187 {
1188         int retval;
1189
1190         read_lock(&tasklist_lock);
1191         retval = __kill_pg_info(sig, info, pgrp);
1192         read_unlock(&tasklist_lock);
1193
1194         return retval;
1195 }
1196
1197 int
1198 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1199 {
1200         int error;
1201         int acquired_tasklist_lock = 0;
1202         struct task_struct *p;
1203
1204         rcu_read_lock();
1205         if (unlikely(sig_needs_tasklist(sig))) {
1206                 read_lock(&tasklist_lock);
1207                 acquired_tasklist_lock = 1;
1208         }
1209         p = find_task_by_pid(pid);
1210         error = -ESRCH;
1211         if (p)
1212                 error = group_send_sig_info(sig, info, p);
1213         if (unlikely(acquired_tasklist_lock))
1214                 read_unlock(&tasklist_lock);
1215         rcu_read_unlock();
1216         return error;
1217 }
1218
1219 /* like kill_proc_info(), but doesn't use uid/euid of "current" */
1220 int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1221                       uid_t uid, uid_t euid)
1222 {
1223         int ret = -EINVAL;
1224         struct task_struct *p;
1225
1226         if (!valid_signal(sig))
1227                 return ret;
1228
1229         read_lock(&tasklist_lock);
1230         p = find_task_by_pid(pid);
1231         if (!p) {
1232                 ret = -ESRCH;
1233                 goto out_unlock;
1234         }
1235         if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1236             && (euid != p->suid) && (euid != p->uid)
1237             && (uid != p->suid) && (uid != p->uid)) {
1238                 ret = -EPERM;
1239                 goto out_unlock;
1240         }
1241         if (sig && p->sighand) {
1242                 unsigned long flags;
1243                 spin_lock_irqsave(&p->sighand->siglock, flags);
1244                 ret = __group_send_sig_info(sig, info, p);
1245                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1246         }
1247 out_unlock:
1248         read_unlock(&tasklist_lock);
1249         return ret;
1250 }
1251 EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1252
1253 /*
1254  * kill_something_info() interprets pid in interesting ways just like kill(2).
1255  *
1256  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1257  * is probably wrong.  Should make it like BSD or SYSV.
1258  */
1259
1260 static int kill_something_info(int sig, struct siginfo *info, int pid)
1261 {
1262         if (!pid) {
1263                 return kill_pg_info(sig, info, process_group(current));
1264         } else if (pid == -1) {
1265                 int retval = 0, count = 0;
1266                 struct task_struct * p;
1267
1268                 read_lock(&tasklist_lock);
1269                 for_each_process(p) {
1270                         if (p->pid > 1 && p->tgid != current->tgid) {
1271                                 int err = group_send_sig_info(sig, info, p);
1272                                 ++count;
1273                                 if (err != -EPERM)
1274                                         retval = err;
1275                         }
1276                 }
1277                 read_unlock(&tasklist_lock);
1278                 return count ? retval : -ESRCH;
1279         } else if (pid < 0) {
1280                 return kill_pg_info(sig, info, -pid);
1281         } else {
1282                 return kill_proc_info(sig, info, pid);
1283         }
1284 }
1285
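
The pid conventions this implements are the ones kill(2) documents; a small user-space illustration (SIGWINCH is used because its default action is ignore, so the calls are harmless):

#include <signal.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        pid_t pgrp = getpgrp();

        kill(getpid(), 0);         /* pid > 0: one specific process (signal 0 only checks permission) */
        kill(0, SIGWINCH);         /* pid == 0: every process in the caller's process group */
        kill(-pgrp, SIGWINCH);     /* pid < -1: every process in the group with pgid == pgrp */
        /* kill(-1, sig) would hit every process the caller may signal, except itself and pid 1 */
        return 0;
}
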
1286 /*
1287  * These are for backward compatibility with the rest of the kernel source.
1288  */
1289
1290 /*
1291  * These two are the most common entry points.  They send a signal
1292  * just to the specific thread.
1293  */
1294 int
1295 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1296 {
1297         int ret;
1298         unsigned long flags;
1299
1300         /*
1301          * Make sure legacy kernel users don't send in bad values
1302          * (normal paths check this in check_kill_permission).
1303          */
1304         if (!valid_signal(sig))
1305                 return -EINVAL;
1306
1307         /*
1308          * We need the tasklist lock even for the specific
1309          * thread case (when we don't need to follow the group
1310          * lists) in order to avoid races with "p->sighand"
1311          * going away or changing from under us.
1312          */
1313         read_lock(&tasklist_lock);  
1314         spin_lock_irqsave(&p->sighand->siglock, flags);
1315         ret = specific_send_sig_info(sig, info, p);
1316         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1317         read_unlock(&tasklist_lock);
1318         return ret;
1319 }
1320
1321 #define __si_special(priv) \
1322         ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1323
1324 int
1325 send_sig(int sig, struct task_struct *p, int priv)
1326 {
1327         return send_sig_info(sig, __si_special(priv), p);
1328 }
1329
1330 /*
1331  * This is the entry point for "process-wide" signals.
1332  * They will go to an appropriate thread in the thread group.
1333  */
1334 int
1335 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1336 {
1337         int ret;
1338         read_lock(&tasklist_lock);
1339         ret = group_send_sig_info(sig, info, p);
1340         read_unlock(&tasklist_lock);
1341         return ret;
1342 }
1343
1344 void
1345 force_sig(int sig, struct task_struct *p)
1346 {
1347         force_sig_info(sig, SEND_SIG_PRIV, p);
1348 }
1349
1350 /*
1351  * When things go south during signal handling, we
1352  * will force a SIGSEGV. And if the signal that caused
1353  * the problem was already a SIGSEGV, we'll want to
1354  * make sure we don't even try to deliver the signal..
1355  */
1356 int
1357 force_sigsegv(int sig, struct task_struct *p)
1358 {
1359         if (sig == SIGSEGV) {
1360                 unsigned long flags;
1361                 spin_lock_irqsave(&p->sighand->siglock, flags);
1362                 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1363                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1364         }
1365         force_sig(SIGSEGV, p);
1366         return 0;
1367 }
1368
1369 int
1370 kill_pg(pid_t pgrp, int sig, int priv)
1371 {
1372         return kill_pg_info(sig, __si_special(priv), pgrp);
1373 }
1374
1375 int
1376 kill_proc(pid_t pid, int sig, int priv)
1377 {
1378         return kill_proc_info(sig, __si_special(priv), pid);
1379 }
1380
1381 /*
1382  * These functions support sending signals using preallocated sigqueue
1383  * structures.  This is needed "because realtime applications cannot
1384  * afford to lose notifications of asynchronous events, like timer
1385  * expirations or I/O completions".  In the case of Posix Timers 
1386  * we allocate the sigqueue structure from the timer_create.  If this
1387  * allocation fails we are able to report the failure to the application
1388  * with an EAGAIN error.
1389  */
1390  
1391 struct sigqueue *sigqueue_alloc(void)
1392 {
1393         struct sigqueue *q;
1394
1395         if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1396                 q->flags |= SIGQUEUE_PREALLOC;
1397         return(q);
1398 }
1399
1400 void sigqueue_free(struct sigqueue *q)
1401 {
1402         unsigned long flags;
1403         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1404         /*
1405          * If the signal is still pending remove it from the
1406          * pending queue.
1407          */
1408         if (unlikely(!list_empty(&q->list))) {
1409                 spinlock_t *lock = &current->sighand->siglock;
1410                 read_lock(&tasklist_lock);
1411                 spin_lock_irqsave(lock, flags);
1412                 if (!list_empty(&q->list))
1413                         list_del_init(&q->list);
1414                 spin_unlock_irqrestore(lock, flags);
1415                 read_unlock(&tasklist_lock);
1416         }
1417         q->flags &= ~SIGQUEUE_PREALLOC;
1418         __sigqueue_free(q);
1419 }
1420
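
On the user-space side, the preallocation described above is what makes timer_create(2) the point of failure rather than a later timer expiry; a minimal illustration (link with -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
        struct sigevent sev;
        timer_t timerid;

        memset(&sev, 0, sizeof(sev));
        sev.sigev_notify = SIGEV_SIGNAL;
        sev.sigev_signo = SIGRTMIN;             /* a queued rt signal that carries siginfo */
        sev.sigev_value.sival_int = 42;

        if (timer_create(CLOCK_REALTIME, &sev, &timerid) == -1) {
                perror("timer_create");         /* EAGAIN here reports the failed sigqueue allocation */
                return 1;
        }
        timer_delete(timerid);
        return 0;
}
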
1421 int
1422 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1423 {
1424         unsigned long flags;
1425         int ret = 0;
1426         struct sighand_struct *sh;
1427
1428         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1429
1430         /*
1431          * The rcu based delayed sighand destroy makes it possible to
1432          * run this without tasklist lock held. The task struct itself
1433          * cannot go away as create_timer did get_task_struct().
1434          *
1435          * We return -1 when the task is marked exiting, so
1436          * posix_timer_event can redirect it to the group leader
1437          */
1438         rcu_read_lock();
1439
1440         if (unlikely(p->flags & PF_EXITING)) {
1441                 ret = -1;
1442                 goto out_err;
1443         }
1444
1445 retry:
1446         sh = rcu_dereference(p->sighand);
1447
1448         spin_lock_irqsave(&sh->siglock, flags);
1449         if (p->sighand != sh) {
1450                 /* We raced with exec() in a multithreaded process... */
1451                 spin_unlock_irqrestore(&sh->siglock, flags);
1452                 goto retry;
1453         }
1454
1455         /*
1456          * We do the check here again to handle the following scenario:
1457          *
1458          * CPU 0                CPU 1
1459          * send_sigqueue
1460          * check PF_EXITING
1461          * interrupt            exit code running
1462          *                      __exit_signal
1463          *                      lock sighand->siglock
1464          *                      unlock sighand->siglock
1465          * lock sh->siglock
1466          * add(tsk->pending)    flush_sigqueue(tsk->pending)
1467          *
1468          */
1469
1470         if (unlikely(p->flags & PF_EXITING)) {
1471                 ret = -1;
1472                 goto out;
1473         }
1474
1475         if (unlikely(!list_empty(&q->list))) {
1476                 /*
1477                  * If an SI_TIMER entry is already queued, just increment
1478                  * the overrun count.
1479                  */
1480                 if (q->info.si_code != SI_TIMER)
1481                         BUG();
1482                 q->info.si_overrun++;
1483                 goto out;
1484         }
1485         /* Short-circuit ignored signals.  */
1486         if (sig_ignored(p, sig)) {
1487                 ret = 1;
1488                 goto out;
1489         }
1490
1491         list_add_tail(&q->list, &p->pending.list);
1492         sigaddset(&p->pending.signal, sig);
1493         if (!sigismember(&p->blocked, sig))
1494                 signal_wake_up(p, sig == SIGKILL);
1495
1496 out:
1497         spin_unlock_irqrestore(&sh->siglock, flags);
1498 out_err:
1499         rcu_read_unlock();
1500
1501         return ret;
1502 }
1503
1504 int
1505 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1506 {
1507         unsigned long flags;
1508         int ret = 0;
1509
1510         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1511
1512         read_lock(&tasklist_lock);
1513         /* Since it_lock is held, p->sighand cannot be NULL. */
1514         spin_lock_irqsave(&p->sighand->siglock, flags);
1515         handle_stop_signal(sig, p);
1516
1517         /* Short-circuit ignored signals.  */
1518         if (sig_ignored(p, sig)) {
1519                 ret = 1;
1520                 goto out;
1521         }
1522
1523         if (unlikely(!list_empty(&q->list))) {
1524                 /*
1525                  * If an SI_TIMER entry is already queued, just increment
1526                  * the overrun count.  Other uses should not try to
1527                  * send the signal multiple times.
1528                  */
1529                 if (q->info.si_code != SI_TIMER)
1530                         BUG();
1531                 q->info.si_overrun++;
1532                 goto out;
1533         } 
1534
1535         /*
1536          * Put this signal on the shared-pending queue.
1537          * We always use the shared queue for process-wide signals,
1538          * to avoid several races.
1539          */
1540         list_add_tail(&q->list, &p->signal->shared_pending.list);
1541         sigaddset(&p->signal->shared_pending.signal, sig);
1542
1543         __group_complete_signal(sig, p);
1544 out:
1545         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1546         read_unlock(&tasklist_lock);
1547         return ret;
1548 }
1549
1550 /*
1551  * Wake up any threads in the parent blocked in wait* syscalls.
1552  */
1553 static inline void __wake_up_parent(struct task_struct *p,
1554                                     struct task_struct *parent)
1555 {
1556         wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1557 }
1558
1559 /*
1560  * Let a parent know about the death of a child.
1561  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1562  */
1563
1564 void do_notify_parent(struct task_struct *tsk, int sig)
1565 {
1566         struct siginfo info;
1567         unsigned long flags;
1568         struct sighand_struct *psig;
1569
1570         BUG_ON(sig == -1);
1571
1572         /* do_notify_parent_cldstop should have been called instead.  */
1573         BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1574
1575         BUG_ON(!tsk->ptrace &&
1576                (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1577
1578         info.si_signo = sig;
1579         info.si_errno = 0;
1580         info.si_pid = tsk->pid;
1581         info.si_uid = tsk->uid;
1582
1583         /* FIXME: find out whether or not this is supposed to be c*time. */
1584         info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1585                                                        tsk->signal->utime));
1586         info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1587                                                        tsk->signal->stime));
1588
1589         info.si_status = tsk->exit_code & 0x7f;
1590         if (tsk->exit_code & 0x80)
1591                 info.si_code = CLD_DUMPED;
1592         else if (tsk->exit_code & 0x7f)
1593                 info.si_code = CLD_KILLED;
1594         else {
1595                 info.si_code = CLD_EXITED;
1596                 info.si_status = tsk->exit_code >> 8;
1597         }
1598
1599         psig = tsk->parent->sighand;
1600         spin_lock_irqsave(&psig->siglock, flags);
1601         if (!tsk->ptrace && sig == SIGCHLD &&
1602             (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1603              (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1604                 /*
1605                  * We are exiting and our parent doesn't care.  POSIX.1
1606                  * defines special semantics for setting SIGCHLD to SIG_IGN
1607                  * or setting the SA_NOCLDWAIT flag: we should be reaped
1608                  * automatically and not left for our parent's wait4 call.
1609                  * Rather than having the parent do it as a magic kind of
1610                  * signal handler, we just set this to tell do_exit that we
1611                  * can be cleaned up without becoming a zombie.  Note that
1612                  * we still call __wake_up_parent in this case, because a
1613                  * blocked sys_wait4 might now return -ECHILD.
1614                  *
1615                  * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1616                  * is implementation-defined: we do (if you don't want
1617                  * it, just use SIG_IGN instead).
1618                  */
1619                 tsk->exit_signal = -1;
1620                 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1621                         sig = 0;
1622         }
1623         if (valid_signal(sig) && sig > 0)
1624                 __group_send_sig_info(sig, &info, tsk->parent);
1625         __wake_up_parent(tsk, tsk->parent);
1626         spin_unlock_irqrestore(&psig->siglock, flags);
1627 }
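
/*
 * Illustrative userspace sketch (an editorial addition, not part of
 * kernel/signal.c): the SIG_IGN / SA_NOCLDWAIT behaviour described above
 * lets a parent that does not care about exit status avoid zombies; once
 * all children are gone, wait() fails with ECHILD instead of returning a
 * status.  demo_autoreap is a made-up name.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static int demo_autoreap(void)
{
        signal(SIGCHLD, SIG_IGN);       /* children are reaped automatically */

        if (fork() == 0)
                _exit(0);               /* child exits without becoming a zombie */

        if (wait(NULL) == -1 && errno == ECHILD)
                printf("nothing to reap, as expected\n");
        return 0;
}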
1628
1629 static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1630 {
1631         struct siginfo info;
1632         unsigned long flags;
1633         struct task_struct *parent;
1634         struct sighand_struct *sighand;
1635
1636         if (to_self)
1637                 parent = tsk->parent;
1638         else {
1639                 tsk = tsk->group_leader;
1640                 parent = tsk->real_parent;
1641         }
1642
1643         info.si_signo = SIGCHLD;
1644         info.si_errno = 0;
1645         info.si_pid = tsk->pid;
1646         info.si_uid = tsk->uid;
1647
1648         /* FIXME: find out whether or not this is supposed to be c*time. */
1649         info.si_utime = cputime_to_jiffies(tsk->utime);
1650         info.si_stime = cputime_to_jiffies(tsk->stime);
1651
1652         info.si_code = why;
1653         switch (why) {
1654         case CLD_CONTINUED:
1655                 info.si_status = SIGCONT;
1656                 break;
1657         case CLD_STOPPED:
1658                 info.si_status = tsk->signal->group_exit_code & 0x7f;
1659                 break;
1660         case CLD_TRAPPED:
1661                 info.si_status = tsk->exit_code & 0x7f;
1662                 break;
1663         default:
1664                 BUG();
1665         }
1666
1667         sighand = parent->sighand;
1668         spin_lock_irqsave(&sighand->siglock, flags);
1669         if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1670             !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1671                 __group_send_sig_info(SIGCHLD, &info, parent);
1672         /*
1673          * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1674          */
1675         __wake_up_parent(tsk, parent);
1676         spin_unlock_irqrestore(&sighand->siglock, flags);
1677 }
1678
1679 /*
1680  * This must be called with current->sighand->siglock held.
1681  *
1682  * This should be the path for all ptrace stops.
1683  * We always set current->last_siginfo while stopped here.
1684  * That makes it a way to test a stopped process for
1685  * being ptrace-stopped vs being job-control-stopped.
1686  *
1687  * If we actually decide not to stop at all because the tracer is gone,
1688  * we leave nostop_code in current->exit_code.
1689  */
1690 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1691 {
1692         /*
1693          * If there is a group stop in progress,
1694          * we must participate in the bookkeeping.
1695          */
1696         if (current->signal->group_stop_count > 0)
1697                 --current->signal->group_stop_count;
1698
1699         current->last_siginfo = info;
1700         current->exit_code = exit_code;
1701
1702         /* Let the debugger run.  */
1703         set_current_state(TASK_TRACED);
1704         spin_unlock_irq(&current->sighand->siglock);
1705         read_lock(&tasklist_lock);
1706         if (likely(current->ptrace & PT_PTRACED) &&
1707             likely(current->parent != current->real_parent ||
1708                    !(current->ptrace & PT_ATTACHED)) &&
1709             (likely(current->parent->signal != current->signal) ||
1710              !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1711                 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1712                 read_unlock(&tasklist_lock);
1713                 schedule();
1714         } else {
1715                 /*
1716                  * By the time we got the lock, our tracer went away.
1717                  * Don't stop here.
1718                  */
1719                 read_unlock(&tasklist_lock);
1720                 set_current_state(TASK_RUNNING);
1721                 current->exit_code = nostop_code;
1722         }
1723
1724         /*
1725          * We are back.  Now reacquire the siglock before touching
1726          * last_siginfo, so that we are sure to have synchronized with
1727          * any signal-sending on another CPU that wants to examine it.
1728          */
1729         spin_lock_irq(&current->sighand->siglock);
1730         current->last_siginfo = NULL;
1731
1732         /*
1733          * Queued signals ignored us while we were stopped for tracing.
1734          * So check for any that we should take before resuming user mode.
1735          */
1736         recalc_sigpending();
1737 }
1738
1739 void ptrace_notify(int exit_code)
1740 {
1741         siginfo_t info;
1742
1743         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1744
1745         memset(&info, 0, sizeof info);
1746         info.si_signo = SIGTRAP;
1747         info.si_code = exit_code;
1748         info.si_pid = current->pid;
1749         info.si_uid = current->uid;
1750
1751         /* Let the debugger run.  */
1752         spin_lock_irq(&current->sighand->siglock);
1753         ptrace_stop(exit_code, 0, &info);
1754         spin_unlock_irq(&current->sighand->siglock);
1755 }
1756
1757 static void
1758 finish_stop(int stop_count)
1759 {
1760         int to_self;
1761
1762         /*
1763          * If there are no other threads in the group, or if there is
1764          * a group stop in progress and we are the last to stop,
1765          * report to the parent.  When ptraced, every thread reports itself.
1766          */
1767         if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1768                 to_self = 1;
1769         else if (stop_count == 0)
1770                 to_self = 0;
1771         else
1772                 goto out;
1773
1774         read_lock(&tasklist_lock);
1775         do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1776         read_unlock(&tasklist_lock);
1777
1778 out:
1779         schedule();
1780         /*
1781          * Now we don't run again until continued.
1782          */
1783         current->exit_code = 0;
1784 }
1785
1786 /*
1787  * This performs the stopping for SIGSTOP and other stop signals.
1788  * We have to stop all threads in the thread group.
1789  * Returns nonzero if we've actually stopped and released the siglock.
1790  * Returns zero if we didn't stop and still hold the siglock.
1791  */
1792 static int
1793 do_signal_stop(int signr)
1794 {
1795         struct signal_struct *sig = current->signal;
1796         struct sighand_struct *sighand = current->sighand;
1797         int stop_count = -1;
1798
1799         if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1800                 return 0;
1801
1802         if (sig->group_stop_count > 0) {
1803                 /*
1804                  * There is a group stop in progress.  We don't need to
1805                  * start another one.
1806                  */
1807                 signr = sig->group_exit_code;
1808                 stop_count = --sig->group_stop_count;
1809                 current->exit_code = signr;
1810                 set_current_state(TASK_STOPPED);
1811                 if (stop_count == 0)
1812                         sig->flags = SIGNAL_STOP_STOPPED;
1813                 spin_unlock_irq(&sighand->siglock);
1814         }
1815         else if (thread_group_empty(current)) {
1816                 /*
1817                  * Lock must be held through transition to stopped state.
1818                  */
1819                 current->exit_code = current->signal->group_exit_code = signr;
1820                 set_current_state(TASK_STOPPED);
1821                 sig->flags = SIGNAL_STOP_STOPPED;
1822                 spin_unlock_irq(&sighand->siglock);
1823         }
1824         else {
1825                 /*
1826                  * There is no group stop already in progress.
1827                  * We must initiate one now, but that requires
1828                  * dropping siglock to get both the tasklist lock
1829                  * and siglock again in the proper order.  Note that
1830                  * this allows an intervening SIGCONT to be posted.
1831                  * We need to check for that and bail out if necessary.
1832                  */
1833                 struct task_struct *t;
1834
1835                 spin_unlock_irq(&sighand->siglock);
1836
1837                 /* signals can be posted during this window */
1838
1839                 read_lock(&tasklist_lock);
1840                 spin_lock_irq(&sighand->siglock);
1841
1842                 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1843                         /*
1844                          * Another stop or continue happened while we
1845                          * didn't have the lock.  We can just swallow this
1846                          * signal now.  If we raced with a SIGCONT, that
1847                          * should have just cleared it now.  If we raced
1848                          * with another processor delivering a stop signal,
1849                          * then the SIGCONT that wakes us up should clear it.
1850                          */
1851                         read_unlock(&tasklist_lock);
1852                         return 0;
1853                 }
1854
1855                 if (sig->group_stop_count == 0) {
1856                         sig->group_exit_code = signr;
1857                         stop_count = 0;
1858                         for (t = next_thread(current); t != current;
1859                              t = next_thread(t))
1860                                 /*
1861                                  * Setting state to TASK_STOPPED for a group
1862                                  * stop is always done with the siglock held,
1863                                  * so this check has no races.
1864                                  */
1865                                 if (!t->exit_state &&
1866                                     !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1867                                         stop_count++;
1868                                         signal_wake_up(t, 0);
1869                                 }
1870                         sig->group_stop_count = stop_count;
1871                 }
1872                 else {
1873                         /* A race with another thread while unlocked.  */
1874                         signr = sig->group_exit_code;
1875                         stop_count = --sig->group_stop_count;
1876                 }
1877
1878                 current->exit_code = signr;
1879                 set_current_state(TASK_STOPPED);
1880                 if (stop_count == 0)
1881                         sig->flags = SIGNAL_STOP_STOPPED;
1882
1883                 spin_unlock_irq(&sighand->siglock);
1884                 read_unlock(&tasklist_lock);
1885         }
1886
1887         finish_stop(stop_count);
1888         return 1;
1889 }
1890
1891 /*
1892  * Do appropriate magic when group_stop_count > 0.
1893  * We return nonzero if we stopped, after releasing the siglock.
1894  * We return zero if we still hold the siglock and should look
1895  * for another signal without checking group_stop_count again.
1896  */
1897 static int handle_group_stop(void)
1898 {
1899         int stop_count;
1900
1901         if (current->signal->group_exit_task == current) {
1902                 /*
1903                  * Group stop is so we can do a core dump;
1904                  * we are the initiating thread, so get on with it.
1905                  */
1906                 current->signal->group_exit_task = NULL;
1907                 return 0;
1908         }
1909
1910         if (current->signal->flags & SIGNAL_GROUP_EXIT)
1911                 /*
1912                  * Group stop is so another thread can do a core dump,
1913                  * or else we are racing against a death signal.
1914                  * Just punt the stop so we can get the next signal.
1915                  */
1916                 return 0;
1917
1918         /*
1919          * There is a group stop in progress.  We stop
1920          * without any associated signal being in our queue.
1921          */
1922         stop_count = --current->signal->group_stop_count;
1923         if (stop_count == 0)
1924                 current->signal->flags = SIGNAL_STOP_STOPPED;
1925         current->exit_code = current->signal->group_exit_code;
1926         set_current_state(TASK_STOPPED);
1927         spin_unlock_irq(&current->sighand->siglock);
1928         finish_stop(stop_count);
1929         return 1;
1930 }
1931
1932 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1933                           struct pt_regs *regs, void *cookie)
1934 {
1935         sigset_t *mask = &current->blocked;
1936         int signr = 0;
1937
1938         try_to_freeze();
1939
1940 relock:
1941         spin_lock_irq(&current->sighand->siglock);
1942         for (;;) {
1943                 struct k_sigaction *ka;
1944
1945                 if (unlikely(current->signal->group_stop_count > 0) &&
1946                     handle_group_stop())
1947                         goto relock;
1948
1949                 signr = dequeue_signal(current, mask, info);
1950
1951                 if (!signr)
1952                         break; /* will return 0 */
1953
1954                 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1955                         ptrace_signal_deliver(regs, cookie);
1956
1957                         /* Let the debugger run.  */
1958                         ptrace_stop(signr, signr, info);
1959
1960                         /* We're back.  Did the debugger cancel the sig or group_exit? */
1961                         signr = current->exit_code;
1962                         if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
1963                                 continue;
1964
1965                         current->exit_code = 0;
1966
1967                         /* Update the siginfo structure if the signal has
1968                            changed.  If the debugger wanted something
1969                            specific in the siginfo structure then it should
1970                            have updated *info via PTRACE_SETSIGINFO.  */
1971                         if (signr != info->si_signo) {
1972                                 info->si_signo = signr;
1973                                 info->si_errno = 0;
1974                                 info->si_code = SI_USER;
1975                                 info->si_pid = current->parent->pid;
1976                                 info->si_uid = current->parent->uid;
1977                         }
1978
1979                         /* If the (new) signal is now blocked, requeue it.  */
1980                         if (sigismember(&current->blocked, signr)) {
1981                                 specific_send_sig_info(signr, info, current);
1982                                 continue;
1983                         }
1984                 }
1985
1986                 ka = &current->sighand->action[signr-1];
1987                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1988                         continue;
1989                 if (ka->sa.sa_handler != SIG_DFL) {
1990                         /* Run the handler.  */
1991                         *return_ka = *ka;
1992
1993                         if (ka->sa.sa_flags & SA_ONESHOT)
1994                                 ka->sa.sa_handler = SIG_DFL;
1995
1996                         break; /* will return non-zero "signr" value */
1997                 }
1998
1999                 /*
2000                  * Now we are doing the default action for this signal.
2001                  */
2002                 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2003                         continue;
2004
2005                 /* Init gets no signals it doesn't want.  */
2006                 if (current == child_reaper)
2007                         continue;
2008
2009                 if (sig_kernel_stop(signr)) {
2010                         /*
2011                          * The default action is to stop all threads in
2012                          * the thread group.  The job control signals
2013                          * do nothing in an orphaned pgrp, but SIGSTOP
2014                          * always works.  Note that siglock needs to be
2015                          * dropped during the call to is_orphaned_pgrp()
2016                          * because of lock ordering with tasklist_lock.
2017                          * This allows an intervening SIGCONT to be posted.
2018                          * We need to check for that and bail out if necessary.
2019                          */
2020                         if (signr != SIGSTOP) {
2021                                 spin_unlock_irq(&current->sighand->siglock);
2022
2023                                 /* signals can be posted during this window */
2024
2025                                 if (is_orphaned_pgrp(process_group(current)))
2026                                         goto relock;
2027
2028                                 spin_lock_irq(&current->sighand->siglock);
2029                         }
2030
2031                         if (likely(do_signal_stop(signr))) {
2032                                 /* It released the siglock.  */
2033                                 goto relock;
2034                         }
2035
2036                         /*
2037                          * We didn't actually stop, due to a race
2038                          * with SIGCONT or something like that.
2039                          */
2040                         continue;
2041                 }
2042
2043                 spin_unlock_irq(&current->sighand->siglock);
2044
2045                 /*
2046                  * Anything else is fatal, maybe with a core dump.
2047                  */
2048                 current->flags |= PF_SIGNALED;
2049                 if (sig_kernel_coredump(signr)) {
2050                         /*
2051                          * If it was able to dump core, this kills all
2052                          * other threads in the group and synchronizes with
2053                          * their demise.  If we lost the race with another
2054                          * thread getting here, it set group_exit_code
2055                          * first and our do_group_exit call below will use
2056                          * that value and ignore the one we pass it.
2057                          */
2058                         do_coredump((long)signr, signr, regs);
2059                 }
2060
2061                 /*
2062                  * Death signals, no core dump.
2063                  */
2064                 do_group_exit(signr);
2065                 /* NOTREACHED */
2066         }
2067         spin_unlock_irq(&current->sighand->siglock);
2068         return signr;
2069 }
2070
2071 EXPORT_SYMBOL(recalc_sigpending);
2072 EXPORT_SYMBOL_GPL(dequeue_signal);
2073 EXPORT_SYMBOL(flush_signals);
2074 EXPORT_SYMBOL(force_sig);
2075 EXPORT_SYMBOL(kill_pg);
2076 EXPORT_SYMBOL(kill_proc);
2077 EXPORT_SYMBOL(ptrace_notify);
2078 EXPORT_SYMBOL(send_sig);
2079 EXPORT_SYMBOL(send_sig_info);
2080 EXPORT_SYMBOL(sigprocmask);
2081 EXPORT_SYMBOL(block_all_signals);
2082 EXPORT_SYMBOL(unblock_all_signals);
2083
2084
2085 /*
2086  * System call entry points.
2087  */
2088
2089 asmlinkage long sys_restart_syscall(void)
2090 {
2091         struct restart_block *restart = &current_thread_info()->restart_block;
2092         return restart->fn(restart);
2093 }
2094
2095 long do_no_restart_syscall(struct restart_block *param)
2096 {
2097         return -EINTR;
2098 }
2099
2100 /*
2101  * We don't need to get the kernel lock - this is all local to this
2102  * particular thread.  (And that's good, because this is _heavily_
2103  * used by various programs.)
2104  */
2105
2106 /*
2107  * This is also useful for kernel threads that want to temporarily
2108  * (or permanently) block certain signals.
2109  *
2110  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2111  * interface happily blocks "unblockable" signals like SIGKILL
2112  * and friends.
2113  */
2114 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2115 {
2116         int error;
2117
2118         spin_lock_irq(&current->sighand->siglock);
2119         if (oldset)
2120                 *oldset = current->blocked;
2121
2122         error = 0;
2123         switch (how) {
2124         case SIG_BLOCK:
2125                 sigorsets(&current->blocked, &current->blocked, set);
2126                 break;
2127         case SIG_UNBLOCK:
2128                 signandsets(&current->blocked, &current->blocked, set);
2129                 break;
2130         case SIG_SETMASK:
2131                 current->blocked = *set;
2132                 break;
2133         default:
2134                 error = -EINVAL;
2135         }
2136         recalc_sigpending();
2137         spin_unlock_irq(&current->sighand->siglock);
2138
2139         return error;
2140 }
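
/*
 * Minimal kernel-side sketch (an editorial addition; the function name is
 * made up): as the comment above says, a kernel thread can use this
 * interface to block every signal for its own context, including SIGKILL,
 * which the userspace syscall path filters out.
 */
static void demo_kthread_block_all_signals(void)
{
        sigset_t all;

        sigfillset(&all);                       /* includes SIGKILL and SIGSTOP */
        sigprocmask(SIG_BLOCK, &all, NULL);     /* not filtered here, unlike sys_rt_sigprocmask() below */
}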
2141
2142 asmlinkage long
2143 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2144 {
2145         int error = -EINVAL;
2146         sigset_t old_set, new_set;
2147
2148         /* XXX: Don't preclude handling different sized sigset_t's.  */
2149         if (sigsetsize != sizeof(sigset_t))
2150                 goto out;
2151
2152         if (set) {
2153                 error = -EFAULT;
2154                 if (copy_from_user(&new_set, set, sizeof(*set)))
2155                         goto out;
2156                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2157
2158                 error = sigprocmask(how, &new_set, &old_set);
2159                 if (error)
2160                         goto out;
2161                 if (oset)
2162                         goto set_old;
2163         } else if (oset) {
2164                 spin_lock_irq(&current->sighand->siglock);
2165                 old_set = current->blocked;
2166                 spin_unlock_irq(&current->sighand->siglock);
2167
2168         set_old:
2169                 error = -EFAULT;
2170                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2171                         goto out;
2172         }
2173         error = 0;
2174 out:
2175         return error;
2176 }
2177
2178 long do_sigpending(void __user *set, unsigned long sigsetsize)
2179 {
2180         long error = -EINVAL;
2181         sigset_t pending;
2182
2183         if (sigsetsize > sizeof(sigset_t))
2184                 goto out;
2185
2186         spin_lock_irq(&current->sighand->siglock);
2187         sigorsets(&pending, &current->pending.signal,
2188                   &current->signal->shared_pending.signal);
2189         spin_unlock_irq(&current->sighand->siglock);
2190
2191         /* Outside the lock because only this thread touches it.  */
2192         sigandsets(&pending, &current->blocked, &pending);
2193
2194         error = -EFAULT;
2195         if (!copy_to_user(set, &pending, sigsetsize))
2196                 error = 0;
2197
2198 out:
2199         return error;
2200 }       
2201
2202 asmlinkage long
2203 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2204 {
2205         return do_sigpending(set, sigsetsize);
2206 }
2207
2208 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2209
2210 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2211 {
2212         int err;
2213
2214         if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2215                 return -EFAULT;
2216         if (from->si_code < 0)
2217                 return __copy_to_user(to, from, sizeof(siginfo_t))
2218                         ? -EFAULT : 0;
2219         /*
2220          * If you change the siginfo_t structure, please be sure
2221          * this code is fixed accordingly.
2222          * It should never copy any pad contained in the structure
2223          * to avoid security leaks, but must copy the generic
2224          * 3 ints plus the relevant union member.
2225          */
2226         err = __put_user(from->si_signo, &to->si_signo);
2227         err |= __put_user(from->si_errno, &to->si_errno);
2228         err |= __put_user((short)from->si_code, &to->si_code);
2229         switch (from->si_code & __SI_MASK) {
2230         case __SI_KILL:
2231                 err |= __put_user(from->si_pid, &to->si_pid);
2232                 err |= __put_user(from->si_uid, &to->si_uid);
2233                 break;
2234         case __SI_TIMER:
2235                 err |= __put_user(from->si_tid, &to->si_tid);
2236                 err |= __put_user(from->si_overrun, &to->si_overrun);
2237                 err |= __put_user(from->si_ptr, &to->si_ptr);
2238                 break;
2239         case __SI_POLL:
2240                 err |= __put_user(from->si_band, &to->si_band);
2241                 err |= __put_user(from->si_fd, &to->si_fd);
2242                 break;
2243         case __SI_FAULT:
2244                 err |= __put_user(from->si_addr, &to->si_addr);
2245 #ifdef __ARCH_SI_TRAPNO
2246                 err |= __put_user(from->si_trapno, &to->si_trapno);
2247 #endif
2248                 break;
2249         case __SI_CHLD:
2250                 err |= __put_user(from->si_pid, &to->si_pid);
2251                 err |= __put_user(from->si_uid, &to->si_uid);
2252                 err |= __put_user(from->si_status, &to->si_status);
2253                 err |= __put_user(from->si_utime, &to->si_utime);
2254                 err |= __put_user(from->si_stime, &to->si_stime);
2255                 break;
2256         case __SI_RT: /* This is not generated by the kernel as of now. */
2257         case __SI_MESGQ: /* But this is */
2258                 err |= __put_user(from->si_pid, &to->si_pid);
2259                 err |= __put_user(from->si_uid, &to->si_uid);
2260                 err |= __put_user(from->si_ptr, &to->si_ptr);
2261                 break;
2262         default: /* this is just in case for now ... */
2263                 err |= __put_user(from->si_pid, &to->si_pid);
2264                 err |= __put_user(from->si_uid, &to->si_uid);
2265                 break;
2266         }
2267         return err;
2268 }
2269
2270 #endif
2271
2272 asmlinkage long
2273 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2274                     siginfo_t __user *uinfo,
2275                     const struct timespec __user *uts,
2276                     size_t sigsetsize)
2277 {
2278         int ret, sig;
2279         sigset_t these;
2280         struct timespec ts;
2281         siginfo_t info;
2282         long timeout = 0;
2283
2284         /* XXX: Don't preclude handling different sized sigset_t's.  */
2285         if (sigsetsize != sizeof(sigset_t))
2286                 return -EINVAL;
2287
2288         if (copy_from_user(&these, uthese, sizeof(these)))
2289                 return -EFAULT;
2290                 
2291         /*
2292          * Invert the set of allowed signals to get those we
2293          * want to block.
2294          */
2295         sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2296         signotset(&these);
2297
2298         if (uts) {
2299                 if (copy_from_user(&ts, uts, sizeof(ts)))
2300                         return -EFAULT;
2301                 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2302                     || ts.tv_sec < 0)
2303                         return -EINVAL;
2304         }
2305
2306         spin_lock_irq(&current->sighand->siglock);
2307         sig = dequeue_signal(current, &these, &info);
2308         if (!sig) {
2309                 timeout = MAX_SCHEDULE_TIMEOUT;
2310                 if (uts)
2311                         timeout = (timespec_to_jiffies(&ts)
2312                                    + (ts.tv_sec || ts.tv_nsec));
2313
2314                 if (timeout) {
2315                         /* None ready -- temporarily unblock those we're
2316                          * interested in while we are sleeping so that we'll
2317                          * be awakened when they arrive.  */
2318                         current->real_blocked = current->blocked;
2319                         sigandsets(&current->blocked, &current->blocked, &these);
2320                         recalc_sigpending();
2321                         spin_unlock_irq(&current->sighand->siglock);
2322
2323                         timeout = schedule_timeout_interruptible(timeout);
2324
2325                         spin_lock_irq(&current->sighand->siglock);
2326                         sig = dequeue_signal(current, &these, &info);
2327                         current->blocked = current->real_blocked;
2328                         siginitset(&current->real_blocked, 0);
2329                         recalc_sigpending();
2330                 }
2331         }
2332         spin_unlock_irq(&current->sighand->siglock);
2333
2334         if (sig) {
2335                 ret = sig;
2336                 if (uinfo) {
2337                         if (copy_siginfo_to_user(uinfo, &info))
2338                                 ret = -EFAULT;
2339                 }
2340         } else {
2341                 ret = -EAGAIN;
2342                 if (timeout)
2343                         ret = -EINTR;
2344         }
2345
2346         return ret;
2347 }
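
/*
 * Illustrative userspace sketch (an editorial addition, not part of
 * kernel/signal.c; demo_wait_rtmin is a made-up name): waiting
 * synchronously for SIGRTMIN with a five second timeout.  The signal is
 * blocked first so it stays queued for sigtimedwait() instead of being
 * delivered asynchronously.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

static int demo_wait_rtmin(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { 5, 0 };

        sigemptyset(&set);
        sigaddset(&set, SIGRTMIN);
        sigprocmask(SIG_BLOCK, &set, NULL);

        if (sigtimedwait(&set, &info, &ts) == -1)
                return -1;              /* errno: EAGAIN on timeout, EINTR if interrupted */
        printf("got signal %d from pid %d\n", info.si_signo, (int)info.si_pid);
        return 0;
}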
2348
2349 asmlinkage long
2350 sys_kill(int pid, int sig)
2351 {
2352         struct siginfo info;
2353
2354         info.si_signo = sig;
2355         info.si_errno = 0;
2356         info.si_code = SI_USER;
2357         info.si_pid = current->tgid;
2358         info.si_uid = current->uid;
2359
2360         return kill_something_info(sig, &info, pid);
2361 }
2362
2363 static int do_tkill(int tgid, int pid, int sig)
2364 {
2365         int error;
2366         struct siginfo info;
2367         struct task_struct *p;
2368
2369         error = -ESRCH;
2370         info.si_signo = sig;
2371         info.si_errno = 0;
2372         info.si_code = SI_TKILL;
2373         info.si_pid = current->tgid;
2374         info.si_uid = current->uid;
2375
2376         read_lock(&tasklist_lock);
2377         p = find_task_by_pid(pid);
2378         if (p && (tgid <= 0 || p->tgid == tgid)) {
2379                 error = check_kill_permission(sig, &info, p);
2380                 /*
2381                  * The null signal is a permissions and process existence
2382                  * probe.  No signal is actually delivered.
2383                  */
2384                 if (!error && sig && p->sighand) {
2385                         spin_lock_irq(&p->sighand->siglock);
2386                         handle_stop_signal(sig, p);
2387                         error = specific_send_sig_info(sig, &info, p);
2388                         spin_unlock_irq(&p->sighand->siglock);
2389                 }
2390         }
2391         read_unlock(&tasklist_lock);
2392
2393         return error;
2394 }
2395
2396 /**
2397  *  sys_tgkill - send signal to one specific thread
2398  *  @tgid: the thread group ID of the thread
2399  *  @pid: the PID of the thread
2400  *  @sig: signal to be sent
2401  *
2402  *  This syscall also checks the tgid and returns -ESRCH even if the PID
2403  *  exists but no longer belongs to the target process.  This
2404  *  method solves the problem of threads exiting and PIDs getting reused.
2405  */
2406 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2407 {
2408         /* This is only valid for single tasks */
2409         if (pid <= 0 || tgid <= 0)
2410                 return -EINVAL;
2411
2412         return do_tkill(tgid, pid, sig);
2413 }
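
/*
 * Illustrative userspace sketch (an editorial addition, not part of
 * kernel/signal.c; demo_tgkill_self is a made-up name): signalling one
 * specific thread.  glibc of this era has no tgkill() or gettid()
 * wrappers, so the raw syscalls are used.
 */
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int demo_tgkill_self(int sig)
{
        pid_t tgid = getpid();
        pid_t tid = syscall(SYS_gettid);

        return syscall(SYS_tgkill, tgid, tid, sig);
}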
2414
2415 /*
2416  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2417  */
2418 asmlinkage long
2419 sys_tkill(int pid, int sig)
2420 {
2421         /* This is only valid for single tasks */
2422         if (pid <= 0)
2423                 return -EINVAL;
2424
2425         return do_tkill(0, pid, sig);
2426 }
2427
2428 asmlinkage long
2429 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2430 {
2431         siginfo_t info;
2432
2433         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2434                 return -EFAULT;
2435
2436         /* Not even root can pretend to send signals from the kernel.
2437            Nor can they impersonate a kill(), which adds source info.  */
2438         if (info.si_code >= 0)
2439                 return -EPERM;
2440         info.si_signo = sig;
2441
2442         /* POSIX.1b doesn't mention process groups.  */
2443         return kill_proc_info(sig, &info, pid);
2444 }
2445
2446 int
2447 do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2448 {
2449         struct k_sigaction *k;
2450         sigset_t mask;
2451
2452         if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2453                 return -EINVAL;
2454
2455         k = &current->sighand->action[sig-1];
2456
2457         spin_lock_irq(&current->sighand->siglock);
2458         if (signal_pending(current)) {
2459                 /*
2460                  * If there might be a fatal signal pending on multiple
2461                  * threads, make sure we take it before changing the action.
2462                  */
2463                 spin_unlock_irq(&current->sighand->siglock);
2464                 return -ERESTARTNOINTR;
2465         }
2466
2467         if (oact)
2468                 *oact = *k;
2469
2470         if (act) {
2471                 sigdelsetmask(&act->sa.sa_mask,
2472                               sigmask(SIGKILL) | sigmask(SIGSTOP));
2473                 /*
2474                  * POSIX 3.3.1.3:
2475                  *  "Setting a signal action to SIG_IGN for a signal that is
2476                  *   pending shall cause the pending signal to be discarded,
2477                  *   whether or not it is blocked."
2478                  *
2479                  *  "Setting a signal action to SIG_DFL for a signal that is
2480                  *   pending and whose default action is to ignore the signal
2481                  *   (for example, SIGCHLD), shall cause the pending signal to
2482                  *   be discarded, whether or not it is blocked"
2483                  */
2484                 if (act->sa.sa_handler == SIG_IGN ||
2485                     (act->sa.sa_handler == SIG_DFL &&
2486                      sig_kernel_ignore(sig))) {
2487                         /*
2488                          * This is a fairly rare case, so we only take the
2489                          * tasklist_lock once we're sure we'll need it.
2490                          * Now we must do this little unlock and relock
2491                          * dance to maintain the lock hierarchy.
2492                          */
2493                         struct task_struct *t = current;
2494                         spin_unlock_irq(&t->sighand->siglock);
2495                         read_lock(&tasklist_lock);
2496                         spin_lock_irq(&t->sighand->siglock);
2497                         *k = *act;
2498                         sigemptyset(&mask);
2499                         sigaddset(&mask, sig);
2500                         rm_from_queue_full(&mask, &t->signal->shared_pending);
2501                         do {
2502                                 rm_from_queue_full(&mask, &t->pending);
2503                                 recalc_sigpending_tsk(t);
2504                                 t = next_thread(t);
2505                         } while (t != current);
2506                         spin_unlock_irq(&current->sighand->siglock);
2507                         read_unlock(&tasklist_lock);
2508                         return 0;
2509                 }
2510
2511                 *k = *act;
2512         }
2513
2514         spin_unlock_irq(&current->sighand->siglock);
2515         return 0;
2516 }
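
/*
 * Illustrative userspace sketch (an editorial addition, not part of
 * kernel/signal.c; demo_discard_pending is a made-up name): the POSIX
 * rule quoted above in action - a blocked, pending SIGUSR1 is discarded
 * the moment its action is set to SIG_IGN.
 */
#include <signal.h>
#include <stdio.h>

static int demo_discard_pending(void)
{
        sigset_t set, pending;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);

        raise(SIGUSR1);                 /* SIGUSR1 is now pending and blocked */
        signal(SIGUSR1, SIG_IGN);       /* ... and discarded right here */

        sigpending(&pending);
        printf("still pending? %d\n", sigismember(&pending, SIGUSR1));  /* prints 0 */
        return 0;
}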
2517
2518 int 
2519 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2520 {
2521         stack_t oss;
2522         int error;
2523
2524         if (uoss) {
2525                 oss.ss_sp = (void __user *) current->sas_ss_sp;
2526                 oss.ss_size = current->sas_ss_size;
2527                 oss.ss_flags = sas_ss_flags(sp);
2528         }
2529
2530         if (uss) {
2531                 void __user *ss_sp;
2532                 size_t ss_size;
2533                 int ss_flags;
2534
2535                 error = -EFAULT;
2536                 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2537                     || __get_user(ss_sp, &uss->ss_sp)
2538                     || __get_user(ss_flags, &uss->ss_flags)
2539                     || __get_user(ss_size, &uss->ss_size))
2540                         goto out;
2541
2542                 error = -EPERM;
2543                 if (on_sig_stack(sp))
2544                         goto out;
2545
2546                 error = -EINVAL;
2547                 /*
2548                  *
2549                  * Note - this code used to test ss_flags incorrectly;
2550                  *        old code may have been written using ss_flags==0
2551                  *        to mean ss_flags==SS_ONSTACK (as this was the only
2552                  *        way that worked) - this fix preserves that older
2553                  *        mechanism.
2554                  */
2555                 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2556                         goto out;
2557
2558                 if (ss_flags == SS_DISABLE) {
2559                         ss_size = 0;
2560                         ss_sp = NULL;
2561                 } else {
2562                         error = -ENOMEM;
2563                         if (ss_size < MINSIGSTKSZ)
2564                                 goto out;
2565                 }
2566
2567                 current->sas_ss_sp = (unsigned long) ss_sp;
2568                 current->sas_ss_size = ss_size;
2569         }
2570
2571         if (uoss) {
2572                 error = -EFAULT;
2573                 if (copy_to_user(uoss, &oss, sizeof(oss)))
2574                         goto out;
2575         }
2576
2577         error = 0;
2578 out:
2579         return error;
2580 }
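
/*
 * Illustrative userspace sketch (an editorial addition, not part of
 * kernel/signal.c; names are made up): installing an alternate stack no
 * smaller than MINSIGSTKSZ with ss_flags == 0, then asking for SIGSEGV to
 * be handled on it via SA_ONSTACK - the combination the checks above are
 * written for.
 */
#include <signal.h>
#include <string.h>
#include <unistd.h>

static char demo_altstack[64 * 1024];           /* comfortably above MINSIGSTKSZ */

static void demo_segv_handler(int sig)
{
        _exit(1);                               /* async-signal-safe exit */
}

static int demo_install_altstack(void)
{
        stack_t ss;
        struct sigaction sa;

        memset(&ss, 0, sizeof(ss));
        ss.ss_sp = demo_altstack;
        ss.ss_size = sizeof(demo_altstack);
        ss.ss_flags = 0;                        /* not SS_ONSTACK; see the note above */
        if (sigaltstack(&ss, NULL) == -1)
                return -1;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = demo_segv_handler;
        sa.sa_flags = SA_ONSTACK;               /* run the handler on the new stack */
        sigemptyset(&sa.sa_mask);
        return sigaction(SIGSEGV, &sa, NULL);
}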
2581
2582 #ifdef __ARCH_WANT_SYS_SIGPENDING
2583
2584 asmlinkage long
2585 sys_sigpending(old_sigset_t __user *set)
2586 {
2587         return do_sigpending(set, sizeof(*set));
2588 }
2589
2590 #endif
2591
2592 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2593 /* Some platforms have their own version with special arguments; others
2594    support only sys_rt_sigprocmask.  */
2595
2596 asmlinkage long
2597 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2598 {
2599         int error;
2600         old_sigset_t old_set, new_set;
2601
2602         if (set) {
2603                 error = -EFAULT;
2604                 if (copy_from_user(&new_set, set, sizeof(*set)))
2605                         goto out;
2606                 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2607
2608                 spin_lock_irq(&current->sighand->siglock);
2609                 old_set = current->blocked.sig[0];
2610
2611                 error = 0;
2612                 switch (how) {
2613                 default:
2614                         error = -EINVAL;
2615                         break;
2616                 case SIG_BLOCK:
2617                         sigaddsetmask(&current->blocked, new_set);
2618                         break;
2619                 case SIG_UNBLOCK:
2620                         sigdelsetmask(&current->blocked, new_set);
2621                         break;
2622                 case SIG_SETMASK:
2623                         current->blocked.sig[0] = new_set;
2624                         break;
2625                 }
2626
2627                 recalc_sigpending();
2628                 spin_unlock_irq(&current->sighand->siglock);
2629                 if (error)
2630                         goto out;
2631                 if (oset)
2632                         goto set_old;
2633         } else if (oset) {
2634                 old_set = current->blocked.sig[0];
2635         set_old:
2636                 error = -EFAULT;
2637                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2638                         goto out;
2639         }
2640         error = 0;
2641 out:
2642         return error;
2643 }
2644 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2645
2646 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2647 asmlinkage long
2648 sys_rt_sigaction(int sig,
2649                  const struct sigaction __user *act,
2650                  struct sigaction __user *oact,
2651                  size_t sigsetsize)
2652 {
2653         struct k_sigaction new_sa, old_sa;
2654         int ret = -EINVAL;
2655
2656         /* XXX: Don't preclude handling different sized sigset_t's.  */
2657         if (sigsetsize != sizeof(sigset_t))
2658                 goto out;
2659
2660         if (act) {
2661                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2662                         return -EFAULT;
2663         }
2664
2665         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2666
2667         if (!ret && oact) {
2668                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2669                         return -EFAULT;
2670         }
2671 out:
2672         return ret;
2673 }
2674 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2675
2676 #ifdef __ARCH_WANT_SYS_SGETMASK
2677
2678 /*
2679  * For backwards compatibility.  Functionality superseded by sigprocmask.
2680  */
2681 asmlinkage long
2682 sys_sgetmask(void)
2683 {
2684         /* SMP safe */
2685         return current->blocked.sig[0];
2686 }
2687
2688 asmlinkage long
2689 sys_ssetmask(int newmask)
2690 {
2691         int old;
2692
2693         spin_lock_irq(&current->sighand->siglock);
2694         old = current->blocked.sig[0];
2695
2696         siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2697                                                   sigmask(SIGSTOP)));
2698         recalc_sigpending();
2699         spin_unlock_irq(&current->sighand->siglock);
2700
2701         return old;
2702 }
2703 #endif /* __ARCH_WANT_SYS_SGETMASK */
2704
2705 #ifdef __ARCH_WANT_SYS_SIGNAL
2706 /*
2707  * For backwards compatibility.  Functionality superseded by sigaction.
2708  */
2709 asmlinkage unsigned long
2710 sys_signal(int sig, __sighandler_t handler)
2711 {
2712         struct k_sigaction new_sa, old_sa;
2713         int ret;
2714
2715         new_sa.sa.sa_handler = handler;
2716         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2717         sigemptyset(&new_sa.sa.sa_mask);
2718
2719         ret = do_sigaction(sig, &new_sa, &old_sa);
2720
2721         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2722 }
2723 #endif /* __ARCH_WANT_SYS_SIGNAL */
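
/*
 * Illustrative userspace sketch (an editorial addition, not part of
 * kernel/signal.c; demo_sysv_signal is a made-up name): the
 * SA_ONESHOT | SA_NOMASK action installed above is the SysV signal()
 * behaviour, i.e. sigaction() with SA_RESETHAND | SA_NODEFER - the
 * disposition resets to SIG_DFL on handler entry and the signal is not
 * blocked while its handler runs.
 */
#include <signal.h>
#include <string.h>

static void demo_once_handler(int sig)
{
        /* runs at most once; the action is already back to SIG_DFL here */
}

static int demo_sysv_signal(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = demo_once_handler;
        sa.sa_flags = SA_RESETHAND | SA_NODEFER;
        sigemptyset(&sa.sa_mask);
        return sigaction(SIGINT, &sa, NULL);
}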
2724
2725 #ifdef __ARCH_WANT_SYS_PAUSE
2726
2727 asmlinkage long
2728 sys_pause(void)
2729 {
2730         current->state = TASK_INTERRUPTIBLE;
2731         schedule();
2732         return -ERESTARTNOHAND;
2733 }
2734
2735 #endif
2736
2737 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2738 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2739 {
2740         sigset_t newset;
2741
2742         /* XXX: Don't preclude handling different sized sigset_t's.  */
2743         if (sigsetsize != sizeof(sigset_t))
2744                 return -EINVAL;
2745
2746         if (copy_from_user(&newset, unewset, sizeof(newset)))
2747                 return -EFAULT;
2748         sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2749
2750         spin_lock_irq(&current->sighand->siglock);
2751         current->saved_sigmask = current->blocked;
2752         current->blocked = newset;
2753         recalc_sigpending();
2754         spin_unlock_irq(&current->sighand->siglock);
2755
2756         current->state = TASK_INTERRUPTIBLE;
2757         schedule();
2758         set_thread_flag(TIF_RESTORE_SIGMASK);
2759         return -ERESTARTNOHAND;
2760 }
2761 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
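
/*
 * Illustrative userspace sketch (an editorial addition, not part of
 * kernel/signal.c; names are made up): the classic race-free wait that
 * rt_sigsuspend exists for - block the signal, test the flag, then
 * restore the old mask and sleep in one atomic step so a wakeup cannot
 * slip in between the test and the sleep.
 */
#include <signal.h>

static volatile sig_atomic_t demo_got_usr1;

static void demo_usr1_handler(int sig)
{
        demo_got_usr1 = 1;
}

static void demo_wait_for_usr1(void)
{
        sigset_t block, old;

        signal(SIGUSR1, demo_usr1_handler);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &old);

        while (!demo_got_usr1)
                sigsuspend(&old);       /* unblock SIGUSR1 and sleep atomically */

        sigprocmask(SIG_SETMASK, &old, NULL);
}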
2762
2763 void __init signals_init(void)
2764 {
2765         sigqueue_cachep =
2766                 kmem_cache_create("sigqueue",
2767                                   sizeof(struct sigqueue),
2768                                   __alignof__(struct sigqueue),
2769                                   SLAB_PANIC, NULL, NULL);
2770 }