X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Ffork.c;h=0c969f4fade05391a8faaf6a2ff30ad1837861db;hb=1020387f5f3b52929b387103cf976321981f8e26;hp=ddafdfac9456e151b2d8731b52475a5af4ca68e1;hpb=cfa76f024f7c9e65169425804e5b32e71f66d0ee;p=linux-2.6

diff --git a/kernel/fork.c b/kernel/fork.c
index ddafdfac94..0c969f4fad 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1045,6 +1045,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	copy_flags(clone_flags, p);
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
+#ifdef CONFIG_PREEMPT_RCU
+	p->rcu_read_lock_nesting = 0;
+	p->rcu_flipctr_idx = 0;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
 	p->vfork_done = NULL;
 	spin_lock_init(&p->alloc_lock);
 
@@ -1056,6 +1060,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->gtime = cputime_zero;
 	p->utimescaled = cputime_zero;
 	p->stimescaled = cputime_zero;
+	p->prev_utime = cputime_zero;
+	p->prev_stime = cputime_zero;
+
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+	p->last_switch_count = 0;
+	p->last_switch_timestamp = 0;
+#endif
 
 #ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
@@ -1121,6 +1132,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
 
+	/* Perform scheduler related setup. Assign this task to a CPU. */
+	sched_fork(p, clone_flags);
+
 	if ((retval = security_task_alloc(p)))
 		goto bad_fork_cleanup_policy;
 	if ((retval = audit_alloc(p)))
@@ -1210,9 +1224,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	INIT_LIST_HEAD(&p->ptrace_children);
 	INIT_LIST_HEAD(&p->ptrace_list);
 
-	/* Perform scheduler related setup. Assign this task to a CPU. */
-	sched_fork(p, clone_flags);
-
 	/* Now that the task is set up, run cgroup callbacks if
 	 * necessary. We need to run them before the task is visible
 	 * on the tasklist. */
@@ -1235,6 +1246,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * parent's CPU). This avoids alot of nasty races.
 	 */
 	p->cpus_allowed = current->cpus_allowed;
+	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
 	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
 			!cpu_online(task_cpu(p))))
 		set_task_cpu(p, smp_processor_id());
@@ -1290,23 +1302,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 			__ptrace_link(p, current->parent);
 
 		if (thread_group_leader(p)) {
-			if (clone_flags & CLONE_NEWPID) {
+			if (clone_flags & CLONE_NEWPID)
 				p->nsproxy->pid_ns->child_reaper = p;
-				p->signal->tty = NULL;
-				set_task_pgrp(p, p->pid);
-				set_task_session(p, p->pid);
-				attach_pid(p, PIDTYPE_PGID, pid);
-				attach_pid(p, PIDTYPE_SID, pid);
-			} else {
-				p->signal->tty = current->signal->tty;
-				set_task_pgrp(p, task_pgrp_nr(current));
-				set_task_session(p, task_session_nr(current));
-				attach_pid(p, PIDTYPE_PGID,
-						task_pgrp(current));
-				attach_pid(p, PIDTYPE_SID,
-						task_session(current));
-			}
-
+
+			p->signal->tty = current->signal->tty;
+			set_task_pgrp(p, task_pgrp_nr(current));
+			set_task_session(p, task_session_nr(current));
+			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
+			attach_pid(p, PIDTYPE_SID, task_session(current));
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
 			__get_cpu_var(process_counts)++;
 		}