/*
 *  linux/arch/x86-64/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 *  $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

static struct notifier_block *idle_notifier;
static DEFINE_SPINLOCK(idle_notifier_lock);
void idle_notifier_register(struct notifier_block *n)
{
	unsigned long flags;

	spin_lock_irqsave(&idle_notifier_lock, flags);
	notifier_chain_register(&idle_notifier, n);
	spin_unlock_irqrestore(&idle_notifier_lock, flags);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);
void idle_notifier_unregister(struct notifier_block *n)
{
	unsigned long flags;

	spin_lock_irqsave(&idle_notifier_lock, flags);
	notifier_chain_unregister(&idle_notifier, n);
	spin_unlock_irqrestore(&idle_notifier_lock, flags);
}
EXPORT_SYMBOL(idle_notifier_unregister);
enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
void enter_idle(void)
{
	__get_cpu_var(idle_state) = CPU_IDLE;
	notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}
static void __exit_idle(void)
{
	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
	notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	if (current->pid | read_pda(irqcount))
		return;
	__exit_idle();
}
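/*
 * The bitwise OR above is a cheap "either is nonzero" test: only the
 * idle task (pid 0) running at interrupt nesting depth 0 is truly
 * idle, so anything else returns before __exit_idle() is called.
 */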
/*
 * We use this if we don't have any better idle routine.
 */
static void default_idle(void)
{
	local_irq_enable();

	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb__after_clear_bit();
	while (!need_resched()) {
		local_irq_disable();
		if (!need_resched())
			safe_halt();
		else
			local_irq_enable();
	}
	set_thread_flag(TIF_POLLING_NRFLAG);
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();

	asm volatile(
		"2:"
		"testl %0,%1;"
		"rep; nop;"
		"je 2b;"
		: :
		"i" (_TIF_NEED_RESCHED),
		"m" (current_thread_info()->flags));
}
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) &&
			    !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
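/*
 * cpu_idle_wait() is meant to be called after pm_idle is changed, so
 * that no CPU keeps running the old idle handler, e.g.:
 *
 *	pm_idle = my_new_idle_routine;
 *	cpu_idle_wait();
 */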
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();
			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			enter_idle();
			idle();
			__exit_idle();
		}

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI needed to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter an optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
	local_irq_enable();

	while (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (need_resched())
			break;
		__mwait(0, 0);
	}
}
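/*
 * MONITOR arms a write-watch on the cache line holding the thread
 * flags and MWAIT then idles until that line is written or an
 * interrupt arrives. The smp_mb() plus need_resched() recheck closes
 * the window where the flag is set between MONITOR and MWAIT.
 */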
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int printed;

	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => all CPUs support mwait.
		 */
		if (!pm_idle) {
			if (!printed) {
				printk("using mwait in idle threads.\n");
				printed = 1;
			}
			pm_idle = mwait_idle;
		}
	}
}
static int __init idle_setup(char *str)
{
	if (!strncmp(str, "poll", 4)) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	}

	boot_option_idle_override = 1;
	return 1;
}

__setup("idle=", idle_setup);
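/*
 * Booting with "idle=poll" thus installs poll_idle before the
 * secondary CPUs come up; since pm_idle is then non-NULL,
 * select_idle_routine() skips the mwait override ("setup has
 * overridden idle" above).
 */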
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
	printk_address(regs->rip);
	printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
		regs->eflags);
309 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
310 regs->rax, regs->rbx, regs->rcx);
311 printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
312 regs->rdx, regs->rsi, regs->rdi);
313 printk("RBP: %016lx R08: %016lx R09: %016lx\n",
314 regs->rbp, regs->r8, regs->r9);
315 printk("R10: %016lx R11: %016lx R12: %016lx\n",
316 regs->r10, regs->r11, regs->r12);
317 printk("R13: %016lx R14: %016lx R15: %016lx\n",
318 regs->r13, regs->r14, regs->r15);
320 asm("movl %%ds,%0" : "=r" (ds));
321 asm("movl %%cs,%0" : "=r" (cs));
322 asm("movl %%es,%0" : "=r" (es));
323 asm("movl %%fs,%0" : "=r" (fsindex));
324 asm("movl %%gs,%0" : "=r" (gsindex));
326 rdmsrl(MSR_FS_BASE, fs);
327 rdmsrl(MSR_GS_BASE, gs);
328 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
330 asm("movq %%cr0, %0": "=r" (cr0));
331 asm("movq %%cr2, %0": "=r" (cr2));
332 asm("movq %%cr3, %0": "=r" (cr3));
333 asm("movq %%cr4, %0": "=r" (cr4));
335 printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
336 fs,fsindex,gs,gsindex,shadowgs);
337 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
338 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(&regs->rsp);
}
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
}
void flush_thread(void)
{
	struct task_struct *tsk = current;
	struct thread_info *t = current_thread_info();

	if (t->flags & _TIF_ABI_PENDING)
		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state.
	 */
	clear_fpu(tsk);
	clear_used_math();
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	struct desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	return desc->base0 |
		(((u32)desc->base1) << 16) |
		(((u32)desc->base2) << 24);
}
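/*
 * The descriptor scatters the 32-bit base across three fields: bits
 * 0-15 in base0, bits 16-23 in base1 and bits 24-31 in base2, which
 * is what the shifts above reassemble.
 */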
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->rax = 0;
	childregs->rsp = rsp;
	if (rsp == ~0UL)
		childregs->rsp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs+1);
	p->thread.userrsp = me->thread.userrsp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
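/*
 * The ## token paste builds the field name, so for example
 * loaddebug(next, 7) expands to set_debugreg(next->debugreg7, 7).
 */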
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
__kprobes struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_TLS(next, cpu);
547 asm volatile("movl %%fs,%0" : "=r" (fsindex));
548 /* segment register != 0 always requires a reload.
549 also reload when it has changed.
550 when prev process used 64bit base always reload
551 to avoid an information leak. */
552 if (unlikely(fsindex | next->fsindex | prev->fs)) {
553 loadsegment(fs, next->fsindex);
554 /* check if the user used a selector != 0
555 * if yes clear 64bit base, since overloaded base
556 * is always mapped to the Null selector
561 /* when next process has a 64bit base use it */
563 wrmsrl(MSR_FS_BASE, next->fs);
564 prev->fsindex = fsindex;
568 asm volatile("movl %%gs,%0" : "=r" (gsindex));
569 if (unlikely(gsindex | next->gsindex | prev->gs)) {
570 load_gs_index(next->gsindex);
575 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
576 prev->gsindex = gsindex;
	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);
	/* This must be here to ensure both math_state_restore() and
	   kernel_fpu_begin() work consistently. */
	unlazy_fpu(prev_p);
	write_pda(kernelstack,
		  task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
	/*
	 * Now maybe reload the debug registers
	 */
	if (unlikely(next->debugreg7)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}
	/*
	 * Handle the IO bitmap
	 */
	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		if (next->io_bitmap_ptr)
			/*
			 * Copy the relevant range of the IO bitmap.
			 * Normally this is 128 bytes or less:
			 */
			memcpy(tss->io_bitmap, next->io_bitmap_ptr,
			       max(prev->io_bitmap_max, next->io_bitmap_max));
		else {
			/*
			 * Clear any possible leftover bits:
			 */
			memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
		}
	}

	return prev_p;
}
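/*
 * Note that __switch_to() only handles state that the low-level
 * assembly in entry.S does not: segment bases, the per-CPU PDA,
 * debug registers and the I/O permission bitmap. The general
 * purpose registers and stack pointer are swapped by the
 * surrounding switch_to() macro.
 */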
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
	return error;
}
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64-bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64-bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32-bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid,
	  struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->rsp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
		       NULL, NULL);
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, rip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		rip = *(u64 *)(fp+8);
		if (!in_sched_functions(rip))
			return rip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}
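/*
 * This walk only works on kernels built with frame pointers: each
 * frame stores the saved %rbp at fp and the return address at fp+8,
 * so the loop climbs at most 16 frames of the sleeping task's stack
 * until it finds a return address outside the scheduler.
 */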
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;

		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
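/*
 * Illustrative userspace usage (not part of this file), assuming the
 * exported <asm/prctl.h> codes and the syscall(2) wrapper:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, 0x100000UL);
 *
 * Note that for the GET codes, addr is a pointer the result is
 * stored through, while for the SET codes it is the new base itself.
 */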
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = task_pt_regs(tsk);

	ptregs = *pp;
	ptregs.cs &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}
unsigned long arch_align_stack(unsigned long sp)
{
	if (randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
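/*
 * The subtraction slides the initial stack down by 0-8191 bytes and
 * the final mask restores the 16-byte alignment the x86-64 ABI
 * requires, giving 512 possible aligned starting offsets.
 */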