/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
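
/* ret_from_fork is the assembly stub in entry.S through which every
   newly created task first resumes execution. */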
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL(idle_notifier_unregister);

void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

/*
 * We use this if we don't have any better
 * idle routine.
 */
static void default_idle(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	local_irq_disable();
	if (!need_resched()) {
		ktime_t t0, t1;
		u64 t0n, t1n;

		t0 = ktime_get();
		t0n = ktime_to_ns(t0);
		safe_halt();	/* enables interrupts racelessly */
		local_irq_disable();
		t1 = ktime_get();
		t1n = ktime_to_ns(t1);
		sched_clock_idle_wakeup_event(t1n - t0n);
	}
	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	local_irq_enable();
	cpu_relax();
}

static void do_nothing(void *unused)
{
}
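
/*
 * cpu_idle_wait() is used whenever the idle routine changes: it returns
 * only once every online CPU has been seen leaving its old idle handler.
 * CPUs that are idle with nothing to do get kicked awake below.
 */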
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map, tmp = current->cpus_allowed;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) &&
			    !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
		/*
		 * We waited 1 sec, if a CPU still did not call idle
		 * it may be because it is in idle and not waking up
		 * because it has nothing to do.
		 * Give all the remaining CPUS a kick.
		 */
		smp_call_function_mask(map, do_nothing, 0, 0);
	} while (!cpus_empty(map));

	set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			tick_nohz_stop_sched_tick();

			rmb();
			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			idle();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
	}
}
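
/*
 * For example, the ACPI C-state code enters a given C-state with
 * something like mwait_idle_with_hints(cx->address,
 * MWAIT_ECX_INTERRUPT_BREAK): eax carries the target state hint,
 * ecx the MWAIT extensions.
 */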

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
	} else {
		local_irq_enable();
	}
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int printed;

	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => All CPUs support mwait
		 */
		if (!pm_idle) {
			if (!printed) {
				printk(KERN_INFO "using mwait in idle threads.\n");
				printed = 1;
			}
			pm_idle = mwait_idle;
		}
	}
}

static int __init idle_setup(char *str)
{
	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);
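
/*
 * Example: booting with "idle=poll" installs poll_idle above, while
 * "idle=mwait" forces MWAIT-based idle; both set
 * boot_option_idle_override so later setup code leaves the choice alone.
 */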

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
340 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
341 printk_address(regs->rip);
342 printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
344 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
345 regs->rax, regs->rbx, regs->rcx);
346 printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
347 regs->rdx, regs->rsi, regs->rdi);
348 printk("RBP: %016lx R08: %016lx R09: %016lx\n",
349 regs->rbp, regs->r8, regs->r9);
350 printk("R10: %016lx R11: %016lx R12: %016lx\n",
351 regs->r10, regs->r11, regs->r12);
352 printk("R13: %016lx R14: %016lx R15: %016lx\n",
353 regs->r13, regs->r14, regs->r15);
355 asm("movl %%ds,%0" : "=r" (ds));
356 asm("movl %%cs,%0" : "=r" (cs));
357 asm("movl %%es,%0" : "=r" (es));
358 asm("movl %%fs,%0" : "=r" (fsindex));
359 asm("movl %%gs,%0" : "=r" (gsindex));
361 rdmsrl(MSR_FS_BASE, fs);
362 rdmsrl(MSR_GS_BASE, gs);
363 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
370 printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
371 fs,fsindex,gs,gsindex,shadowgs);
372 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
373 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
378 printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
382 printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);

void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(NULL, regs, (void *)(regs + 1));
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
			clear_tsk_thread_flag(tsk, TIF_IA32);
		} else {
			set_tsk_thread_flag(tsk, TIF_IA32);
			current_thread_info()->status |= TS_COMPAT;
		}
	}
	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state.
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}
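
/*
 * A GDT descriptor splits its base address across three fields:
 * base0 holds bits 0-15, base1 bits 16-23 and base2 bits 24-31;
 * read_32bit_tls() below reassembles them.
 */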
static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	struct desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	return desc->base0 |
		(((u32)desc->base1) << 16) |
		(((u32)desc->base2) << 24);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
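	/*
	 * The child's saved register frame sits at the very top of its
	 * kernel stack, one struct pt_regs below THREAD_SIZE.
	 */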
	*childregs = *regs;

	childregs->rax = 0;
	childregs->rsp = rsp;
	if (rsp == ~0UL)
		childregs->rsp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs + 1);
	p->thread.userrsp = me->thread.userrsp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
		       IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
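/* e.g. loaddebug(next, 0) expands to set_debugreg(next->debugreg0, 0) */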

static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_TLS(next, cpu);

	/*
	 * Switch FS and GS.
	 */
	{
		unsigned fsindex;

		asm volatile("movl %%fs,%0" : "=r" (fsindex));
		/* segment register != 0 always requires a reload.
		   also reload when it has changed.
		   when prev process used a 64bit base always reload
		   to avoid an information leak. */
		if (unlikely(fsindex | next->fsindex | prev->fs)) {
			loadsegment(fs, next->fsindex);
			/* check if the user used a selector != 0;
			 * if yes clear the 64bit base, since the overloaded
			 * base is always mapped to the Null selector
			 */
			if (fsindex)
				prev->fs = 0;
		}
		/* when next process has a 64bit base use it */
		if (next->fs)
			wrmsrl(MSR_FS_BASE, next->fs);
		prev->fsindex = fsindex;
	}
	{
		unsigned gsindex;

		asm volatile("movl %%gs,%0" : "=r" (gsindex));
		if (unlikely(gsindex | next->gsindex | prev->gs)) {
			load_gs_index(next->gsindex);
			if (gsindex)
				prev->gs = 0;
		}
		if (next->gs)
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
		prev->gsindex = gsindex;
	}

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	write_pda(stack_canary, next_p->stack_canary);
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW) ||
	    test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid,
	  struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->rsp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
		       NULL, NULL);
}
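
/*
 * get_wchan() walks the saved frame pointers of a sleeping task to find
 * the first return address outside the scheduler, i.e. the function the
 * task is blocked in. It assumes frames that push %rbp, so it is only a
 * best-effort walk (bounded to 16 frames).
 */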
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, rip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.rsp < stack || p->thread.rsp > stack + THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack + THREAD_SIZE)
			return 0;
		rip = *(u64 *)(fp + 8);
		if (!in_sched_functions(rip))
			return rip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;

		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			asm("movl %%gs,%0" : "=r" (gsindex));
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
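
/*
 * Illustrative userspace view: a threading library typically sets up its
 * TLS base with arch_prctl(ARCH_SET_FS, (unsigned long)tls_block) and can
 * read it back via arch_prctl(ARCH_GET_FS, (unsigned long)&base).
 */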

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = task_pt_regs(tsk);

	ptregs = *pp;
	ptregs.cs &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}