arch/um/kernel/process.c
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/err.h"
#include "linux/hardirq.h"
#include "linux/mm.h"
#include "linux/personality.h"
#include "linux/proc_fs.h"
#include "linux/ptrace.h"
#include "linux/random.h"
#include "linux/sched.h"
#include "linux/threads.h"
#include "asm/pgtable.h"
#include "asm/uaccess.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(struct task_struct *task)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

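/*
 * Map a host process id back to the slot in cpu_tasks[] that it occupies,
 * or -1 if no CPU matches.
 */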
int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

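/*
 * Allocate a kernel stack of 2^order pages; callers that cannot sleep pass
 * atomic != 0 so that GFP_ATOMIC is used instead of GFP_KERNEL.
 */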
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);
	if (page == 0)
		return 0;

	return page;
}

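/*
 * Start a kernel thread: the function and its argument are stashed in the
 * current thread's request and picked up by new_thread_handler() once the
 * child starts running.
 */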
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}

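/* Record the task now running on this CPU, along with its host pid. */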
static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(task), task });
}

extern void arch_switch_to(struct task_struct *from, struct task_struct *to);

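/*
 * The actual context switch: a longjmp from the outgoing thread's switch_buf
 * into the incoming thread's.  If the thread we come back in has a saved_task
 * recorded, the switch is repeated with that task as the target.
 */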
void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to = next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current->thread.prev_sched, current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		next = current->thread.saved_task;
		prev = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}

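/* Handle any pending reschedule or signal before returning to userspace. */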
void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

void exit_thread(void)
{
}

void *get_current(void)
{
	return current;
}

extern void schedule_tail(struct task_struct *prev);

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	} else
		do_exit(0);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();
	if (current->thread.prev_sched == NULL)
		panic("blech");

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We may want to rely on that to
	 * improve performance. -bb
	 */
	arch_switch_to(current->thread.prev_sched, current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

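/*
 * Set up the new task's thread state.  When forking a userspace process the
 * parent's registers are copied and the child's syscall return value is set
 * to 0; for a kernel thread the request stashed by kernel_thread() is copied
 * instead and the thread starts in new_thread_handler().
 */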
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct *p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		init_thread_registers(&p->thread.regs.regs);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

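/*
 * Run a callback on the initial thread via initial_thread_cb_skas(),
 * temporarily clearing kmalloc_ok so allocation helpers know not to use
 * kmalloc while it runs.
 */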
void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void default_idle(void)
{
	while (1) {
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		switch_timers(1);
		idle_sleep(10);
		switch_timers(0);
	}
}

void cpu_idle(void)
{
	cpu_tasks[current_thread->cpu].pid = os_getpid();
	default_idle();
}

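/*
 * Walk the given task's page tables to translate a virtual address into its
 * physical address, optionally returning the pte through pte_out.  Returns
 * ERR_PTR(-EINVAL) if any level of the walk is not present.
 */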
void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
		      pte_t *pte_out)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t ptent;

	if (task->mm == NULL)
		return ERR_PTR(-EINVAL);
	pgd = pgd_offset(task->mm, addr);
	if (!pgd_present(*pgd))
		return ERR_PTR(-EINVAL);

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return ERR_PTR(-EINVAL);

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return ERR_PTR(-EINVAL);

	pte = pte_offset_kernel(pmd, addr);
	ptent = *pte;
	if (!pte_present(ptent))
		return ERR_PTR(-EINVAL);

	if (pte_out != NULL)
		*pte_out = ptent;
	return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
}

char *current_cmd(void)
{
#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
	return "(Unknown)";
#else
	void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
	return IS_ERR(addr) ? "(Unknown)" : __va((unsigned long) addr);
#endif
}

void dump_thread(struct pt_regs *regs, struct user *u)
{
}

int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

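/*
 * Return nonzero if the given stack pointer lies outside the current kernel
 * stack, i.e. it belongs to a userspace context.
 */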
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread;
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

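/* Run the UML-specific exitcalls, in reverse order of registration. */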
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(char *string)
{
	return kstrdup(string, GFP_KERNEL);
}

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread->cpu;
}

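/*
 * Runtime control of how much PTRACE_SYSEMU support is used, exported to
 * userspace through /proc/sysemu (0, 1 or 2, never above sysemu_supported).
 */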
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,
			    int *eof, void *data)
{
	if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size)
		/* No overflow */
		*eof = 1;

	return strlen(buf);
}

static int proc_write_sysemu(struct file *file, const char __user *buf,
			     unsigned long count, void *data)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = create_proc_entry("sysemu", 0600, &proc_root);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	ent->read_proc  = proc_read_sysemu;
	ent->write_proc = proc_write_sysemu;

	return 0;
}

late_initcall(make_proc_sysemu);

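/*
 * Report whether the task (current if t is NULL) is being single-stepped:
 * 0 = no, 1 = stepping through a syscall, 2 = ordinary single-stepping.
 */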
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h.
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif