/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
6 #include "linux/delay.h"
8 #include "linux/module.h"
9 #include "linux/seq_file.h"
10 #include "linux/string.h"
11 #include "linux/utsname.h"
12 #include "asm/pgtable.h"
13 #include "asm/processor.h"
14 #include "asm/setup.h"
16 #include "as-layout.h"
/* Fallback boot command line used when the user passes no arguments:
 * root on device 98:0 (ubd0). */
23 #define DEFAULT_COMMAND_LINE "root=98:0"
25 /* Changed in add_arg and setup_arch, which run before SMP is started */
/* Accumulated kernel command line; __initdata, discarded after boot. */
26 static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
/*
 * add_arg - append one argument string to the boot command line,
 * space-separating it from what is already there.  Arguments that would
 * overflow COMMAND_LINE_SIZE are reported and dropped.
 * NOTE(review): interior lines are missing from this dump (braces and the
 * bail-out after the overflow printf are not visible) — confirm against
 * the original file.
 */
28 static void __init add_arg(char *arg)
30 if (strlen(command_line) + strlen(arg) + 1 > COMMAND_LINE_SIZE) {
31 printf("add_arg: Too many command line arguments!\n");
/* Insert a separating space only if something is already accumulated. */
34 if (strlen(command_line) > 0)
35 strcat(command_line, " ");
36 strcat(command_line, arg);
40 * These fields are initialized at boot time and not changed.
41 * XXX This structure is used only in the non-SMP case. Maybe this
42 * should be moved to smp.c.
/* Per-CPU info for the boot CPU; IPI pipe fds start out invalid (-1). */
44 struct cpuinfo_um boot_cpu_data = {
46 .ipi_pipe = { -1, -1 }
/*
 * thread_saved_pc - return the program counter of the host process that
 * backs userspace.  The 'task' argument is currently ignored: only the
 * CPU-0 host pid is consulted (see FIXME below).
 */
49 unsigned long thread_saved_pc(struct task_struct *task)
51 /* FIXME: Need to look up userspace_pid by cpu */
52 return os_process_pc(userspace_pid[0]);
55 /* Changed in setup_arch, which is called in early boot */
/* Buffer for the host's utsname description shown in /proc/cpuinfo;
 * sized for five __NEW_UTS_LEN-sized fields plus terminators. */
56 static char host_info[(__NEW_UTS_LEN + 1) * 5];
/*
 * show_cpuinfo - seq_file show callback backing /proc/cpuinfo.
 * Prints one record per online CPU: index, UML identity strings, the
 * host utsname summary, and bogomips derived from loops_per_jiffy.
 * NOTE(review): lines are elided in this dump — the cpu_online() early
 * return body and the #ifdef CONFIG_SMP guard around the index
 * computation are not visible here.
 */
58 static int show_cpuinfo(struct seq_file *m, void *v)
/* Recover the CPU index from the iterator cursor's offset in cpu_data. */
63 index = (struct cpuinfo_um *) v - cpu_data;
64 if (!cpu_online(index))
68 seq_printf(m, "processor\t: %d\n", index);
69 seq_printf(m, "vendor_id\t: User Mode Linux\n");
70 seq_printf(m, "model name\t: UML\n");
71 seq_printf(m, "mode\t\t: skas\n");
72 seq_printf(m, "host\t\t: %s\n", host_info);
/* Standard bogomips formatting: integer part and two decimal places. */
73 seq_printf(m, "bogomips\t: %lu.%02lu\n\n",
74 loops_per_jiffy/(500000/HZ),
75 (loops_per_jiffy/(5000/HZ)) % 100);
/*
 * seq_file iterator callbacks for /proc/cpuinfo: walk the cpu_data
 * array one entry per position until NR_CPUS is reached.
 */
80 static void *c_start(struct seq_file *m, loff_t *pos)
82 return *pos < NR_CPUS ? cpu_data + *pos : NULL;
/* Advance the cursor; delegates bounds handling back to c_start().
 * NOTE(review): the *pos increment line is not visible in this dump. */
85 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
88 return c_start(m, pos);
/* Nothing to release at end of iteration. */
91 static void c_stop(struct seq_file *m, void *v)
/* Ops table consumed by the generic /proc/cpuinfo seq_file code;
 * initializer lines are elided in this dump. */
95 const struct seq_operations cpuinfo_op = {
102 /* Set in linux_main */
/* Address-space layout discovered/derived during linux_main(). */
103 unsigned long host_task_size;
104 unsigned long task_size;
105 unsigned long uml_physmem;
106 unsigned long uml_reserved; /* Also modified in mem_init */
107 unsigned long start_vm;
108 unsigned long end_vm;
110 /* Set in uml_ncpus_setup */
113 /* Set in early boot */
/* Flag recording whether a root= option was seen on the command line. */
114 static int have_root __initdata = 0;
116 /* Set in uml_mem_setup and modified in linux_main */
/* Default physical memory size: 32 MB unless overridden by mem=. */
117 long long physmem_size = 32 * 1024 * 1024;
/* Banner printed by the --help handler (Usage) below. */
119 static char *usage_string =
120 "User Mode Linux v%s\n"
121 " available at http://user-mode-linux.sourceforge.net/\n\n";
/*
 * uml_version_setup - handler for the --version switch: print the
 * kernel release string.  NOTE(review): the exit/return lines are not
 * visible in this dump.
 */
123 static int __init uml_version_setup(char *line, int *add)
125 printf("%s\n", init_utsname()->release);
131 __uml_setup("--version", uml_version_setup,
133 " Prints the version number of the kernel.\n\n"
/*
 * uml_root_setup - handler for root=; body is elided in this dump
 * (presumably records have_root — confirm against the original file).
 */
136 static int __init uml_root_setup(char *line, int *add)
142 __uml_setup("root=", uml_root_setup,
143 "root=<file containing the root fs>\n"
144 "    This is actually used by the generic kernel in exactly the same\n"
145 "    way as in any other kernel. If you configure a number of block\n"
146 "    devices and want to boot off something other than ubd0, you \n"
147 "    would use something like:\n"
148 "        root=/dev/ubd5\n\n"
/*
 * no_skas_debug_setup - handler for the obsolete 'debug' switch: in
 * skas mode UML is debugged by running gdb on the linux binary itself,
 * so just tell the user that and swallow the option.
 */
151 static int __init no_skas_debug_setup(char *line, int *add)
153 printf("'debug' is not necessary to gdb UML in skas mode - run \n");
154 printf("'gdb linux'");
159 __uml_setup("debug", no_skas_debug_setup,
161 "    this flag is not needed to run gdb on UML in skas mode\n\n"
/*
 * uml_ncpus_setup - parse ncpus=<n> into the ncpus global; complain on
 * unparsable input.  NOTE(review): the success return is elided here.
 */
165 static int __init uml_ncpus_setup(char *line, int *add)
167 if (!sscanf(line, "%d", &ncpus)) {
168 printf("Couldn't parse [%s]\n", line);
175 __uml_setup("ncpus=", uml_ncpus_setup,
176 "ncpus=<# of desired CPUs>\n"
177 "    This tells an SMP kernel how many virtual processors to start.\n\n"
/*
 * Usage - handler for --help: print the banner, then walk the
 * __uml_help section emitted by the __uml_setup registrations and
 * print each entry's help text.  NOTE(review): the loop body printing
 * each entry is elided in this dump.
 */
181 static int __init Usage(char *line, int *add)
185 printf(usage_string, init_utsname()->release);
186 p = &__uml_help_start;
187 while (p < &__uml_help_end) {
195 __uml_setup("--help", Usage,
197 "    Prints this message.\n\n"
/*
 * uml_checksetup - dispatch one command-line token: scan the
 * __uml_setup table for a registered prefix match and invoke its
 * handler with the remainder of the token.
 */
200 static int __init uml_checksetup(char *line, int *add)
204 p = &__uml_setup_start;
205 while(p < &__uml_setup_end) {
/* Prefix match against the registered option string, then hand the
 * tail of the argument to the handler; a true return consumes it. */
209 if (!strncmp(line, p->str, n) && p->setup_func(line + n, add))
/*
 * uml_postsetup - run every hook registered in the __uml_postsetup
 * linker section.  NOTE(review): the call and increment inside the
 * loop are elided in this dump.
 */
216 static void __init uml_postsetup(void)
220 p = &__uml_postsetup_start;
221 while(p < &__uml_postsetup_end) {
228 /* Set during early boot */
/* Host brk at startup; basis for the physical-memory reservation below. */
229 unsigned long brk_start;
230 unsigned long end_iomem;
231 EXPORT_SYMBOL(end_iomem);
/* Minimum address space reserved for the kernel's vmalloc area: 32 MB. */
233 #define MIN_VMALLOC (32 * 1024 * 1024)
/* Linker-provided start of the UML binary image. */
235 extern char __binary_start;
/*
 * set_task_sizes_skas - compute the host's usable task size and the UML
 * process task size.  Without a stub the full host task size is usable;
 * with a stub the task size is capped below STUB_START, rounded down to
 * a PGDIR boundary.  Returns the host task size.
 * NOTE(review): the argument to ROUND_4M and the else keyword are on
 * lines elided from this dump.
 */
237 static unsigned long set_task_sizes_skas(unsigned long *task_size_out)
239 /* Round up to the nearest 4M */
240 unsigned long host_task_size = ROUND_4M((unsigned long)
243 if (!skas_needs_stub)
244 *task_size_out = host_task_size;
246 *task_size_out = STUB_START & PGDIR_MASK;
248 return host_task_size;
/*
 * linux_main - UML's pre-start_kernel() entry point, called from the
 * host-side main().  Parses the command line, sizes and lays out
 * "physical" memory inside the host address space, and prepares the
 * stack/signal environment before the generic kernel boots.
 * NOTE(review): many lines are elided from this dump (declarations of
 * i/add/mode/iomem_size/highmem, several closing braces, #ifdef arms,
 * and the final return) — consult the original file before editing.
 */
251 int __init linux_main(int argc, char **argv)
253 unsigned long avail, diff;
254 unsigned long virtmem_size, max_physmem;
/* Feed each host argv entry through the __uml_setup dispatch table. */
258 for (i = 1; i < argc; i++) {
259 if ((i == 1) && (argv[i][0] == ' '))
262 uml_checksetup(argv[i], &add);
/* No root= seen: fall back to the built-in default command line. */
267 add_arg(DEFAULT_COMMAND_LINE);
269 /* OS sanity checks that need to happen before the kernel runs */
270 os_early_checks();
274 if (proc_mm && ptrace_faultinfo)
279 printf("UML running in %s mode\n", mode);
281 host_task_size = set_task_sizes_skas(&task_size);
284 * Setting up handlers to 'sig_info' struct
286 os_fill_handlinfo(handlinfo_kern);
288 brk_start = (unsigned long) sbrk(0);
291 * Increase physical memory size for exec-shield users
292 * so they actually get what they asked for. This should
293 * add zero for non-exec shield users
296 diff = UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
297 if (diff > 1024 * 1024) {
298 printf("Adding %ld bytes to physical memory to account for "
299 "exec-shield gap\n", diff);
300 physmem_size += UML_ROUND_UP(brk_start) - UML_ROUND_UP(&_end);
/* Physical memory starts at the page-aligned base of the UML binary. */
303 uml_physmem = (unsigned long) &__binary_start & PAGE_MASK;
305 /* Reserve up to 4M after the current brk */
306 uml_reserved = ROUND_4M(brk_start) + (1 << 22);
308 setup_machinename(init_utsname()->machine);
/* Page-align iomem, then bound physmem by what fits below the kernel
 * end after leaving room for iomem and the vmalloc minimum. */
311 iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
312 max_physmem = get_kmem_end() - uml_physmem - iomem_size - MIN_VMALLOC;
315 * Zones have to begin on a 1 << MAX_ORDER page boundary,
316 * so this makes sure that's true for highmem
318 max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1);
/* Overflow beyond max_physmem becomes highmem (or is dropped when
 * CONFIG_HIGHMEM is off — see the warning below). */
319 if (physmem_size + iomem_size > max_physmem) {
320 highmem = physmem_size + iomem_size - max_physmem;
321 physmem_size -= highmem;
322 #ifndef CONFIG_HIGHMEM
324 printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
325 "to %Lu bytes\n", physmem_size);
/* Final layout: [uml_physmem .. high_physmem) physmem, then iomem. */
329 high_physmem = uml_physmem + physmem_size;
330 end_iomem = high_physmem + iomem_size;
331 high_memory = (void *) end_iomem;
333 start_vm = VMALLOC_START;
335 setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
336 if (init_maps(physmem_size, iomem_size, highmem)) {
337 printf("Failed to allocate mem_map for %Lu bytes of physical "
338 "memory and %Lu bytes of highmem\n", physmem_size,
/* Clamp the kernel's directly-mapped virtual size to what fits
 * between VMALLOC_START and the end of kernel memory. */
343 virtmem_size = physmem_size;
344 avail = get_kmem_end() - start_vm;
345 if (physmem_size > avail)
346 virtmem_size = avail;
347 end_vm = start_vm + virtmem_size;
349 if (virtmem_size < physmem_size)
350 printf("Kernel virtual memory size shrunk to %lu bytes\n",
355 stack_protections((unsigned long) &init_thread_info);
/* Process exit code handed back to the host when UML shuts down. */
361 extern int uml_exitcode;
/*
 * panic_exit - panic-notifier callback: dump register state and bail
 * out of the host process.  NOTE(review): the parameter list tail, the
 * bust_spinlocks/os_dump_core lines, and the return are elided here.
 */
363 static int panic_exit(struct notifier_block *self, unsigned long unused1,
367 show_regs(&(current->thread.regs));
/* Registered with the panic notifier chain in setup_arch(). */
374 static struct notifier_block panic_exit_notifier = {
375 .notifier_call = panic_exit,
/*
 * setup_arch - generic-kernel arch hook: hook panic handling, publish
 * the command line assembled in linux_main, and capture the host's
 * utsname info for /proc/cpuinfo.
 */
380 void __init setup_arch(char **cmdline_p)
382 atomic_notifier_chain_register(&panic_notifier_list,
383 &panic_exit_notifier);
385 strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
386 *cmdline_p = command_line;
387 setup_hostinfo(host_info, sizeof host_info);
/*
 * check_bugs and the alternatives_* hooks below are required by the
 * generic kernel / SMP-alternatives interface; on UML they have
 * nothing to do (bodies elided in this dump appear empty apart from
 * check_bugs' arch check — confirm against the original file).
 */
390 void __init check_bugs(void)
396 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
401 void alternatives_smp_module_add(struct module *mod, char *name,
402 void *locks, void *locks_end,
403 void *text, void *text_end)
407 void alternatives_smp_module_del(struct module *mod)