kexec jump: in sync with hibernation implementation
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c8a4370e2a34e94b2e391fada22fec051b162195..17c80fdc453be9c332532aa2deb7bbd9e9167519 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -77,7 +77,7 @@ int kexec_should_crash(struct task_struct *p)
  *
  * The code for the transition from the current kernel to the
  * new kernel is placed in the control_code_buffer, whose size
- * is given by KEXEC_CONTROL_CODE_SIZE.  In the best case only a single
+ * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
  * page of memory is necessary, but some architectures require more.
  * Because this memory must be identity mapped in the transition from
  * virtual to physical addresses it must live in the range
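The comment fix above tracks an arch-side rename: the constant sizes the whole control page area, not just the transition code, so KEXEC_CONTROL_CODE_SIZE became KEXEC_CONTROL_PAGE_SIZE. The value is defined per architecture; a typical definition might look like the sketch below (the one-page x86 value is an assumption here):

	/* sketch of a per-arch definition, e.g. in the x86 kexec header */
	#define KEXEC_CONTROL_PAGE_SIZE	4096	/* one 4 KiB page suffices */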
@@ -242,7 +242,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
-                                          get_order(KEXEC_CONTROL_CODE_SIZE));
+                                          get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
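get_order() converts the byte size into a buddy-allocator order for kimage_alloc_control_pages(), so a one-page KEXEC_CONTROL_PAGE_SIZE yields an order-0 (single page) allocation here. A quick illustration, assuming PAGE_SIZE is 4096:

	get_order(4096);	/* == 0: one page   */
	get_order(4097);	/* == 1: two pages  */
	get_order(16384);	/* == 2: four pages */

Architectures that need a larger control area simply define a bigger KEXEC_CONTROL_PAGE_SIZE and get a higher order from the same call.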
@@ -317,7 +317,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
         */
        result = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
-                                          get_order(KEXEC_CONTROL_CODE_SIZE));
+                                          get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
                goto out;
@@ -1426,11 +1426,9 @@ static int __init crash_save_vmcoreinfo_init(void)
 
 module_init(crash_save_vmcoreinfo_init)
 
-/**
- *     kernel_kexec - reboot the system
- *
- *     Move into place and start executing a preloaded standalone
- *     executable.  If nothing was preloaded return an error.
+/*
+ * Move into place and start executing a preloaded standalone
+ * executable.  If nothing was preloaded return an error.
  */
 int kernel_kexec(void)
 {
@@ -1443,8 +1441,8 @@ int kernel_kexec(void)
                goto Unlock;
        }
 
-       if (kexec_image->preserve_context) {
 #ifdef CONFIG_KEXEC_JUMP
+       if (kexec_image->preserve_context) {
                mutex_lock(&pm_mutex);
                pm_prepare_console();
                error = freeze_processes();
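Moving the preserve_context test inside #ifdef CONFIG_KEXEC_JUMP means a kernel built without kexec jump compiles only the plain-reboot branch. Together with the later hunks this produces the slightly unusual "} else / #endif / {" shape, sketched here:

	#ifdef CONFIG_KEXEC_JUMP
		if (kexec_image->preserve_context) {
			/* hibernation-style freeze/suspend path */
		} else
	#endif
		{
			/* ordinary shutdown path, always compiled in */
		}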
@@ -1459,6 +1457,7 @@ int kernel_kexec(void)
                error = disable_nonboot_cpus();
                if (error)
                        goto Resume_devices;
+               device_pm_lock();
                local_irq_disable();
                /* At this point, device_suspend() has been called,
                 * but *not* device_power_down(). We *must*
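The added device_pm_lock() mirrors the hibernation code this patch syncs with: the PM core's device list is assumed to be held locked across the device_power_down()/device_power_up() window and released on the way back up (see the Enable_irqs path below). The intended pairing, as a sketch:

	device_pm_lock();		/* no device PM list changes from here on */
	local_irq_disable();
	error = device_power_down(PMSG_FREEZE);
	/* ... machine_kexec() and return via kexec jump ... */
	device_power_up(PMSG_RESTORE);
	local_irq_enable();
	device_pm_unlock();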
@@ -1471,25 +1470,23 @@ int kernel_kexec(void)
                if (error)
                        goto Enable_irqs;
                save_processor_state();
+       } else
 #endif
-       } else {
-               blocking_notifier_call_chain(&reboot_notifier_list,
-                                            SYS_RESTART, NULL);
-               system_state = SYSTEM_RESTART;
-               device_shutdown();
-               sysdev_shutdown();
+       {
+               kernel_restart_prepare(NULL);
                printk(KERN_EMERG "Starting new kernel\n");
                machine_shutdown();
        }
 
        machine_kexec(kexec_image);
 
-       if (kexec_image->preserve_context) {
 #ifdef CONFIG_KEXEC_JUMP
+       if (kexec_image->preserve_context) {
                restore_processor_state();
                device_power_up(PMSG_RESTORE);
  Enable_irqs:
                local_irq_enable();
+               device_pm_unlock();
                enable_nonboot_cpus();
  Resume_devices:
                device_resume(PMSG_RESTORE);
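kernel_restart_prepare(NULL) replaces the open-coded notifier/shutdown sequence with the helper the normal reboot path uses, so the two stay in sync by construction. In this era of the tree the helper reads roughly as follows (a sketch from kernel/sys.c, reconstructed from the deleted lines above):

	void kernel_restart_prepare(char *cmd)
	{
		blocking_notifier_call_chain(&reboot_notifier_list,
					     SYS_RESTART, cmd);
		system_state = SYSTEM_RESTART;
		device_shutdown();
		sysdev_shutdown();
	}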
@@ -1499,11 +1496,12 @@ int kernel_kexec(void)
  Restore_console:
                pm_restore_console();
                mutex_unlock(&pm_mutex);
-#endif
        }
+#endif
 
  Unlock:
-       xchg(&kexec_lock, 0);
+       if (!xchg(&kexec_lock, 0))
+               BUG();
 
        return error;
 }
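kexec_lock is a bare flag taken and released with xchg(); the new BUG() turns a silent double-unlock into a loud failure. The overall convention, simplified from kernel_kexec() (sketch):

	static int kexec_lock;			/* 0 = free, 1 = held */

	if (xchg(&kexec_lock, 1))
		return -EBUSY;			/* someone else holds it */
	/* ... load/jump critical section ... */
	if (!xchg(&kexec_lock, 0))		/* old value must have been 1 */
		BUG();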