/*
 * Suspend support specific for x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;
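/*
 * Pages between __nosave_begin and __nosave_end are neither saved into the
 * hibernation image nor restored from it; pfn_is_nosave() below reports
 * them to the hibernation core.
 */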

struct saved_context saved_context;

/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt - structure to store the registers contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (ie. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (ie. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	/*
	 * control registers
	 */
	rdmsrl(MSR_EFER, ctxt->efer);
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
}
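
/*
 * Note: every register saved above needs a matching restore in
 * __restore_processor_state() below; a value saved here but never
 * restored would leave the boot kernel's setting visible to the image
 * kernel after resume (see the NOTE above).
 */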

void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary
	 */
	kernel_fpu_end();
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *	by __save_processor_state()
 * @ctxt - structure to load the registers contents from
 */
void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);

	/*
	 * segment registers
	 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
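
	/*
	 * The base MSRs are restored only after the selector loads above,
	 * because writing %fs or %gs reloads the hidden base from the
	 * descriptor.  GS has two bases on x86-64: MSR_GS_BASE is the
	 * active one, MSR_KERNEL_GS_BASE the one exchanged by swapgs.
	 */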
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	fix_processor_context();

	do_fpu_end();
}

void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}

void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	/*
	 * This just modifies memory; should not be necessary.  But...  This
	 * is necessary, because 386 hardware has the concept of a busy TSS
	 * or some similar stupidity.
	 */
	set_tss_desc(cpu, t);

	/* Clear the busy bit: type 9 is an available 64-bit TSS. */
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 */
	if (current->thread.debugreg7) {
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}
}

#ifdef CONFIG_HIBERNATION
/* Defined in arch/x86_64/kernel/suspend_asm.S */
extern int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3;
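
/*
 * Both values above travel through the image header: the image kernel
 * records them via arch_hibernation_header_save(), and the boot kernel
 * reads them back in arch_hibernation_header_restore() before calling
 * restore_image().
 */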

pgd_t *temp_level4_pgt;

void *relocated_restore_code;

static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		unsigned long paddr;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end)
			break;

		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end)
				break;
			pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
	}
	return 0;
}
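
/*
 * Each PUD entry populated above covers PUD_SIZE (1 GB) with one pmd page
 * holding PTRS_PER_PMD (512) large-page entries of PMD_SIZE (2 MB) each:
 * 512 * 2 MB = 1 GB.  Mapping 4 GB of RAM therefore costs only four pmd
 * pages plus the pud page itself.
 */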

static int set_up_temporary_mappings(void)
{
	unsigned long start, end, next;
	int error;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	start = (unsigned long)pfn_to_kaddr(0);
	end = (unsigned long)pfn_to_kaddr(end_pfn);

	for (; start < end; start = next) {
		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);

		if (!pud)
			return -ENOMEM;
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
			return error;
		set_pgd(temp_level4_pgt + pgd_index(start),
			mk_kernel_pgd(__pa(pud)));
	}
	return 0;
}
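
/*
 * The temporary tables are built from "safe" pages, i.e. pages occupied
 * neither by the image data nor by the boot kernel's own page tables, so
 * they stay intact while the restore loop overwrites memory, possibly
 * including the page tables the kernel was running on before.
 */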

int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	if ((error = set_up_temporary_mappings()))
		return error;

	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
	memcpy(relocated_restore_code, &core_restore_code,
	       &restore_registers - &core_restore_code);

	restore_image();
	return 0;
}
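
/*
 * core_restore_code is copied to a safe page because the kernel text it
 * normally lives in may itself be overwritten with image pages during the
 * copy; the relocated copy keeps the restore loop executable throughout.
 */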

/**
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

#define RESTORE_MAGIC	0x0123456789ABCDEFUL
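
/*
 * The magic value doubles as a format check: a boot kernel with a
 * different layout of struct restore_data_record (or a corrupted header)
 * will not find RESTORE_MAGIC in place, and
 * arch_hibernation_header_restore() below will fail with -EINVAL.
 */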

/**
 * arch_hibernation_header_save - populate the architecture specific part
 *	of a hibernation image header
 * @addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = restore_jump_address;
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/**
 * arch_hibernation_header_restore - read the architecture specific data
 *	from the hibernation image header
 * @addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	restore_cr3 = rdr->cr3;
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}
#endif /* CONFIG_HIBERNATION */