/*
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the needed initializations to support sysenter.
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/elf.h>
#include <asm/tlbflush.h>
#include <asm/vdso.h>
#include <asm/proto.h>
enum {
        VDSO_DISABLED = 0,
        VDSO_ENABLED = 1,
        VDSO_COMPAT = 2,
};

#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT    VDSO_COMPAT
#else
#define VDSO_DEFAULT    VDSO_ENABLED
#endif

#ifdef CONFIG_X86_64
#define vdso_enabled                    sysctl_vsyscall32
#define arch_setup_additional_pages     syscall32_setup_pages
#endif
/*
 * This is the difference between the prelinked addresses in the vDSO images
 * and the VDSO_HIGH_BASE address where CONFIG_COMPAT_VDSO places the vDSO
 * in the user address space.
 */
#define VDSO_ADDR_ADJUST        (VDSO_HIGH_BASE - (unsigned long)VDSO32_PRELINK)
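
/*
 * Worked example (illustrative values only, not guaranteed for any
 * particular configuration): if the image is prelinked at
 * VDSO32_PRELINK == 0 and VDSO_HIGH_BASE == 0xffffe000, then
 * VDSO_ADDR_ADJUST == 0xffffe000, and the relocation helpers below add
 * that constant to every virtual address recorded in the copied image.
 */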
/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;
static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);

        return 1;
}
/*
 * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
 * behavior on both 64-bit and 32-bit kernels.
 * On 32-bit kernels, vdso=[012] means the same thing.
 */
__setup("vdso32=", vdso_setup);

#ifdef CONFIG_X86_32
__setup_param("vdso=", vdso32_setup, vdso_setup, 0);

EXPORT_SYMBOL_GPL(vdso_enabled);
#endif
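
/*
 * The accepted values map onto the enum above: vdso32=0 disables the
 * 32-bit vDSO entirely, vdso32=1 installs it through an ordinary VMA
 * (see arch_setup_additional_pages below), and vdso32=2 selects the
 * fixed-address compat mapping at VDSO_HIGH_BASE.
 */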
static __init void reloc_symtab(Elf32_Ehdr *ehdr,
                                unsigned offset, unsigned size)
{
        Elf32_Sym *sym = (void *)ehdr + offset;
        unsigned nsym = size / sizeof(*sym);
        unsigned i;

        for (i = 0; i < nsym; i++, sym++) {
                if (sym->st_shndx == SHN_UNDEF ||
                    sym->st_shndx == SHN_ABS)
                        continue;

                if (sym->st_shndx > SHN_LORESERVE) {
                        printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
                               sym->st_shndx);
                        continue;
                }

                switch (ELF_ST_TYPE(sym->st_info)) {
                case STT_OBJECT:
                case STT_FUNC:
                case STT_SECTION:
                case STT_FILE:
                        sym->st_value += VDSO_ADDR_ADJUST;
                }
        }
}
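
/*
 * reloc_dyn rebases the PT_DYNAMIC table in place.  Tags at or above
 * DT_ENCODING follow the ELF gABI even/odd convention: even tags (e.g.
 * DT_PREINIT_ARRAY == 32) hold addresses that need the adjustment, odd
 * tags (e.g. DT_PREINIT_ARRAYSZ == 33) hold plain values that must be
 * left alone.
 */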
static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
{
        Elf32_Dyn *dyn = (void *)ehdr + offset;

        for (; dyn->d_tag != DT_NULL; dyn++)
                switch (dyn->d_tag) {
                case DT_PLTGOT:
                case DT_HASH:
                case DT_STRTAB:
                case DT_SYMTAB:
                case DT_RELA:
                case DT_INIT:
                case DT_FINI:
                case DT_REL:
                case DT_DEBUG:
                case DT_JMPREL:
                case DT_VERSYM:
                case DT_VERDEF:
                case DT_VERNEED:
                case DT_ADDRRNGLO ... DT_ADDRRNGHI:
                        /* definitely pointers needing relocation */
                        dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
                        break;

                case DT_ENCODING ... OLD_DT_LOOS-1:
                case DT_LOOS ... DT_HIOS-1:
                        /* Tags above DT_ENCODING are pointers if
                           they're even */
                        if (dyn->d_tag >= DT_ENCODING &&
                            (dyn->d_tag & 1) == 0)
                                dyn->d_un.d_ptr += VDSO_ADDR_ADJUST;
                        break;

                case DT_VERDEFNUM:
                case DT_VERNEEDNUM:
                case DT_FLAGS_1:
                case DT_RELACOUNT:
                case DT_RELCOUNT:
                case DT_VALRNGLO ... DT_VALRNGHI:
                        /* definitely not pointers */
                        break;

                case OLD_DT_LOOS ... DT_LOOS-1:
                case DT_HIOS ... DT_VALRNGLO-1:
                default:
                        if (dyn->d_tag > DT_ENCODING)
                                printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
                                       dyn->d_tag);
                        break;
                }
}
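
/*
 * relocate_vdso rebases a prelinked vDSO image in place: first the ELF
 * entry point, then every program header (descending into PT_DYNAMIC),
 * and finally each SHF_ALLOC section together with any symbol tables
 * found along the way.
 */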
static __init void relocate_vdso(Elf32_Ehdr *ehdr)
{
        Elf32_Phdr *phdr;
        Elf32_Shdr *shdr;
        int i;

        BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
               !elf_check_arch_ia32(ehdr) ||
               ehdr->e_type != ET_DYN);

        ehdr->e_entry += VDSO_ADDR_ADJUST;

        /* rebase phdrs */
        phdr = (void *)ehdr + ehdr->e_phoff;
        for (i = 0; i < ehdr->e_phnum; i++) {
                phdr[i].p_vaddr += VDSO_ADDR_ADJUST;

                /* relocate dynamic stuff */
                if (phdr[i].p_type == PT_DYNAMIC)
                        reloc_dyn(ehdr, phdr[i].p_offset);
        }

        /* rebase sections */
        shdr = (void *)ehdr + ehdr->e_shoff;
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (!(shdr[i].sh_flags & SHF_ALLOC))
                        continue;

                shdr[i].sh_addr += VDSO_ADDR_ADJUST;

                if (shdr[i].sh_type == SHT_SYMTAB ||
                    shdr[i].sh_type == SHT_DYNSYM)
                        reloc_symtab(ehdr, shdr[i].sh_offset,
                                     shdr[i].sh_size);
        }
}
/*
 * These symbols are defined by vdso32.S to mark the bounds
 * of the ELF DSO images included therein.
 */
extern const char vdso32_default_start, vdso32_default_end;
extern const char vdso32_sysenter_start, vdso32_sysenter_end;
static struct page *vdso32_pages[1];
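
/*
 * A single page is enough: the whole 32-bit vDSO image is copied into
 * one zeroed page by sysenter_setup() below and mapped with PAGE_SIZE
 * granularity.
 */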
#ifdef CONFIG_X86_64

static int use_sysenter __read_mostly = -1;

#define vdso32_sysenter()       (use_sysenter > 0)

/* May not be __init: called during resume */
void syscall32_cpu_init(void)
{
        if (use_sysenter < 0) {
                if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                        use_sysenter = 1;
                if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
                        use_sysenter = 1;
        }

        /* Load these always in case some future AMD CPU supports
           SYSENTER from compat mode too. */
        checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
        checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
        checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

        wrmsrl(MSR_CSTAR, ia32_cstar_target);
}

#define compat_uses_vma         1

static inline void map_compat_vdso(int map)
{
}
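
/*
 * On 64-bit kernels the compat vDSO needs no fixmap games: the page is
 * installed as a regular VMA at the compat address by
 * arch_setup_additional_pages(), so map_compat_vdso() can be a no-op
 * and compat_uses_vma is 1.
 */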
#else  /* CONFIG_X86_32 */

#define vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SEP))

void enable_sep_cpu(void)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        if (!boot_cpu_has(X86_FEATURE_SEP)) {
                put_cpu();
                return;
        }

        tss->x86_tss.ss1 = __KERNEL_CS;
        tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
        wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
        wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
        put_cpu();
}
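
/*
 * SYSENTER saves no user state of its own, so each CPU must have the
 * MSR triple (CS/ESP/EIP) programmed before the instruction can be
 * used; enable_sep_cpu() therefore runs on every CPU that comes up,
 * not just at boot.
 */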
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        gate_vma.vm_mm = NULL;
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;
        /*
         * Make sure the vDSO gets into every core dump.
         * Dumping its contents makes post-mortem fully interpretable later
         * without matching up the same kernel and hardware config to see
         * what PC values meant.
         */
        gate_vma.vm_flags |= VM_ALWAYSDUMP;
        return 0;
}
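
/*
 * The fixmap-based compat vDSO has no VMA of its own, so get_gate_vma()
 * (at the end of this file) hands out this synthetic read+exec VMA to
 * keep ptrace, /proc and core dumps working for that region.
 */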
#define compat_uses_vma         0

static void map_compat_vdso(int map)
{
        static int vdso_mapped;

        if (map == vdso_mapped)
                return;

        vdso_mapped = map;

        __set_fixmap(FIX_VDSO, page_to_pfn(vdso32_pages[0]) << PAGE_SHIFT,
                     map ? PAGE_READONLY_EXEC : PAGE_NONE);

        /* flush stray tlbs */
        flush_tlb_all();
}

#endif  /* CONFIG_X86_64 */
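
/*
 * sysenter_setup() runs once at boot (registered as an __initcall on
 * 64-bit kernels below; 32-bit kernels invoke it from their own setup
 * path).  It copies the vDSO image matching the CPU's syscall mechanism
 * into a fresh page and relocates it for the compat address.
 */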
int __init sysenter_setup(void)
{
        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
        const void *vsyscall;
        size_t vsyscall_len;

        vdso32_pages[0] = virt_to_page(syscall_page);

#ifdef CONFIG_X86_32
        gate_vma_init();

        printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
#endif

        if (!vdso32_sysenter()) {
                vsyscall = &vdso32_default_start;
                vsyscall_len = &vdso32_default_end - &vdso32_default_start;
        } else {
                vsyscall = &vdso32_sysenter_start;
                vsyscall_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
        }

        memcpy(syscall_page, vsyscall, vsyscall_len);
        relocate_vdso(syscall_page);

        return 0;
}
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;
        bool compat;

        if (vdso_enabled == VDSO_DISABLED)
                return 0;

        down_write(&mm->mmap_sem);

        /* Test compat mode once here, in case someone
           changes it via sysctl */
        compat = (vdso_enabled == VDSO_COMPAT);

        map_compat_vdso(compat);

        if (compat)
                addr = VDSO_HIGH_BASE;
        else {
                addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
                if (IS_ERR_VALUE(addr)) {
                        ret = addr;
                        goto up_fail;
                }
        }

        if (compat_uses_vma || !compat) {
                /*
                 * MAYWRITE to allow gdb to COW and set breakpoints
                 *
                 * Make sure the vDSO gets into every core dump.
                 * Dumping its contents makes post-mortem fully
                 * interpretable later without matching up the same
                 * kernel and hardware config to see what PC values
                 * meant.
                 */
                ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                              VM_READ|VM_EXEC|
                                              VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                                              VM_ALWAYSDUMP,
                                              vdso32_pages);

                if (ret)
                        goto up_fail;
        }

        current->mm->context.vdso = (void *)addr;
        current_thread_info()->sysenter_return =
                VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
        up_write(&mm->mmap_sem);

        return ret;
}

#ifdef CONFIG_X86_64

__initcall(sysenter_setup);
#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>

static ctl_table abi_table2[] = {
        {
                .procname       = "vsyscall32",
                .data           = &sysctl_vsyscall32,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {}
};

static ctl_table abi_root_table2[] = {
        {
                .ctl_name = CTL_ABI,
                .procname = "abi",
                .mode = 0555,
                .child = abi_table2
        },
        {}
};

static __init int ia32_binfmt_init(void)
{
        register_sysctl_table(abi_root_table2);
        return 0;
}
__initcall(ia32_binfmt_init);
#endif
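
/*
 * The table above surfaces the knob as /proc/sys/abi/vsyscall32;
 * writing 0, 1 or 2 there has the same effect as the vdso32= boot
 * parameter for 32-bit tasks exec'd afterwards.
 */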
#else  /* CONFIG_X86_32 */

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        return NULL;
}

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;

        /* Check to see if this task was created in compat vdso mode */
        if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
                return &gate_vma;

        return NULL;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        const struct vm_area_struct *vma = get_gate_vma(task);

        return vma && addr >= vma->vm_start && addr < vma->vm_end;
}

int in_gate_area_no_task(unsigned long addr)
{
        return 0;
}

#endif  /* CONFIG_X86_64 */