]> git.ipfire.org Git - thirdparty/kernel/linux.git/blame - arch/x86/vdso/vma.c
x86, vdso: Move syscall and sysenter setup into kernel/cpu/common.c
[thirdparty/kernel/linux.git] / arch / x86 / vdso / vma.c
CommitLineData
2aae950b
AK
1/*
2 * Set up the VMAs to tell the VM about the vDSO.
3 * Copyright 2007 Andi Kleen, SUSE Labs.
4 * Subject to the GPL, v.2
5 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
2aae950b 18
b4b541a6 19#if defined(CONFIG_X86_64)
3d7ee969 20unsigned int __read_mostly vdso64_enabled = 1;
7f3646aa 21
b67e612c 22DECLARE_VDSO_IMAGE(vdso);
2aae950b 23extern unsigned short vdso_sync_cpuid;
369c9920 24static unsigned vdso_size;
2aae950b 25
1a21d4e0 26#ifdef CONFIG_X86_X32_ABI
b67e612c 27DECLARE_VDSO_IMAGE(vdsox32);
1a21d4e0 28static unsigned vdsox32_size;
b4b541a6
AL
29#endif
30#endif
1a21d4e0 31
b4b541a6
AL
#if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
	defined(CONFIG_COMPAT)
/*
 * patch_vdso32 - apply instruction alternatives to a 32-bit vDSO image
 * @vdso: start of the ELF32 vDSO image
 * @len:  size of the image in bytes
 *
 * Finds the .altinstructions section inside the image and runs
 * apply_alternatives() on it so the vDSO code matches the features of
 * the CPU we are booting on.  A missing section is reported but is not
 * fatal; the image is simply left unpatched.
 */
void __init patch_vdso32(void *vdso, size_t len)
{
	Elf32_Ehdr *hdr = vdso;
	Elf32_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf32_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	/* Section 0 is the reserved null section; start at 1. */
	for (i = 1; i < hdr->e_shnum; i++) {
		Elf32_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			break;
		}
	}

	if (!alt_sec) {
		/* If we get here, it's probably a bug. */
		pr_warning("patch_vdso32: .altinstructions not found\n");
		return; /* nothing to patch */
	}

	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif
65
b4b541a6 66#if defined(CONFIG_X86_64)
1a21d4e0 67static void __init patch_vdso64(void *vdso, size_t len)
1b3f2a72
AL
68{
69 Elf64_Ehdr *hdr = vdso;
70 Elf64_Shdr *sechdrs, *alt_sec = 0;
71 char *secstrings;
72 void *alt_data;
73 int i;
74
75 BUG_ON(len < sizeof(Elf64_Ehdr));
76 BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
77
78 sechdrs = (void *)hdr + hdr->e_shoff;
79 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
80
81 for (i = 1; i < hdr->e_shnum; i++) {
82 Elf64_Shdr *shdr = &sechdrs[i];
83 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
84 alt_sec = shdr;
85 goto found;
86 }
87 }
88
89 /* If we get here, it's probably a bug. */
1a21d4e0 90 pr_warning("patch_vdso64: .altinstructions not found\n");
1b3f2a72
AL
91 return; /* nothing to patch */
92
93found:
94 alt_data = (void *)hdr + alt_sec->sh_offset;
95 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
96}
97
aafade24 98static int __init init_vdso(void)
2aae950b
AK
99{
100 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
101 int i;
2aae950b 102
1a21d4e0 103 patch_vdso64(vdso_start, vdso_end - vdso_start);
1b3f2a72 104
369c9920 105 vdso_size = npages << PAGE_SHIFT;
aafade24
AL
106 for (i = 0; i < npages; i++)
107 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
2aae950b 108
1a21d4e0 109#ifdef CONFIG_X86_X32_ABI
b4b541a6 110 patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
1a21d4e0
L
111 npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
112 vdsox32_size = npages << PAGE_SHIFT;
113 for (i = 0; i < npages; i++)
114 vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
115#endif
116
2aae950b 117 return 0;
2aae950b 118}
aafade24 119subsys_initcall(init_vdso);
2aae950b
AK
120
121struct linux_binprm;
122
123/* Put the vdso above the (randomized) stack with another randomized offset.
124 This way there is no hole in the middle of address space.
125 To save memory make sure it is still in the same PTE as the stack top.
126 This doesn't give that many random bits */
127static unsigned long vdso_addr(unsigned long start, unsigned len)
128{
129 unsigned long addr, end;
130 unsigned offset;
131 end = (start + PMD_SIZE - 1) & PMD_MASK;
d9517346
IM
132 if (end >= TASK_SIZE_MAX)
133 end = TASK_SIZE_MAX;
2aae950b
AK
134 end -= len;
135 /* This loses some more bits than a modulo, but is cheaper */
136 offset = get_random_int() & (PTRS_PER_PTE - 1);
137 addr = start + (offset << PAGE_SHIFT);
138 if (addr >= end)
139 addr = end;
dfb09f9b
BP
140
141 /*
142 * page-align it here so that get_unmapped_area doesn't
143 * align it wrongfully again to the next page. addr can come in 4K
144 * unaligned here as a result of stack start randomization.
145 */
146 addr = PAGE_ALIGN(addr);
f9902472 147 addr = align_vdso_addr(addr);
dfb09f9b 148
2aae950b
AK
149 return addr;
150}
151
152/* Setup a VMA at program startup for the vsyscall page.
153 Not called for compat tasks */
1a21d4e0
L
154static int setup_additional_pages(struct linux_binprm *bprm,
155 int uses_interp,
156 struct page **pages,
157 unsigned size)
2aae950b
AK
158{
159 struct mm_struct *mm = current->mm;
160 unsigned long addr;
161 int ret;
2aae950b 162
3d7ee969 163 if (!vdso64_enabled)
2aae950b
AK
164 return 0;
165
166 down_write(&mm->mmap_sem);
1a21d4e0
L
167 addr = vdso_addr(mm->start_stack, size);
168 addr = get_unmapped_area(NULL, addr, size, 0, 0);
2aae950b
AK
169 if (IS_ERR_VALUE(addr)) {
170 ret = addr;
171 goto up_fail;
172 }
173
f7b6eb3f
PZ
174 current->mm->context.vdso = (void *)addr;
175
1a21d4e0 176 ret = install_special_mapping(mm, addr, size,
2aae950b 177 VM_READ|VM_EXEC|
909af768 178 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
1a21d4e0 179 pages);
f7b6eb3f
PZ
180 if (ret) {
181 current->mm->context.vdso = NULL;
2aae950b 182 goto up_fail;
f7b6eb3f 183 }
2aae950b 184
2aae950b
AK
185up_fail:
186 up_write(&mm->mmap_sem);
187 return ret;
188}
189
1a21d4e0
L
/* Map the 64-bit vDSO into a new native 64-bit process at exec time. */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdso_pages,
				      vdso_size);
}
195
#ifdef CONFIG_X86_X32_ABI
/* Map the x32 vDSO into a new x32 process at exec time. */
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
				      vdsox32_size);
}
#endif
203
2aae950b
AK
/*
 * "vdso=" boot parameter handler: sets vdso64_enabled from the given
 * numeric string ("vdso=0" disables mapping the 64-bit vDSO; any
 * non-zero value enables it).
 */
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
b4b541a6 210#endif