/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm/hpet.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;

extern unsigned short vdso_sync_cpuid;
#endif

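/*
 * Fill in the image's page array from its static data and apply
 * instruction alternatives so the vDSO text matches this CPU.
 */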
void __init init_vdso_image(const struct vdso_image *image)
{
        int i;
        int npages = (image->size) / PAGE_SIZE;

        BUG_ON(image->size % PAGE_SIZE != 0);
        for (i = 0; i < npages; i++)
                image->text_mapping.pages[i] =
                        virt_to_page(image->data + i*PAGE_SIZE);

        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
                                                image->alt_len));
}

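/*
 * Boot-time initialization on 64-bit kernels: prepare the 64-bit (and,
 * if configured, x32) vDSO images once, before user processes map them.
 */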
#if defined(CONFIG_X86_64)
static int __init init_vdso(void)
{
        init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
        init_vdso_image(&vdso_image_x32);
#endif

        return 0;
}
subsys_initcall(init_vdso);
#endif

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address
 * space.  To save memory, make sure it is still in the same PTE as
 * the stack top.  This doesn't give that many random bits.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
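/*
 * With typical 64-bit values (4K pages, PMD_SIZE of 2 MB, PTRS_PER_PTE
 * of 512), the offset below contributes at most 9 bits of page-granular
 * randomness on top of the already randomized stack start, while keeping
 * the vdso within the stack top's 2 MB region.
 */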
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
        return 0;
#else
        unsigned long addr, end;
        unsigned offset;
        end = (start + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;
        /* This loses some more bits than a modulo, but is cheaper */
        offset = get_random_int() & (PTRS_PER_PTE - 1);
        addr = start + (offset << PAGE_SHIFT);
        if (addr >= end)
                addr = end;

        /*
         * Page-align it here so that get_unmapped_area doesn't align it
         * wrongfully again to the next page.  addr can come in 4K
         * unaligned here as a result of stack start randomization.
         */
        addr = PAGE_ALIGN(addr);
        addr = align_vdso_addr(addr);

        return addr;
#endif
}

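/*
 * Map the given vDSO image into the current process: the text mapping
 * itself, the [vvar] area behind it, and (optionally) the vvar data
 * page and the HPET MMIO page within that area.
 */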
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr;
        int ret = 0;
        static struct page *no_pages[] = {NULL};
        static struct vm_special_mapping vvar_mapping = {
                .name = "[vvar]",
                .pages = no_pages,
        };

        if (calculate_addr) {
                addr = vdso_addr(current->mm->start_stack,
                                 image->sym_end_mapping);
        } else {
                addr = 0;
        }

        down_write(&mm->mmap_sem);

        addr = get_unmapped_area(NULL, addr, image->sym_end_mapping, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        current->mm->context.vdso = (void __user *)addr;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       addr,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &image->text_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }
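
        /*
         * The remainder of the mapping, from the end of the vDSO text up
         * to sym_end_mapping, becomes the read-only [vvar] area.
         */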
        vma = _install_special_mapping(mm,
                                       addr + image->size,
                                       image->sym_end_mapping - image->size,
                                       VM_READ,
                                       &vvar_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

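        /* Expose the kernel's __vvar_page read-only at its image offset. */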
        if (image->sym_vvar_page)
                ret = remap_pfn_range(vma,
                                      addr + image->sym_vvar_page,
                                      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
                                      PAGE_SIZE,
                                      PAGE_READONLY);

        if (ret)
                goto up_fail;

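        /*
         * If the system has an HPET, map its MMIO page uncached so the
         * vDSO clock code can read the counter directly from userspace.
         */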
#ifdef CONFIG_HPET_TIMER
        if (hpet_address && image->sym_hpet_page) {
                ret = io_remap_pfn_range(vma,
                                         addr + image->sym_hpet_page,
                                         hpet_address >> PAGE_SHIFT,
                                         PAGE_SIZE,
                                         pgprot_noncached(PAGE_READONLY));

                if (ret)
                        goto up_fail;
        }
#endif

up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);
        return ret;
}

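/*
 * Map the 32-bit vDSO (used natively on 32-bit kernels and for compat
 * tasks on 64-bit ones) and record the sysenter return address.
 */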
#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
static int load_vdso32(void)
{
        int ret;

        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        ret = map_vdso(selected_vdso32, false);
        if (ret)
                return ret;

        if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
                current_thread_info()->sysenter_return =
                        current->mm->context.vdso +
                        selected_vdso32->sym_VDSO32_SYSENTER_RETURN;

        return 0;
}
#endif

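/*
 * Entry points called by the ELF loader at exec time to map the vDSO
 * that matches the new process's ABI.
 */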
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32)) {
                if (!vdso64_enabled)
                        return 0;

                return map_vdso(&vdso_image_x32, true);
        }
#endif

        return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return load_vdso32();
}
#endif

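/* Parse the "vdso=" kernel command-line option; "vdso=0" disables it. */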
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
        vdso64_enabled = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("vdso=", vdso_setup);
#endif