// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
extern char vdso_start[], vdso_end[];
static unsigned long vdso_pages __ro_after_init;
/*
 * The vDSO data page.
 */
static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
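/*
 * Note: the union above pads struct vdso_data out to exactly one page, and
 * __page_aligned_data keeps it page-aligned, so the kernel-side timekeeping
 * data can be mapped read-only into every user address space (the "[vvar]"
 * special mapping installed below).
 */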
#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#define C_VECTORS       0
#define C_SIGPAGE       1
#define C_PAGES         (C_SIGPAGE + 1)
static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
        {
                .name   = "[vectors]", /* ABI */
                .pages  = &aarch32_vdso_pages[C_VECTORS],
        },
        {
                .name   = "[sigpage]", /* ABI */
                .pages  = &aarch32_vdso_pages[C_SIGPAGE],
        },
};
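/*
 * The kuser helpers are small kernel-provided user-space routines (atomic
 * compare-and-swap, memory barrier, TLS accessor) that the 32-bit ABI
 * exposes at fixed addresses at the top of the [vectors] page. The copy
 * below places them so that they end exactly at offset 0x1000, preserving
 * the well-known helper addresses just below 0xffff1000.
 */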
static int aarch32_alloc_kuser_vdso_page(void)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
        unsigned long vdso_page;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        vdso_page = get_zeroed_page(GFP_ATOMIC);
        if (!vdso_page)
                return -ENOMEM;

        memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
               kuser_sz);
        aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
        flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
        return 0;
}
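/*
 * Allocate the sigpage at boot. It carries the AArch32 sigreturn
 * trampolines (copied from __aarch32_sigret_code_start) and is mapped into
 * each compat task at exec time, at an address chosen by
 * get_unmapped_area() in aarch32_sigreturn_setup() below.
 */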
static int __init aarch32_alloc_vdso_pages(void)
{
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        unsigned long sigpage;
        int ret;

        sigpage = get_zeroed_page(GFP_ATOMIC);
        if (!sigpage)
                return -ENOMEM;

        memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
        aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
        flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);

        ret = aarch32_alloc_kuser_vdso_page();
        if (ret)
                free_page(sigpage);

        return ret;
}
arch_initcall(aarch32_alloc_vdso_pages);
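/*
 * The AArch32 vectors page must live at the fixed address the 32-bit ABI
 * has always used (AARCH32_VECTORS_BASE, i.e. 0xffff0000), so it is mapped
 * there directly rather than at an address picked by get_unmapped_area().
 */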
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
        void *ret;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        /*
         * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
         * not safe to CoW the page containing the CPU exception vectors.
         */
        ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       &aarch32_vdso_spec[C_VECTORS]);

        return PTR_ERR_OR_ZERO(ret);
}
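/*
 * Unlike the vectors page, the sigpage goes at a randomised address;
 * mm->context.vdso records where it landed so that the compat signal-setup
 * code can point the signal return address at the trampoline.
 */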
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
        unsigned long addr;
        void *ret;

        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = ERR_PTR(addr);
                goto out;
        }

        /*
         * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
         * set breakpoints.
         */
        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC | VM_MAYREAD |
                                       VM_MAYWRITE | VM_MAYEXEC,
                                       &aarch32_vdso_spec[C_SIGPAGE]);
        if (IS_ERR(ret))
                goto out;

        mm->context.vdso = (void *)addr;

out:
        return PTR_ERR_OR_ZERO(ret);
}
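/*
 * Entry point for compat tasks: called from the 32-bit ELF loader at exec
 * time to install both special mappings under mmap_sem.
 */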
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        ret = aarch32_kuser_helpers_setup(mm);
        if (ret)
                goto out;

        ret = aarch32_sigreturn_setup(mm);

out:
        up_write(&mm->mmap_sem);
        return ret;
}
#endif /* CONFIG_COMPAT */
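/*
 * The .mremap hook below keeps mm->context.vdso in sync when user space
 * moves the vDSO mapping (checkpoint/restore tools such as CRIU do this);
 * resizing the mapping is refused.
 */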
static int vdso_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
        unsigned long vdso_size = vdso_end - vdso_start;

        if (vdso_size != new_size)
                return -EINVAL;

        current->mm->context.vdso = (void *)new_vma->vm_start;

        return 0;
}
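/*
 * Two special mappings make up the vDSO: slot 0 is the shared data page
 * ("[vvar]") and slot 1 the vDSO code ("[vdso]"); both names are visible
 * in /proc/<pid>/maps.
 */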
static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
        {
                .name   = "[vvar]",
        },
        {
                .name   = "[vdso]",
                .mremap = vdso_mremap,
        },
};
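/*
 * Boot-time initialisation: validate the embedded vDSO image and build the
 * page list that backs the two special mappings (data page first, then the
 * vDSO code pages).
 */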
static int __init vdso_init(void)
{
        int i;
        struct page **vdso_pagelist;
        unsigned long pfn;

        if (memcmp(vdso_start, "\177ELF", 4)) {
                pr_err("vDSO is not a valid ELF object!\n");
                return -EINVAL;
        }

        vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;

        /* Allocate the vDSO pagelist, plus a page for the data. */
        vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
                                GFP_KERNEL);
        if (vdso_pagelist == NULL)
                return -ENOMEM;

        /* Grab the vDSO data page. */
        vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

        /* Grab the vDSO code pages. */
        pfn = sym_to_pfn(vdso_start);

        for (i = 0; i < vdso_pages; i++)
                vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

        vdso_spec[0].pages = &vdso_pagelist[0];
        vdso_spec[1].pages = &vdso_pagelist[1];

        return 0;
}
arch_initcall(vdso_init);
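/*
 * Map the vDSO into a new process: the data page goes at vdso_base and the
 * vDSO text immediately after it. mm->context.vdso records the text base,
 * which is what the AT_SYSINFO_EHDR auxv entry reports to the new image.
 */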
int arch_setup_additional_pages(struct linux_binprm *bprm,
                                int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        void *ret;

        vdso_text_len = vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + PAGE_SIZE;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }
        ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
                                       VM_READ|VM_MAYREAD,
                                       &vdso_spec[0]);
        if (IS_ERR(ret))
                goto up_fail;

        vdso_base += PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_spec[1]);
        if (IS_ERR(ret))
                goto up_fail;

        up_write(&mm->mmap_sem);
        return 0;

up_fail:
        mm->context.vdso = NULL;
        up_write(&mm->mmap_sem);
        return PTR_ERR(ret);
}
/*
 * Update the vDSO data page to keep in sync with kernel timekeeping.
 */
void update_vsyscall(struct timekeeper *tk)
{
        u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct;

        ++vdso_data->tb_seq_count;
        smp_wmb();

        vdso_data->use_syscall                  = use_syscall;
        vdso_data->xtime_coarse_sec             = tk->xtime_sec;
        vdso_data->xtime_coarse_nsec            = tk->tkr_mono.xtime_nsec >>
                                                        tk->tkr_mono.shift;
        vdso_data->wtm_clock_sec                = tk->wall_to_monotonic.tv_sec;
        vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;

        /* Read without the seqlock held by clock_getres() */
        WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);

        if (!use_syscall) {
                /* tkr_mono.cycle_last == tkr_raw.cycle_last */
                vdso_data->cs_cycle_last        = tk->tkr_mono.cycle_last;
                vdso_data->raw_time_sec         = tk->raw_sec;
                vdso_data->raw_time_nsec        = tk->tkr_raw.xtime_nsec;
                vdso_data->xtime_clock_sec      = tk->xtime_sec;
                vdso_data->xtime_clock_nsec     = tk->tkr_mono.xtime_nsec;
                vdso_data->cs_mono_mult         = tk->tkr_mono.mult;
                vdso_data->cs_raw_mult          = tk->tkr_raw.mult;
                /* tkr_mono.shift == tkr_raw.shift */
                vdso_data->cs_shift             = tk->tkr_mono.shift;
        }

        smp_wmb();
        ++vdso_data->tb_seq_count;
}
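/*
 * tb_seq_count is the writer side of a sequence lock: it is bumped to an
 * odd value before the update and back to an even value afterwards, with
 * smp_wmb() ordering the counter writes against the data. The real reader
 * lives in the vDSO itself; purely as an illustrative sketch (the
 * copy_needed_fields() helper is hypothetical), a reader would follow:
 *
 *      do {
 *              seq = READ_ONCE(vdso_data->tb_seq_count);
 *              if (seq & 1)
 *                      continue;       // writer mid-update, try again
 *              smp_rmb();
 *              copy_needed_fields();
 *              smp_rmb();
 *      } while (READ_ONCE(vdso_data->tb_seq_count) != seq);
 */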
void update_vsyscall_tz(void)
{
        vdso_data->tz_minuteswest       = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime           = sys_tz.tz_dsttime;
}