From: Thomas Weißschuh
Date: Thu, 10 Oct 2024 07:01:16 +0000 (+0200)
Subject: x86/vdso: Allocate vvar page from C code
X-Git-Tag: v6.13-rc1~172^2~15
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=7175126a6d45fea82cb25f4d35b35a0999fd6dae;p=thirdparty%2Fkernel%2Flinux.git

x86/vdso: Allocate vvar page from C code

Allocate the vvar page through the standard union vdso_data_store and
remove the custom linker script logic.

Signed-off-by: Thomas Weißschuh
Signed-off-by: Thomas Gleixner
Link: https://lore.kernel.org/all/20241010-vdso-generic-base-v1-14-b64f0842d512@linutronix.de
---

diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 8437906fd4b35..5731dc35d1d2c 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -20,25 +20,19 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
 
-#undef _ASM_X86_VVAR_H
-#define EMIT_VVAR(name, offset) \
-	const size_t name ## _offset = offset;
-#include <asm/vvar.h>
-
 struct vdso_data *arch_get_vdso_data(void *vvar_page)
 {
-	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
+	return (struct vdso_data *)vvar_page;
 }
-#undef EMIT_VVAR
 
-DEFINE_VVAR(struct vdso_data, _vdso_data);
+static union vdso_data_store vdso_data_store __page_aligned_data;
+struct vdso_data *vdso_data = vdso_data_store.data;
 
 unsigned int vclocks_used __read_mostly;
 
@@ -153,7 +147,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 	if (sym_offset == image->sym_vvar_page) {
 		struct page *timens_page = find_timens_vvar_page(vma);
 
-		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
+		pfn = __pa_symbol(vdso_data) >> PAGE_SHIFT;
 
 		/*
 		 * If a task belongs to a time namespace then a namespace
@@ -200,7 +194,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 		if (!timens_page)
 			return VM_FAULT_SIGBUS;
 
-		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
+		pfn = __pa_symbol(vdso_data) >> PAGE_SHIFT;
 		return vmf_insert_pfn(vma, vmf->address, pfn);
 	}
 
diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h
index ce8d5c81ebf44..aac7d2b7b9a8c 100644
--- a/arch/x86/include/asm/vdso/vsyscall.h
+++ b/arch/x86/include/asm/vdso/vsyscall.h
@@ -8,20 +8,22 @@
 #include
 #include
 
+extern struct vdso_data *vdso_data;
+
 /*
  * Update the vDSO data page to keep in sync with kernel timekeeping.
  */
 static __always_inline struct vdso_data *__x86_get_k_vdso_data(void)
 {
-	return _vdso_data;
+	return vdso_data;
 }
 #define __arch_get_k_vdso_data __x86_get_k_vdso_data
 
 static __always_inline struct vdso_rng_data *__x86_get_k_vdso_rng_data(void)
 {
-	return (void *)&__vvar_page + __VDSO_RND_DATA_OFFSET;
+	return (void *)vdso_data + __VDSO_RND_DATA_OFFSET;
 }
 #define __arch_get_k_vdso_rng_data __x86_get_k_vdso_rng_data
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 6726be89b7a66..e7e19842736a7 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -193,29 +193,6 @@ SECTIONS
 
 	ORC_UNWIND_TABLE
 
-	. = ALIGN(PAGE_SIZE);
-	__vvar_page = .;
-
-	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
-		/* work around gold bug 13023 */
-		__vvar_beginning_hack = .;
-
-		/* Place all vvars at the offsets in asm/vvar.h. */
-#define EMIT_VVAR(name, offset)			\
-		. = __vvar_beginning_hack + offset;	\
-		*(.vvar_ ## name)
-#include <asm/vvar.h>
-#undef EMIT_VVAR
-
-		/*
-		 * Pad the rest of the page with zeros. Otherwise the loader
-		 * can leave garbage here.
-		 */
-		. = __vvar_beginning_hack + PAGE_SIZE;
-	} :data
-
-	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
-
 	/* Init code and data - will be freed after init */
 	. = ALIGN(PAGE_SIZE);
 	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index c101bed619400..6afe2e5e91028 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -89,7 +89,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
 	"init_per_cpu__.*|"
 	"__end_rodata_hpage_align|"
#endif
-	"__vvar_page|"
 	"_end)$"
 };
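
For context, here is a minimal sketch of the allocation pattern the patch switches to. Only the two allocation lines are taken from the patch itself; the container comes from the generic vDSO code in include/vdso/datapage.h, and its exact member layout varies between kernel versions, so the description in the comments is approximate.

/*
 * Sketch only, not part of the patch: the generic vDSO code provides a
 * page-sized, page-aligned container (union vdso_data_store, roughly a
 * struct vdso_data array overlaid on one page), so the vvar page becomes
 * an ordinary C object instead of a symbol carved out in the linker script.
 */
#include <vdso/datapage.h>	/* union vdso_data_store, struct vdso_data */

static union vdso_data_store vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

/*
 * __page_aligned_data (from <linux/linkage.h>) places the object in
 * .data..page_aligned with PAGE_SIZE alignment, and static storage is
 * zero-initialized, so vvar_fault() can insert the page behind
 * __pa_symbol(vdso_data) directly, without the old linker-script padding.
 */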