x86/entry/vdso: Rename vdso_image_* to vdso*_image
author    H. Peter Anvin <hpa@zytor.com>
          Tue, 16 Dec 2025 21:25:55 +0000 (13:25 -0800)
committer Dave Hansen <dave.hansen@linux.intel.com>
          Tue, 13 Jan 2026 23:33:20 +0000 (15:33 -0800)
The vdso .so files are named vdso*.so. These structures are binary
images and descriptions of those files, so it is more consistent for
their names to directly mirror the filenames.

It is also very slightly more compact (by one character...) and
simplifies the Makefile just a little bit.

Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://patch.msgid.link/20251216212606.1325678-2-hpa@zytor.com
arch/x86/entry/syscall_32.c
arch/x86/entry/vdso/.gitignore
arch/x86/entry/vdso/Makefile
arch/x86/entry/vdso/vma.c
arch/x86/include/asm/elf.h
arch/x86/include/asm/vdso.h
arch/x86/kernel/process_64.c
arch/x86/kernel/signal_32.c

diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index a67a644d0cfe04288ec910d1b7c85520cfbe1734..8e829575e12f9c27a716183b9ed6bfed3ab704b4 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -319,7 +319,7 @@ __visible noinstr bool do_fast_syscall_32(struct pt_regs *regs)
         * convention.  Adjust regs so it looks like we entered using int80.
         */
        unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
-                                       vdso_image_32.sym_int80_landing_pad;
+                                       vdso32_image.sym_int80_landing_pad;
 
        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
diff --git a/arch/x86/entry/vdso/.gitignore b/arch/x86/entry/vdso/.gitignore
index 37a6129d597bb5d57f8b9932a0496f606ed52bc7..eb60859dbcbf189ff5ed8b0b6836ed8520b5c8f1 100644
--- a/arch/x86/entry/vdso/.gitignore
+++ b/arch/x86/entry/vdso/.gitignore
@@ -1,8 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-vdso.lds
-vdsox32.lds
-vdso32-syscall-syms.lds
-vdso32-sysenter-syms.lds
-vdso32-int80-syms.lds
-vdso-image-*.c
-vdso2c
+*.lds
+*.so
+*.so.dbg
+vdso*-image.c
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index f247f5f5cb44dad706701dd5344c9a8031deffc1..7f833026d5b2e191c6f6fccef2e438010260d5c1 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -16,9 +16,9 @@ vobjs-$(CONFIG_X86_SGX)       += vsgx.o
 obj-y                                          += vma.o extable.o
 
 # vDSO images to build:
-obj-$(CONFIG_X86_64)                           += vdso-image-64.o
-obj-$(CONFIG_X86_X32_ABI)                      += vdso-image-x32.o
-obj-$(CONFIG_COMPAT_32)                                += vdso-image-32.o vdso32-setup.o
+obj-$(CONFIG_X86_64)                           += vdso64-image.o
+obj-$(CONFIG_X86_X32_ABI)                      += vdsox32-image.o
+obj-$(CONFIG_COMPAT_32)                                += vdso32-image.o vdso32-setup.o
 
 vobjs := $(addprefix $(obj)/, $(vobjs-y))
 vobjs32 := $(addprefix $(obj)/, $(vobjs32-y))
@@ -44,7 +44,7 @@ hostprogs += vdso2c
 quiet_cmd_vdso2c = VDSO2C  $@
       cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
 
-$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
+$(obj)/vdso%-image.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
        $(call if_changed,vdso2c)
 
 #
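For reference, the renamed pattern rule above now shares its vdso<abi> stem with the shared objects it reads. A comment-only illustration (not part of the patch) of how the rule instantiates per image, with vdso2c and FORCE as the remaining prerequisites:

    # Illustration only: how $(obj)/vdso%-image.c binds its stem once the
    # generated sources use the same vdso<abi> prefix as the .so files:
    #
    #   vdso64-image.c  : vdso64.so.dbg   vdso64.so   vdso2c  FORCE   (stem "64")
    #   vdsox32-image.c : vdsox32.so.dbg  vdsox32.so  vdso2c  FORCE   (stem "x32")
    #   vdso32-image.c  : vdso32.so.dbg   vdso32.so   vdso2c  FORCE   (stem "32")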
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index afe105b2f907b1cb8f422c207b5f5294a1256b6c..8f98c2d7c7a98026fb46b1f9a6d7dd307a5f9780 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -65,7 +65,7 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
 static void vdso_fix_landing(const struct vdso_image *image,
                struct vm_area_struct *new_vma)
 {
-       if (in_ia32_syscall() && image == &vdso_image_32) {
+       if (in_ia32_syscall() && image == &vdso32_image) {
                struct pt_regs *regs = current_pt_regs();
                unsigned long vdso_land = image->sym_int80_landing_pad;
                unsigned long old_land_addr = vdso_land +
@@ -230,7 +230,7 @@ static int load_vdso32(void)
        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;
 
-       return map_vdso(&vdso_image_32, 0);
+       return map_vdso(&vdso32_image, 0);
 }
 
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
@@ -239,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
                if (!vdso64_enabled)
                        return 0;
 
-               return map_vdso(&vdso_image_64, 0);
+               return map_vdso(&vdso64_image, 0);
        }
 
        return load_vdso32();
@@ -252,7 +252,7 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
        if (IS_ENABLED(CONFIG_X86_X32_ABI) && x32) {
                if (!vdso64_enabled)
                        return 0;
-               return map_vdso(&vdso_image_x32, 0);
+               return map_vdso(&vdsox32_image, 0);
        }
 
        if (IS_ENABLED(CONFIG_IA32_EMULATION))
@@ -267,7 +267,7 @@ bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
        const struct vdso_image *image = current->mm->context.vdso_image;
        unsigned long vdso = (unsigned long) current->mm->context.vdso;
 
-       if (in_ia32_syscall() && image == &vdso_image_32) {
+       if (in_ia32_syscall() && image == &vdso32_image) {
                if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad ||
                    regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad)
                        return true;
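For context, the mapping that map_vdso() installs here is what userspace later locates through the auxiliary vector. A minimal userspace sketch, assuming standard glibc interfaces (not part of this patch):

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* AT_SYSINFO_EHDR carries the address of the mapped vDSO image. */
            const Elf64_Ehdr *vdso = (const Elf64_Ehdr *)getauxval(AT_SYSINFO_EHDR);

            if (vdso && !memcmp(vdso->e_ident, ELFMAG, SELFMAG))
                    printf("vDSO ELF image mapped at %p\n", (void *)vdso);
            return 0;
    }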
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 6c8fdc96be7e8bd33919d9b56ae8a8a68f4b0c3f..2ba5f166e58fee4c2cd194daaddfe2712b845641 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -361,7 +361,7 @@ else if (IS_ENABLED(CONFIG_IA32_EMULATION))                         \
 
 #define VDSO_ENTRY                                                     \
        ((unsigned long)current->mm->context.vdso +                     \
-        vdso_image_32.sym___kernel_vsyscall)
+        vdso32_image.sym___kernel_vsyscall)
 
 struct linux_binprm;
 
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index b7253ef3205a6d146f505f4edb36b452cc04518b..e8afbe9faa5b9c88da824d2fd8e45aff33e6b173 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -27,9 +27,9 @@ struct vdso_image {
        long sym_vdso32_rt_sigreturn_landing_pad;
 };
 
-extern const struct vdso_image vdso_image_64;
-extern const struct vdso_image vdso_image_x32;
-extern const struct vdso_image vdso_image_32;
+extern const struct vdso_image vdso64_image;
+extern const struct vdso_image vdsox32_image;
+extern const struct vdso_image vdso32_image;
 
 extern int __init init_vdso_image(const struct vdso_image *image);
 
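For orientation, each generated vdso*-image.c defines one of these externs. A rough, hand-written sketch of its shape; the field names come from this header and mainline's struct vdso_image, but the offsets and array contents are hypothetical, and real vdso2c output carries additional fields and section attributes:

    /* Hypothetical sketch of vdso32-image.c, not literal vdso2c output. */
    static unsigned char raw_data[8192] = {
            0x7f, 'E', 'L', 'F',    /* ... the remaining bytes of vdso32.so ... */
    };

    const struct vdso_image vdso32_image = {
            .data                   = raw_data,
            .size                   = sizeof(raw_data),
            .sym___kernel_vsyscall  = 0x0a40,       /* hypothetical offset */
            .sym_int80_landing_pad  = 0x0a50,       /* hypothetical offset */
            /* other sym_* offsets omitted */
    };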
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 432c0a004c60b0fd4340e69978bd73c9cb83e77b..08e72f42987014cb5cec3d65eb4208caa9621df4 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -941,14 +941,14 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
 #ifdef CONFIG_CHECKPOINT_RESTORE
 # ifdef CONFIG_X86_X32_ABI
        case ARCH_MAP_VDSO_X32:
-               return prctl_map_vdso(&vdso_image_x32, arg2);
+               return prctl_map_vdso(&vdsox32_image, arg2);
 # endif
 # ifdef CONFIG_IA32_EMULATION
        case ARCH_MAP_VDSO_32:
-               return prctl_map_vdso(&vdso_image_32, arg2);
+               return prctl_map_vdso(&vdso32_image, arg2);
 # endif
        case ARCH_MAP_VDSO_64:
-               return prctl_map_vdso(&vdso_image_64, arg2);
+               return prctl_map_vdso(&vdso64_image, arg2);
 #endif
 #ifdef CONFIG_ADDRESS_MASKING
        case ARCH_GET_UNTAG_MASK:
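The ARCH_MAP_VDSO_* cases are the checkpoint/restore path. A minimal userspace sketch, assuming CONFIG_CHECKPOINT_RESTORE and the uapi constants from <asm/prctl.h>; the target address is hypothetical, and in a normal process the call fails because a vDSO is already mapped (a restorer unmaps the original first):

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <asm/prctl.h>          /* ARCH_MAP_VDSO_64 */

    int main(void)
    {
            unsigned long addr = 0x7f1234560000UL;  /* hypothetical free address */

            /* Ask the kernel to map the 64-bit vDSO image at addr. */
            if (syscall(SYS_arch_prctl, ARCH_MAP_VDSO_64, addr))
                    perror("arch_prctl(ARCH_MAP_VDSO_64)");
            return 0;
    }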
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 42bbc42bd3503cbd733079c80548f8f3e65b001a..e55cf19e68fe088a4ffe719998916bea3ce341fe 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -282,7 +282,7 @@ int ia32_setup_frame(struct ksignal *ksig, struct pt_regs *regs)
                /* Return stub is in 32bit vsyscall page */
                if (current->mm->context.vdso)
                        restorer = current->mm->context.vdso +
-                               vdso_image_32.sym___kernel_sigreturn;
+                               vdso32_image.sym___kernel_sigreturn;
                else
                        restorer = &frame->retcode;
        }
@@ -368,7 +368,7 @@ int ia32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
                restorer = ksig->ka.sa.sa_restorer;
        else
                restorer = current->mm->context.vdso +
-                       vdso_image_32.sym___kernel_rt_sigreturn;
+                       vdso32_image.sym___kernel_rt_sigreturn;
        unsafe_put_user(ptr_to_compat(restorer), &frame->pretcode, Efault);
 
        /*