From 304221775cd321886d0ca86ac167f7af6913a1ec Mon Sep 17 00:00:00 2001
From: Michael Jeanson
Date: Thu, 1 Aug 2024 11:10:49 -0400
Subject: [PATCH] Add Linux 'extra TLS'

Add the Linux implementation of 'extra TLS' which will allocate space
for the rseq area at the end of the TLS blocks in allocation order.

Signed-off-by: Michael Jeanson
Reviewed-by: Mathieu Desnoyers
Reviewed-by: Florian Weimer
---
 sysdeps/unix/sysv/linux/dl-extra_tls.h | 71 ++++++++++++++++++++++++++
 1 file changed, 71 insertions(+)
 create mode 100644 sysdeps/unix/sysv/linux/dl-extra_tls.h

diff --git a/sysdeps/unix/sysv/linux/dl-extra_tls.h b/sysdeps/unix/sysv/linux/dl-extra_tls.h
new file mode 100644
index 0000000000..d48a5e935d
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/dl-extra_tls.h
@@ -0,0 +1,71 @@
+/* extra tls block utils for the dynamic linker.  Linux version.
+   Copyright (C) 2025 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#ifndef _DL_EXTRA_TLS_H
+#define _DL_EXTRA_TLS_H 1
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <dl-tunables.h>
+#include <rseq-internal.h>
+
+/* Returns the size of the extra TLS block; it must always be a multiple
+   of the alignment.
+   */
+static inline size_t
+_dl_extra_tls_get_size (void)
+{
+  /* Tunable value of glibc.pthread.rseq: non-zero when rseq registration
+     is requested.  */
+  bool do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);
+  if (do_rseq)
+    {
+      /* Make sure the rseq area size is at least the minimum ABI size and a
+         multiple of the requested alignment.  */
+      return roundup (MAX (_rseq_size, RSEQ_AREA_SIZE_INITIAL), _rseq_align);
+    }
+
+  /* Even when disabled by tunable, an rseq area will be allocated to allow
+     application code to test the registration status with 'rseq->cpu_id >= 0'.
+     Default to the rseq ABI minimum size; this will ensure we don't use more
+     TLS than necessary.  */
+  return RSEQ_AREA_SIZE_INITIAL;
+}
+
+/* Returns the alignment requirements of the extra TLS block.  */
+static inline size_t
+_dl_extra_tls_get_align (void)
+{
+  /* Tunable value of glibc.pthread.rseq: non-zero when rseq registration
+     is requested.  */
+  bool do_rseq = TUNABLE_GET_FULL (glibc, pthread, rseq, int, NULL);
+  if (do_rseq)
+    {
+      return _rseq_align;
+    }
+
+  /* Even when disabled by tunable, an rseq area will be allocated to allow
+     application code to test the registration status with 'rseq->cpu_id >= 0'.
+     Default to the rseq ABI minimum alignment; this will ensure we don't use
+     more TLS than necessary.  */
+  return RSEQ_MIN_ALIGN;
+}
+
+/* Record the offset of the extra TLS block from the thread pointer.  */
+static inline void
+_dl_extra_tls_set_offset (ptrdiff_t tls_offset)
+{
+  _rseq_offset = tls_offset;
+}
+
+#endif
-- 
2.47.2