From: Jakub Jelinek
Date: Tue, 26 Apr 2022 06:57:17 +0000 (+0200)
Subject: libgomp: Fix up two non-GOMP_USE_ALIGNED_WORK_SHARES related issues [PR105358]
X-Git-Tag: basepoints/gcc-13~54
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=57a957cb71c004de80b0fd30c8db3cc67576e0ce;p=thirdparty%2Fgcc.git

libgomp: Fix up two non-GOMP_USE_ALIGNED_WORK_SHARES related issues [PR105358]

Last fall I changed struct gomp_work_share so that it doesn't have an
__attribute__((aligned (64))) lock member in the middle unless the target
has a non-emulated aligned allocator; otherwise it just makes sure the
first and second halves are 64 bytes apart for cache-line reasons, but
doesn't make the struct itself 64-byte aligned, so we can use normal
allocators for it.

When the struct isn't 64-byte aligned, the amount of tail padding
decreases significantly, to 0 or 4 bytes or so.  The library uses that
tail padding for the ordered_team_ids array (an array of uints) and/or the
memory for lastprivate conditional temporaries (the latter wants to
guarantee long long alignment) when they fit.

The problem with this on ia32 darwin9 is that while the struct contains
long long members, long long is only 4-byte aligned inside structs there,
while __alignof__ (long long) is 8.  That causes problems in
gomp_init_work_share, where we currently rely on the assumption that if
offsetof (struct gomp_work_share, inline_ordered_team_ids) is long long
aligned, then that tail array will be aligned at runtime and so no extra
memory for dynamic realignment will be needed (which is false when the
whole struct doesn't have long long alignment).

The remaining hunks fix another problem: we compute
INLINE_ORDERED_TEAM_IDS_OFF as the above offsetof rounded up to a long
long boundary and subtract it from sizeof (struct gomp_work_share).  When
unlucky, sizeof (struct gomp_work_share) isn't a multiple of 8 and
INLINE_ORDERED_TEAM_IDS_OFF is 4 bigger than it, and as the subtraction is
done in size_t, we end up with (size_t) -4, so the comparison doesn't
really work.

The fixes add additional conditions to make it work properly, but all of
them should be evaluated at compile time when optimizing, so they
shouldn't slow anything down.

2022-04-26  Jakub Jelinek

	PR libgomp/105358
	* work.c (gomp_init_work_share): Don't mask off the adjustment for
	dynamic long long realignment if struct gomp_work_share has smaller
	alignof than long long.
	* loop.c (GOMP_loop_start): Don't use inline_ordered_team_ids if
	struct gomp_work_share has smaller alignof than long long or if
	sizeof (struct gomp_work_share) is smaller than
	INLINE_ORDERED_TEAM_IDS_OFF.
	* loop_ull.c (GOMP_loop_ull_start): Likewise.
	* sections.c (GOMP_sections2_start): Likewise.
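To make the failure mode concrete, here is a small standalone sketch (not
part of the patch; struct ws and OFF are hypothetical stand-ins for the
much larger struct gomp_work_share and its INLINE_ORDERED_TEAM_IDS_OFF
macro) showing how a struct can contain long long members yet have a
smaller alignof than __alignof__ (long long) on an ABI like ia32 darwin9,
and how the size_t subtraction can then wrap:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct gomp_work_share: long long members
   plus a tail array meant to live in the tail padding.  */
struct ws
{
  long long l;
  unsigned n;
  unsigned inline_ordered_team_ids[];
};

/* The same rounding INLINE_ORDERED_TEAM_IDS_OFF performs.  */
#define OFF \
  (((size_t) offsetof (struct ws, inline_ordered_team_ids) \
    + __alignof__ (long long) - 1) & ~((size_t) __alignof__ (long long) - 1))

int
main (void)
{
  /* On ia32 darwin9, __alignof__ (long long) is 8 but long long is only
     4-byte aligned inside structs, so alignof (struct ws) can be 4.  */
  printf ("alignof (struct ws) = %zu, __alignof__ (long long) = %zu\n",
	  (size_t) __alignof__ (struct ws), (size_t) __alignof__ (long long));
  /* With sizeof = 12 and OFF = 16 on such an ABI, sizeof - OFF computed
     in size_t wraps to (size_t) -4; "size > that" is then never true, so
     the old code kept using the too-small inline tail storage.  */
  printf ("sizeof = %zu, OFF = %zu, sizeof - OFF = %zu\n",
	  sizeof (struct ws), OFF, sizeof (struct ws) - OFF);
  return 0;
}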
---
diff --git a/libgomp/loop.c b/libgomp/loop.c
index 682df39a4ed4..be85162bb1ec 100644
--- a/libgomp/loop.c
+++ b/libgomp/loop.c
@@ -270,8 +270,11 @@ GOMP_loop_start (long start, long end, long incr, long sched,
 #define INLINE_ORDERED_TEAM_IDS_OFF \
   ((offsetof (struct gomp_work_share, inline_ordered_team_ids) \
     + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
-	  if (size > (sizeof (struct gomp_work_share)
-		      - INLINE_ORDERED_TEAM_IDS_OFF))
+	  if (sizeof (struct gomp_work_share)
+	      <= INLINE_ORDERED_TEAM_IDS_OFF
+	      || __alignof__ (struct gomp_work_share) < __alignof__ (long long)
+	      || size > (sizeof (struct gomp_work_share)
+			 - INLINE_ORDERED_TEAM_IDS_OFF))
 	    *mem = (void *) (thr->ts.work_share->ordered_team_ids
 			     = gomp_malloc_cleared (size));
diff --git a/libgomp/loop_ull.c b/libgomp/loop_ull.c
index 2aaa34e3bca6..602737296d40 100644
--- a/libgomp/loop_ull.c
+++ b/libgomp/loop_ull.c
@@ -269,8 +269,11 @@ GOMP_loop_ull_start (bool up, gomp_ull start, gomp_ull end,
 #define INLINE_ORDERED_TEAM_IDS_OFF \
   ((offsetof (struct gomp_work_share, inline_ordered_team_ids) \
     + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
-	  if (size > (sizeof (struct gomp_work_share)
-		      - INLINE_ORDERED_TEAM_IDS_OFF))
+	  if (sizeof (struct gomp_work_share)
+	      <= INLINE_ORDERED_TEAM_IDS_OFF
+	      || __alignof__ (struct gomp_work_share) < __alignof__ (long long)
+	      || size > (sizeof (struct gomp_work_share)
+			 - INLINE_ORDERED_TEAM_IDS_OFF))
 	    *mem = (void *) (thr->ts.work_share->ordered_team_ids
 			     = gomp_malloc_cleared (size));
diff --git a/libgomp/sections.c b/libgomp/sections.c
index e9d99e434ac5..7751d5aac830 100644
--- a/libgomp/sections.c
+++ b/libgomp/sections.c
@@ -121,8 +121,11 @@ GOMP_sections2_start (unsigned count, uintptr_t *reductions, void **mem)
 #define INLINE_ORDERED_TEAM_IDS_OFF \
   ((offsetof (struct gomp_work_share, inline_ordered_team_ids) \
     + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
-	  if (size > (sizeof (struct gomp_work_share)
-		      - INLINE_ORDERED_TEAM_IDS_OFF))
+	  if (sizeof (struct gomp_work_share)
+	      <= INLINE_ORDERED_TEAM_IDS_OFF
+	      || __alignof__ (struct gomp_work_share) < __alignof__ (long long)
+	      || size > (sizeof (struct gomp_work_share)
+			 - INLINE_ORDERED_TEAM_IDS_OFF))
 	    *mem = (void *) (thr->ts.work_share->ordered_team_ids
 			     = gomp_malloc_cleared (size));
diff --git a/libgomp/work.c b/libgomp/work.c
index a88409dc78b0..c53625afe2c6 100644
--- a/libgomp/work.c
+++ b/libgomp/work.c
@@ -113,7 +113,9 @@ gomp_init_work_share (struct gomp_work_share *ws, size_t ordered,
 	  size_t o = nthreads * sizeof (*ws->ordered_team_ids);
 	  o += __alignof__ (long long) - 1;
 	  if ((offsetof (struct gomp_work_share, inline_ordered_team_ids)
-	       & (__alignof__ (long long) - 1)) == 0)
+	       & (__alignof__ (long long) - 1)) == 0
+	      && __alignof__ (struct gomp_work_share)
+		 >= __alignof__ (long long))
 	    o &= ~(__alignof__ (long long) - 1);
 	  ordered += o - 1;
 	}
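As a closing note on the work.c hunk, here is a hedged paraphrase of the
patched sizing logic (again using the hypothetical struct ws stand-in and
a made-up helper name, not libgomp's real types or functions).  It shows
that the __alignof__ slack is only masked off when both the member offset
and the struct's own alignment guarantee runtime alignment; every operand
except nthreads is a compile-time constant, so an optimizing compiler
folds the test away:

#include <stddef.h>

/* Hypothetical stand-in, as in the earlier sketch.  */
struct ws
{
  long long l;
  unsigned n;
  unsigned inline_ordered_team_ids[];
};

/* Mirrors the patched computation in gomp_init_work_share: reserve
   __alignof__ (long long) - 1 slack bytes for dynamic realignment, and
   drop them only when the tail array is guaranteed aligned at runtime,
   i.e. when its offset is long long aligned AND the struct itself is
   allocated with at least long long alignment.  */
static size_t
ordered_bytes (size_t nthreads)
{
  size_t o = nthreads * sizeof (unsigned);
  o += __alignof__ (long long) - 1;
  if ((offsetof (struct ws, inline_ordered_team_ids)
       & (__alignof__ (long long) - 1)) == 0
      && __alignof__ (struct ws) >= __alignof__ (long long))
    o &= ~((size_t) __alignof__ (long long) - 1);
  return o;
}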