From: Andrew Stubbs
Date: Fri, 13 Jan 2023 17:38:39 +0000 (+0000)
Subject: libgomp, amdgcn: Switch USM to 128-byte alignment
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c9b47ccf32a91d8c851a3e20d1a2f47ed0aaa47e;p=thirdparty%2Fgcc.git

libgomp, amdgcn: Switch USM to 128-byte alignment

This should optimize cache-lines on the AMD GPUs somewhat.

libgomp/ChangeLog:

	* usm-allocator.c (ALIGN): Use 128-byte alignment.
---

diff --git a/libgomp/usm-allocator.c b/libgomp/usm-allocator.c
index c45109169ca3..68c1ebafec28 100644
--- a/libgomp/usm-allocator.c
+++ b/libgomp/usm-allocator.c
@@ -57,7 +57,8 @@ static int usm_lock = 0;
 static struct usm_splay_tree_s usm_allocations = { NULL };
 static struct usm_splay_tree_s usm_free_space = { NULL };
 
-#define ALIGN(VAR) (((VAR) + 7) & ~7) /* 8-byte granularity. */
+/* 128-byte granularity means GPU cache-line aligned. */
+#define ALIGN(VAR) (((VAR) + 127) & ~127)
 
 /* Coalesce contiguous free space into one entry.  This considers the entries
    either side of the root node only, so it should be called each time a new
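
For reference, here is a minimal standalone sketch of the round-up arithmetic
used by the new ALIGN macro.  The macro body mirrors the patch; the test
harness around it is illustrative only and is not part of libgomp.

/* Illustration of the ALIGN macro from the patch: rounding a size up to
   the next multiple of 128 so USM blocks start on, and span whole, GPU
   cache lines.  The assertions below are illustrative only.  */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* 128-byte granularity means GPU cache-line aligned.  */
#define ALIGN(VAR) (((VAR) + 127) & ~127)

int
main (void)
{
  /* Adding 127 and then masking off the low 7 bits rounds up: sizes that
     are already multiples of 128 are unchanged, everything else is bumped
     to the next 128-byte boundary.  */
  assert (ALIGN ((size_t) 1) == 128);
  assert (ALIGN ((size_t) 128) == 128);
  assert (ALIGN ((size_t) 129) == 256);
  assert (ALIGN ((size_t) 0) == 0);

  printf ("ALIGN(%zu) = %zu\n", (size_t) 200, (size_t) ALIGN ((size_t) 200));
  return 0;
}

Note that ~127 has type int (value -128); when it is ANDed with a size_t
operand it is converted to size_t, producing a mask with all high bits set,
so the macro also works for full-width 64-bit sizes.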