The following (anti-)pattern was observed in the code tree:
	do {
		start = u64_stats_fetch_begin(&pstats->syncp);
		memcpy(&temp, &pstats->stats, sizeof(temp));
	} while (u64_stats_fetch_retry(&pstats->syncp, start));
On 64bit arches, struct u64_stats_sync is empty and provides no
protection against load/store tearing, especially for memcpy(), for
which arches may provide their own highly optimized implementations.

In theory, the affected code should be converted to u64_stats_t, or
switched to proper READ_ONCE()/WRITE_ONCE() accesses.
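For a single counter that means annotating both sides, e.g. (field
names here are purely illustrative):

	/* writer */
	WRITE_ONCE(pstats->stats.rx_bytes,
		   pstats->stats.rx_bytes + len);

	/* reader */
	temp.rx_bytes = READ_ONCE(pstats->stats.rx_bytes);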
However, since there is a recurring need to copy whole chunks of
statistics, provide a safe memcpy() variant for u64_stats instead of
open-coding such read loops at every call site.
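With the helper, the reader side above becomes (a sketch, assuming
pstats->stats is the u64_stats_t counterpart of temp's plain-u64
layout):

	do {
		start = u64_stats_fetch_begin(&pstats->syncp);
		u64_stats_copy(&temp, &pstats->stats, sizeof(temp));
	} while (u64_stats_fetch_retry(&pstats->syncp, start));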
Signed-off-by: David Yang <mmyangfl@gmail.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Link: https://patch.msgid.link/20260120092137.2161162-2-mmyangfl@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}
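+/*
+ * Copy u64_stats_t counters into plain u64 slots one at a time; each
+ * local64_read() is tear-free, unlike an arch-optimized memcpy().
+ */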
+static inline void *u64_stats_copy(void *dst, const void *src, size_t len)
+{
+	BUILD_BUG_ON(len % sizeof(u64_stats_t));
+
+	for (size_t i = 0; i < len / sizeof(u64_stats_t); i++)
+		((u64 *)dst)[i] = local64_read(&((const local64_t *)src)[i]);
+	return dst;
+}
+
static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
local64_set(&p->v, val);
}
#else /* 64 bit */

+#include <linux/string.h>
+
typedef struct {
	u64		v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}
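+/*
+ * On 32 bit the seqcount in struct u64_stats_sync already guards the
+ * readers, so copying the plain u64 values with memcpy() is safe.
+ */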
+static inline void *u64_stats_copy(void *dst, const void *src, size_t len)
+{
+ BUILD_BUG_ON(len % sizeof(u64_stats_t));
+ return memcpy(dst, src, len);
+}
+
static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
	p->v = val;
}