]> git.ipfire.org Git - thirdparty/gcc.git/commitdiff
warn-access: -Winvalid-memory-model fixes [PR124827] master trunk
authorJakub Jelinek <jakub@redhat.com>
Fri, 10 Apr 2026 09:47:59 +0000 (11:47 +0200)
committerJakub Jelinek <jakub@gcc.gnu.org>
Fri, 10 Apr 2026 09:47:59 +0000 (11:47 +0200)
A few years ago Martin moved for unknown reasons -Winvalid-memory-model
diagnostics from expansion to the gimple-ssa-warn-access middle end warning
black hole.
A recent change limited the second instance of the pass to only a small
subset of warnings.
This regressed diagnostics of -Winvalid-memory-model with -fsanitize=thread,
because invalid cases are not warned about anymore during waccess2 and
during waccess3 we've already transformed those builtins into corresponding
tsan builtins.

The following patch fixes that regression by handling the tsan atomic
builtins as well.  While doing that, I've also fixed formatting and noticed
other bogosities in the code, e.g. existence of xchg_models.  No idea
where Martin got that from, neither C11, nor C23, nor various versions of
C++ nor GCC documentation have any restrictions on what memory models can be
used for atomic_exchange_explicit, so why is it trying to prevent
__ATOMIC_ACQUIRE?
And incorrectly so, __atomic_exchange_N has 3 arguments, and sucs_arg is 0
based, so setting it to 3 meant it didn't check anything because the 4th
argument doesn't exist.  So fixed to use all_models with the correct arg
index.

Besides this, there is another problem that we fold some atomic builtins
into IFN_ATOMIC* internal functions.  That isn't a GCC 16 regression though,
could be fixed by also diagnosing this stuff for the IFN_ATOMIC_* calls,
but I'm not doing it right now because there is yet another problem.
C++17 in https://wg21.link/p0418r2 dropped some of the restrictions, in
particular that for the compare and exchange cases failure mode can't be
stronger than success mode.  So I'm hesitant to add further warnings for
16 (beyond just fixing the -fsanitize=thread inconsistency), unsure if
we should somehow mark the atomic builtins from the C++ <atomic> APIs
that the no stronger checking shouldn't be done for those, or simply
mark those for all of C++17 and later, something else?
C23/C2Y I think still require it and it is reasonable requirement,

2026-04-10  Jakub Jelinek  <jakub@redhat.com>

PR middle-end/124827
* gimple-ssa-warn-access.cc (xchg_models): Remove.
(pass_waccess::check_atomic_builtin): Fix up sucs_arg for
BUILT_IN_ATOMIC_EXCHANGE_* and use all_models instead of xchg_models.
Handle BUILT_IN_TSAN_ATOMIC*.  Formatting fixes.

* gcc.dg/tsan/atomic-invalid.c: New test.

gcc/gimple-ssa-warn-access.cc
gcc/testsuite/gcc.dg/tsan/atomic-invalid.c [new file with mode: 0644]

index c8f10cae32f54e811b4ef7035fd7a0e202a8ec65..98de5df86b0230b72f5846bcd45e4b40562a1b65 100644 (file)
@@ -2940,7 +2940,6 @@ memmodel_name (unsigned HOST_WIDE_INT val)
 /* Indices of valid MEMORY_MODELS above for corresponding atomic operations.  */
 static const unsigned char load_models[] = { 0, 1, 2, 3, UCHAR_MAX };
 static const unsigned char store_models[] = { 0, 1, 4, UCHAR_MAX };
-static const unsigned char xchg_models[] = { 0, 1, 3, 4, 5, UCHAR_MAX };
 static const unsigned char flag_clr_models[] = { 0, 1, 4, UCHAR_MAX };
 static const unsigned char all_models[] = { 0, 1, 2, 3, 4, 5, UCHAR_MAX };
 
@@ -3097,7 +3096,7 @@ pass_waccess::check_atomic_builtin (gcall *stmt)
   switch (DECL_FUNCTION_CODE (callee))
     {
 #define BUILTIN_ACCESS_SIZE_FNSPEC(N)                  \
-      BUILT_IN_SYNC_FETCH_AND_ADD_ ## N:               \
+        BUILT_IN_SYNC_FETCH_AND_ADD_ ## N:             \
     case BUILT_IN_SYNC_FETCH_AND_SUB_ ## N:            \
     case BUILT_IN_SYNC_FETCH_AND_OR_ ## N:             \
     case BUILT_IN_SYNC_FETCH_AND_AND_ ## N:            \
@@ -3135,23 +3134,23 @@ pass_waccess::check_atomic_builtin (gcall *stmt)
     case BUILT_IN_ATOMIC_FETCH_NAND_ ## N:             \
     case BUILT_IN_ATOMIC_FETCH_OR_ ## N:               \
     case BUILT_IN_ATOMIC_FETCH_XOR_ ## N:              \
-       bytes = N;                                      \
-       if (sucs_arg == UINT_MAX)                       \
-         sucs_arg = 2;                                 \
-       if (!pvalid_models)                             \
-         pvalid_models = all_models;                   \
-       break;                                          \
+      bytes = N;                                       \
+      if (sucs_arg == UINT_MAX)                                \
+       sucs_arg = 2;                                   \
+      if (!pvalid_models)                              \
+       pvalid_models = all_models;                     \
+      break;                                           \
     case BUILT_IN_ATOMIC_EXCHANGE_ ## N:               \
-       bytes = N;                                      \
-       sucs_arg = 3;                                   \
-       pvalid_models = xchg_models;                    \
-       break;                                          \
+      bytes = N;                                       \
+      sucs_arg = 2;                                    \
+      pvalid_models = all_models;                      \
+      break;                                           \
     case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_ ## N:       \
-       bytes = N;                                      \
-       sucs_arg = 4;                                   \
-       fail_arg = 5;                                   \
-       pvalid_models = all_models;                     \
-       arg2 = 1
+      bytes = N;                                       \
+      sucs_arg = 4;                                    \
+      fail_arg = 5;                                    \
+      pvalid_models = all_models;                      \
+      arg2 = 1
 
     case BUILTIN_ACCESS_SIZE_FNSPEC (1);
       break;
@@ -3169,6 +3168,55 @@ pass_waccess::check_atomic_builtin (gcall *stmt)
       pvalid_models = flag_clr_models;
       break;
 
+#define BUILTIN_TSAN_ACCESS_SIZE_FNSPEC(N)             \
+        BUILT_IN_TSAN_ATOMIC ## N ##_LOAD:             \
+      pvalid_models = load_models;                     \
+      sucs_arg = 1;                                    \
+      /* FALLTHROUGH */                                        \
+    case BUILT_IN_TSAN_ATOMIC ## N ##_STORE:           \
+      if (!pvalid_models)                              \
+       pvalid_models = store_models;                   \
+      /* FALLTHROUGH */                                        \
+    case BUILT_IN_TSAN_ATOMIC ## N ##_FETCH_ADD:       \
+    case BUILT_IN_TSAN_ATOMIC ## N ##_FETCH_SUB:       \
+    case BUILT_IN_TSAN_ATOMIC ## N ##_FETCH_AND:       \
+    case BUILT_IN_TSAN_ATOMIC ## N ##_FETCH_NAND:      \
+    case BUILT_IN_TSAN_ATOMIC ## N ##_FETCH_OR:                \
+    case BUILT_IN_TSAN_ATOMIC ## N ##_FETCH_XOR:       \
+      bytes = N / 8;                                   \
+      if (sucs_arg == UINT_MAX)                                \
+       sucs_arg = 2;                                   \
+      if (!pvalid_models)                              \
+       pvalid_models = all_models;                     \
+      break;                                           \
+    case BUILT_IN_TSAN_ATOMIC ## N ##_EXCHANGE:                \
+      bytes = N / 8;                                   \
+      sucs_arg = 2;                                    \
+      pvalid_models = all_models;                      \
+      break;                                           \
+    case BUILT_IN_TSAN_ATOMIC ## N ##_COMPARE_EXCHANGE_STRONG: \
+    case BUILT_IN_TSAN_ATOMIC ## N ##_COMPARE_EXCHANGE_WEAK:   \
+      bytes = N / 8;                                   \
+      sucs_arg = 3;                                    \
+      fail_arg = 4;                                    \
+      pvalid_models = all_models;                      \
+      arg2 = 1
+
+    case BUILTIN_TSAN_ACCESS_SIZE_FNSPEC (8);
+      break;
+
+    case BUILTIN_TSAN_ACCESS_SIZE_FNSPEC (16);
+      break;
+
+    case BUILTIN_TSAN_ACCESS_SIZE_FNSPEC (32);
+      break;
+
+    case BUILTIN_TSAN_ACCESS_SIZE_FNSPEC (64);
+      break;
+
+    case BUILTIN_TSAN_ACCESS_SIZE_FNSPEC (128);
+      break;
+
     default:
       return false;
     }
diff --git a/gcc/testsuite/gcc.dg/tsan/atomic-invalid.c b/gcc/testsuite/gcc.dg/tsan/atomic-invalid.c
new file mode 100644 (file)
index 0000000..d35dcf2
--- /dev/null
@@ -0,0 +1,39 @@
+/* PR middle-end/124827 */
+/* Test __atomic routines for invalid memory model errors. This only needs
+   to be tested on a single size.  */
+/* { dg-do compile } */
+/* { dg-require-effective-target sync_int_long } */
+/* { dg-options "-fsanitize=thread -Winvalid-memory-model" } */
+
+int i, e, b;
+bool x;
+
+[[gnu::always_inline]] static inline void
+foo (int relaxed, int seq_cst, int acquire, int release, int acq_rel, int consume)
+{
+  __atomic_compare_exchange_n (&i, &e, 1, 0, relaxed, seq_cst); /* { dg-warning "failure memory model 'memory_order_seq_cst' cannot be stronger" } */
+  __atomic_compare_exchange_n (&i, &e, 1, 0, seq_cst, release); /* { dg-warning "invalid failure memory" } */
+  __atomic_compare_exchange_n (&i, &e, 1, 1, seq_cst, acq_rel); /* { dg-warning "invalid failure memory" } */
+
+  __atomic_load_n (&i, release); /* { dg-warning "invalid memory model" } */
+  __atomic_load_n (&i, acq_rel); /* { dg-warning "invalid memory model" } */
+
+  __atomic_store_n (&i, 1, acquire); /* { dg-warning "invalid memory model" } */
+  __atomic_store_n (&i, 1, consume); /* { dg-warning "invalid memory model" } */
+  __atomic_store_n (&i, 1, acq_rel); /* { dg-warning "invalid memory model" } */
+
+  __atomic_load_n (&i, 44); /* { dg-warning "invalid memory model" } */
+
+  __atomic_clear (&x, consume); /* { dg-warning "invalid memory model" } */
+  __atomic_clear (&x, acquire); /* { dg-warning "invalid memory model" } */
+
+  __atomic_clear (&x, acq_rel); /* { dg-warning "invalid memory model" } */
+
+}
+
+int
+main ()
+{
+  foo (__ATOMIC_RELAXED, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE, __ATOMIC_RELEASE,
+       __ATOMIC_ACQ_REL, __ATOMIC_CONSUME);
+}