git.ipfire.org Git - thirdparty/gcc.git/commitdiff
arm: Always use vmov.f64 instead of vmov.f32 with MVE
authorChristophe Lyon <christophe.lyon@linaro.org>
Wed, 21 Aug 2024 13:58:08 +0000 (13:58 +0000)
committerChristophe Lyon <christophe.lyon@linaro.org>
Tue, 27 Aug 2024 14:41:45 +0000 (14:41 +0000)
With MVE, vmov.f64 is always supported (no need for +fp.dp extension).

This patch updates two patterns:
- in movdi_vfp, we incorrectly checked
  TARGET_VFP_SINGLE || TARGET_HAVE_MVE instead of
  TARGET_VFP_SINGLE && !TARGET_HAVE_MVE, and the length attribute did
  not distinguish these two cases (single-precision-only VFP, which
  needs two vmov.f32 instructions, versus MVE, which can use a single
  vmov.f64).

- in thumb2_movdf_vfp, we checked only TARGET_VFP_SINGLE.

No need to update movdf_vfp, since it is enabled only for TARGET_ARM
(which is not the case when MVE is enabled).

The patch also updates gcc.target/arm/armv8_1m-fp64-move-1.c, to
accept only vmov.f64 instead of vmov.f32.

Tested on arm-none-eabi with:
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto/-march=armv8.1-m.main+mve
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto/-march=armv8.1-m.main+mve.fp
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto/-march=armv8.1-m.main+mve.fp+fp.dp

2024-08-21  Christophe Lyon  <christophe.lyon@linaro.org>

gcc/
* config/arm/vfp.md (movdi_vfp, thumb2_movdf_vfp): Handle MVE
case.

gcc/testsuite/
* gcc.target/arm/armv8_1m-fp64-move-1.c: Update expected code.

gcc/config/arm/vfp.md
gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c

index 773f55664a95cce396e2a1057754ccf3697f4060..3212d9c7aa171ef053bea81f7c689fff7527f422 100644 (file)
     case 8:
       return \"vmov%?\\t%Q0, %R0, %P1\\t%@ int\";
     case 9:
-      if (TARGET_VFP_SINGLE || TARGET_HAVE_MVE)
+      if (TARGET_VFP_SINGLE && !TARGET_HAVE_MVE)
        return \"vmov%?.f32\\t%0, %1\\t%@ int\;vmov%?.f32\\t%p0, %p1\\t%@ int\";
       else
        return \"vmov%?.f64\\t%P0, %P1\\t%@ int\";
                               (symbol_ref "arm_count_output_move_double_insns (operands) * 4")
                               (eq_attr "alternative" "9")
                                (if_then_else
-                                 (match_test "TARGET_VFP_SINGLE")
+                                 (match_test "TARGET_VFP_SINGLE && !TARGET_HAVE_MVE")
                                  (const_int 8)
                                  (const_int 4))]
                               (const_int 4)))
       case 6: case 7: case 9:
        return output_move_double (operands, true, NULL);
       case 8:
-       if (TARGET_VFP_SINGLE)
+       if (TARGET_VFP_SINGLE && !TARGET_HAVE_MVE)
          return \"vmov%?.f32\\t%0, %1\;vmov%?.f32\\t%p0, %p1\";
        else
          return \"vmov%?.f64\\t%P0, %P1\";
    (set (attr "length") (cond [(eq_attr "alternative" "6,7,9") (const_int 8)
                               (eq_attr "alternative" "8")
                                (if_then_else
-                                (match_test "TARGET_VFP_SINGLE")
+                                (match_test "TARGET_VFP_SINGLE && !TARGET_HAVE_MVE")
                                 (const_int 8)
                                 (const_int 4))]
                              (const_int 4)))
index 39e8c40632477ee571da4875435507df6e37ed10..b7e82f60149c28d14ecb599bbfe4a5a71a9bc64d 100644 (file)
@@ -33,13 +33,7 @@ w_r ()
 
 /*
 ** w_w:
-** (
-**     vmov.f32        s2, s0
-**     vmov.f32        s3, s1
-** |
-**     vmov.f32        s3, s1
-**     vmov.f32        s2, s0
-** )
+**     vmov.f64        d1, d0
 **     bx      lr
 */
 void