gimple_seq_add_stmt_without_update (&stmts, mem_ref_stmt);
int source_nelts = TYPE_VECTOR_SUBPARTS (access_type).to_constant ();
+
+ /* When the SVE vector has the same number of elements as the
+ 128-bit quadword (i.e. VL == 128), the load fills the entire
+ register and no replication is needed. Just convert the
+ loaded value from the Advanced SIMD type to the SVE type. */
+ if (known_eq (lhs_len, (unsigned int) source_nelts))
+ {
+ gimple *g
+ = gimple_build_assign (lhs, build1 (VIEW_CONVERT_EXPR,
+ lhs_type, mem_ref_lhs));
+ gimple_seq_add_stmt_without_update (&stmts, g);
+ gsi_replace_with_seq_vops (f.gsi, stmts);
+ return g;
+ }
vec_perm_builder sel (lhs_len, source_nelts, 1);
for (int i = 0; i < source_nelts; i++)
sel.quick_push (i);
--- /dev/null
+/* PR target/124908 */
+/* { dg-options "-O2 -msve-vector-bits=128" } */
+
+#include <arm_sve.h>
+
+/* Verify that folding svld1rq does not ICE with -msve-vector-bits=128.
+   With VL == 128 the SVE vector holds exactly as many elements as the
+   128-bit quadword loaded by svld1rq, so the fold must take the
+   no-replication path rather than building a vec_perm.  Each function
+   exercises a different element width (and signedness) to cover all
+   the type-suffix instantiations of the fold.  */
+
+svuint8_t
+f_u8 (const uint8_t *p)
+{
+  return svld1rq_u8 (svptrue_b8 (), p);
+}
+
+svint8_t
+f_s8 (const int8_t *p)
+{
+  return svld1rq_s8 (svptrue_b8 (), p);
+}
+
+svuint16_t
+f_u16 (const uint16_t *p)
+{
+  return svld1rq_u16 (svptrue_b16 (), p);
+}
+
+svuint32_t
+f_u32 (const uint32_t *p)
+{
+  return svld1rq_u32 (svptrue_b32 (), p);
+}
+
+svfloat64_t
+f_f64 (const float64_t *p)
+{
+  return svld1rq_f64 (svptrue_b64 (), p);
+}
+