&& MEM_VOLATILE_P (x)
&& XEXP (x, 0) == stack_pointer_rtx)
return true;
- if (/* MEM_NOTRAP_P only relates to the actual position of the memory
- reference; moving it out of context such as when moving code
- when optimizing, might cause its address to become invalid. */
- code_changed
+ if (/* MEM_READONLY_P means that the memory is both statically
+ allocated and readonly, so MEM_NOTRAP_P should remain true
+ even if the memory reference is moved. This is certainly
+ true for the important case of force_const_mem.
+
+ Otherwise, MEM_NOTRAP_P only relates to the actual position
+ of the memory reference; moving it out of context, such as
+ when code is moved during optimization, might cause its
+ address to become invalid. */
+ (code_changed && !MEM_READONLY_P (x))
|| !MEM_NOTRAP_P (x))
{
poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
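(As a minimal illustration of the case the new comment describes, and
assuming a target that materializes floating-point constants through the
constant pool: force_const_mem gives the constant a statically allocated,
readonly MEM, so hoisting that load out of a loop cannot introduce a trap.
The sketch below is illustrative only and is not part of the patch.)

    void
    scale (const double *p, double *q, int n)
    {
      for (int i = 0; i < n; i++)
        /* 1.5e-3 is typically loaded from the constant pool (.rodata);
           moving this readonly load out of the loop must not make
           may_trap_p_1 return true.  */
        q[i] = p[i] * 1.5e-3;
    }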
--- /dev/null
+// { dg-options "-O2" }
+
+#include <stdlib.h>
+#include <arm_sve.h>
+
+#pragma GCC target "+sve2"
+
+typedef unsigned char uchar;
+
+const uchar *
+search_line_fast (const uchar *s, const uchar *end)
+{
+ size_t VL = svcntb();
+ svuint8_t arr1, arr2;
+ svbool_t pc, pg = svptrue_b8();
+
+ // The constant search vector should be hoisted out of the loop,
+ // not reloaded on every iteration.
+ arr2 = svreinterpret_u8(svdup_u32(0x0a0d5c3f));
+
+ for (; s+VL <= end; s += VL) {
+ arr1 = svld1_u8(pg, s);
+ pc = svmatch_u8(pg, arr1, arr2);
+
+ if (svptest_any(pg, pc)) {
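+ // svbrkb_z keeps only the lanes before the first match, so
+ // svcntp_b8 yields the byte offset of that match within this block.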
+ pc = svbrkb_z(pg, pc);
+ return s+svcntp_b8(pg, pc);
+ }
+ }
+
+ // Handle remainder.
+ if (s < end) {
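+ // Build a predicate that is active only for the tail bytes in [s, end).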
+ pg = svwhilelt_b8((size_t)s, (size_t)end);
+
+ arr1 = svld1_u8(pg, s);
+ pc = svmatch_u8(pg, arr1, arr2);
+
+ if (svptest_any(pg, pc)) {
+ pc = svbrkb_z(pg, pc);
+ return s+svcntp_b8(pg, pc);
+ }
+ }
+
+ return end;
+}
+
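+// The scan below checks that the loop body (after its label) starts
+// directly with the load and match followed by a conditional branch,
+// i.e. the constant vector was hoisted out of the loop.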
+// { dg-final { scan-assembler {:\n\tld1b\t[^\n]*\n\tmatch\t[^\n]*\n\tb\.} } }