/* Expand mult operation with constant integer, multiplicand also used as a
* temporary register. */
-static void
+static bool
riscv_expand_mult_with_const_int (machine_mode mode, rtx dest, rtx multiplicand,
HOST_WIDE_INT multiplier)
{
if (multiplier == 0)
{
riscv_emit_move (dest, GEN_INT (0));
- return;
+ return false;
}
bool neg_p = multiplier < 0;
if (neg_p)
riscv_expand_op (NEG, mode, dest, multiplicand, NULL_RTX);
else
- riscv_emit_move (dest, multiplicand);
+ {
+ riscv_emit_move (dest, multiplicand);
+
+ /* Signal to our caller that it should try to optimize away
+ the copy. */
+ return true;
+ }
}
else
{
riscv_expand_op (MULT, mode, dest, dest, multiplicand);
}
}
+ return false;
}
-/* Analyze src and emit const_poly_int mov sequence. */
+/* Analyze src and emit const_poly_int mov sequence.
+ Essentially we want to generate (set (dest) (src)), where SRC is
+ a poly_int. We may need TMP as a scratch register. We assume TMP
+ is truly a scratch register and need not have any particular value
+ after the sequence. */
void
riscv_legitimize_poly_move (machine_mode mode, rtx dest, rtx tmp, rtx src)
{
riscv_expand_op (LSHIFTRT, mode, tmp, tmp,
gen_int_mode (exact_log2 (div_factor), QImode));
- riscv_expand_mult_with_const_int (mode, dest, tmp,
- factor / (vlenb / div_factor));
+ bool opt_seq
+ = riscv_expand_mult_with_const_int (mode, dest, tmp,
+ factor / (vlenb / div_factor));
+
+ /* Potentially try to optimize the sequence we've generated so far.
+ Essentially when OPT_SEQ is true, we should have a simple reg->reg
+ copy from TMP to DEST as the last insn in the sequence. Try to
+ back up one real insn and adjust it in that case.
+
+ This is important for frame setup/teardown with RVV since we can't
+ propagate away the copy as the copy is not frame related, but the
+ insn creating or destroying the frame is frame related. */
+ if (opt_seq)
+ {
+ rtx_insn *insn = get_last_insn ();
+ rtx set = single_set (insn);
+
+      /* Verify the last insn in the chain is a simple assignment from
+	 TMP to DEST.  */
+ gcc_assert (set);
+ gcc_assert (SET_SRC (set) == tmp);
+ gcc_assert (SET_DEST (set) == dest);
+
+ /* Now back up one real insn and see if it sets TMP, if so adjust
+ it so that it sets DEST. */
+ rtx_insn *insn2 = prev_nonnote_nondebug_insn (insn);
+ rtx set2 = insn2 ? single_set (insn2) : NULL_RTX;
+ if (set2 && SET_DEST (set2) == tmp)
+ {
+ SET_DEST (set2) = dest;
+	  /* Turn the copy insn itself into a NOP.  But don't delete it.  */
+ SET_SRC (set) = SET_DEST (set);
+ }
+
+ }
+
HOST_WIDE_INT constant = offset - factor;
if (constant == 0)
/*
** spill_1:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** csrr\t[a-x0-9]+,vlenb
** srli\t[a-x0-9]+,[a-x0-9]+,3
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle8.v\tv[0-9]+,0\([a-x0-9]+\)
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_2:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e8,mf4,ta,ma
** vle8.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle8.v\tv[0-9]+,0\([a-x0-9]+\)
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_3:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e8,mf2,ta,ma
** vle8.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
** ...
-** csrr\t[a-x0-9]+,vlenb
** srli\t[a-x0-9]+,[a-x0-9]+,1
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle8.v\tv[0-9]+,0\([a-x0-9]+\)
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_4:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** vs1r.v\tv[0-9]+,0\(sp\)
** ...
/*
** spill_5:
-** csrr\tt0,vlenb
-** slli\tt1,t0,1
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,1
** sub\tsp,sp,t1
** ...
** vs2r.v\tv[0-9]+,0\(sp\)
/*
** spill_6:
-** csrr\tt0,vlenb
-** slli\tt1,t0,2
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,2
** sub\tsp,sp,t1
** ...
** vs4r.v\tv[0-9]+,0\(sp\)
/*
** spill_7:
-** csrr\tt0,vlenb
-** slli\tt1,t0,3
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,3
** sub\tsp,sp,t1
** ...
** vs8r.v\tv[0-9]+,0\(sp\)
/*
** spill_8:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e8,mf8,ta,ma
** vle8.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle8.v\tv[0-9]+,0\([a-x0-9]+\)
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_9:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e8,mf4,ta,ma
** vle8.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle8.v\tv[0-9]+,0\([a-x0-9]+\)
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_10:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e8,mf2,ta,ma
** vle8.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
** ...
-** csrr\t[a-x0-9]+,vlenb
** srli\t[a-x0-9]+,[a-x0-9]+,1
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle8.v\tv[0-9]+,0\([a-x0-9]+\)
** vse8.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_11:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** vs1r.v\tv[0-9]+,0\(sp\)
** ...
/*
** spill_12:
-** csrr\tt0,vlenb
-** slli\tt1,t0,1
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,1
** sub\tsp,sp,t1
** ...
** vs2r.v\tv[0-9]+,0\(sp\)
/*
** spill_13:
-** csrr\tt0,vlenb
-** slli\tt1,t0,2
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,2
** sub\tsp,sp,t1
** ...
** vs4r.v\tv[0-9]+,0\(sp\)
/*
** spill_14:
-** csrr\tt0,vlenb
-** slli\tt1,t0,3
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,3
** sub\tsp,sp,t1
** ...
** vs8r.v\tv[0-9]+,0\(sp\)
/*
** spill_2:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e16,mf4,ta,ma
** vle16.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle16.v\tv[0-9]+,0\([a-x0-9]+\)
** vse16.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_3:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e16,mf2,ta,ma
** vle16.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vse16.v\tv[0-9]+,0\([a-x0-9]+\)
** ...
-** csrr\t[a-x0-9]+,vlenb
** srli\t[a-x0-9]+,[a-x0-9]+,1
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle16.v\tv[0-9]+,0\([a-x0-9]+\)
** vse16.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_4:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** vs1r.v\tv[0-9]+,0\(sp\)
** ...
/*
** spill_5:
-** csrr\tt0,vlenb
-** slli\tt1,t0,1
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,1
** sub\tsp,sp,t1
** ...
** vs2r.v\tv[0-9]+,0\(sp\)
/*
** spill_6:
-** csrr\tt0,vlenb
-** slli\tt1,t0,2
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,2
** sub\tsp,sp,t1
** ...
** vs4r.v\tv[0-9]+,0\(sp\)
/*
** spill_7:
-** csrr\tt0,vlenb
-** slli\tt1,t0,3
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,3
** sub\tsp,sp,t1
** ...
** vs8r.v\tv[0-9]+,0\(sp\)
/*
** spill_9:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e16,mf4,ta,ma
** vle16.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle16.v\tv[0-9]+,0\([a-x0-9]+\)
** vse16.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_10:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e16,mf2,ta,ma
** vle16.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vse16.v\tv[0-9]+,0\([a-x0-9]+\)
** ...
-** csrr\t[a-x0-9]+,vlenb
** srli\t[a-x0-9]+,[a-x0-9]+,1
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle16.v\tv[0-9]+,0\([a-x0-9]+\)
** vse16.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_11:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** vs1r.v\tv[0-9]+,0\(sp\)
** ...
/*
** spill_12:
-** csrr\tt0,vlenb
-** slli\tt1,t0,1
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,1
** sub\tsp,sp,t1
** ...
** vs2r.v\tv[0-9]+,0\(sp\)
/*
** spill_13:
-** csrr\tt0,vlenb
-** slli\tt1,t0,2
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,2
** sub\tsp,sp,t1
** ...
** vs4r.v\tv[0-9]+,0\(sp\)
/*
** spill_14:
-** csrr\tt0,vlenb
-** slli\tt1,t0,3
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,3
** sub\tsp,sp,t1
** ...
** vs8r.v\tv[0-9]+,0\(sp\)
/*
** spill_3:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e32,mf2,ta,ma
** vle32.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
** ...
-** csrr\t[a-x0-9]+,vlenb
** srli\t[a-x0-9]+,[a-x0-9]+,1
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle32.v\tv[0-9]+,0\([a-x0-9]+\)
** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_4:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** vs1r.v\tv[0-9]+,0\(sp\)
** ...
/*
** spill_5:
-** csrr\tt0,vlenb
-** slli\tt1,t0,1
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,1
** sub\tsp,sp,t1
** ...
** vs2r.v\tv[0-9]+,0\(sp\)
/*
** spill_6:
-** csrr\tt0,vlenb
-** slli\tt1,t0,2
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,2
** sub\tsp,sp,t1
** ...
** vs4r.v\tv[0-9]+,0\(sp\)
/*
** spill_7:
-** csrr\tt0,vlenb
-** slli\tt1,t0,3
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,3
** sub\tsp,sp,t1
** ...
** vs8r.v\tv[0-9]+,0\(sp\)
/*
** spill_10:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e32,mf2,ta,ma
** vle32.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
** ...
-** csrr\t[a-x0-9]+,vlenb
** srli\t[a-x0-9]+,[a-x0-9]+,1
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle32.v\tv[0-9]+,0\([a-x0-9]+\)
** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_11:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** vs1r.v\tv[0-9]+,0\(sp\)
** ...
/*
** spill_12:
-** csrr\tt0,vlenb
-** slli\tt1,t0,1
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,1
** sub\tsp,sp,t1
** ...
** vs2r.v\tv[0-9]+,0\(sp\)
/*
** spill_13:
-** csrr\tt0,vlenb
-** slli\tt1,t0,2
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,2
** sub\tsp,sp,t1
** ...
** vs4r.v\tv[0-9]+,0\(sp\)
/*
** spill_14:
-** csrr\tt0,vlenb
-** slli\tt1,t0,3
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,3
** sub\tsp,sp,t1
** ...
** vs8r.v\tv[0-9]+,0\(sp\)
/*
** spill_4:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** vs1r.v\tv[0-9]+,0\(sp\)
** ...
/*
** spill_5:
-** csrr\tt0,vlenb
-** slli\tt1,t0,1
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,1
** sub\tsp,sp,t1
** ...
** vs2r.v\tv[0-9]+,0\(sp\)
/*
** spill_6:
-** csrr\tt0,vlenb
-** slli\tt1,t0,2
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,2
** sub\tsp,sp,t1
** ...
** vs4r.v\tv[0-9]+,0\(sp\)
/*
** spill_7:
-** csrr\tt0,vlenb
-** slli\tt1,t0,3
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,3
** sub\tsp,sp,t1
** ...
** vs8r.v\tv[0-9]+,0\(sp\)
/*
** spill_11:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** vs1r.v\tv[0-9]+,0\(sp\)
** ...
/*
** spill_12:
-** csrr\tt0,vlenb
-** slli\tt1,t0,1
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,1
** sub\tsp,sp,t1
** ...
** vs2r.v\tv[0-9]+,0\(sp\)
/*
** spill_13:
-** csrr\tt0,vlenb
-** slli\tt1,t0,2
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,2
** sub\tsp,sp,t1
** ...
** vs4r.v\tv[0-9]+,0\(sp\)
/*
** spill_14:
-** csrr\tt0,vlenb
-** slli\tt1,t0,3
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,3
** sub\tsp,sp,t1
** ...
** vs8r.v\tv[0-9]+,0\(sp\)
/*
** spill_3:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** vsetvli\ta5,zero,e32,mf2,ta,ma
** vle32.v\tv[0-9]+,0\(a0\)
** csrr\t[a-x0-9]+,vlenb
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
** ...
-** csrr\t[a-x0-9]+,vlenb
** srli\t[a-x0-9]+,[a-x0-9]+,1
** add\t[a-x0-9]+,[a-x0-9]+,sp
** vle32.v\tv[0-9]+,0\([a-x0-9]+\)
** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
-** csrr\tt0,vlenb
-** add\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** add\tsp,sp,[a-x0-9]+
** ...
** jr\tra
*/
/*
** spill_4:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** vs1r.v\tv[0-9]+,0\(sp\)
** ...
/*
** spill_5:
-** csrr\tt0,vlenb
-** slli\tt1,t0,1
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,1
** sub\tsp,sp,t1
** ...
** vs2r.v\tv[0-9]+,0\(sp\)
/*
** spill_6:
-** csrr\tt0,vlenb
-** slli\tt1,t0,2
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,2
** sub\tsp,sp,t1
** ...
** vs4r.v\tv[0-9]+,0\(sp\)
/*
** spill_7:
-** csrr\tt0,vlenb
-** slli\tt1,t0,3
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,3
** sub\tsp,sp,t1
** ...
** vs8r.v\tv[0-9]+,0\(sp\)
/*
** spill_4:
-** csrr\tt0,vlenb
-** sub\tsp,sp,t0
+** csrr\t[a-x0-9]+,vlenb
+** sub\tsp,sp,[a-x0-9]+
** ...
** vs1r.v\tv[0-9]+,0\(sp\)
** ...
/*
** spill_5:
-** csrr\tt0,vlenb
-** slli\tt1,t0,1
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,1
** sub\tsp,sp,t1
** ...
** vs2r.v\tv[0-9]+,0\(sp\)
/*
** spill_6:
-** csrr\tt0,vlenb
-** slli\tt1,t0,2
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,2
** sub\tsp,sp,t1
** ...
** vs4r.v\tv[0-9]+,0\(sp\)
/*
** spill_7:
-** csrr\tt0,vlenb
-** slli\tt1,t0,3
+** csrr\t[a-x0-9]+,vlenb
+** slli\tt1,[a-x0-9]+,3
** sub\tsp,sp,t1
** ...
** vs8r.v\tv[0-9]+,0\(sp\)