(define_insn "*rotrsi3_sext"
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI (rotatert:SI (match_operand:SI 1 "register_operand" "r")
- (match_operand:QI 2 "register_operand" "r"))))]
+ (match_operand:QI 2 "arith_operand" "rI"))))]
"TARGET_64BIT && (TARGET_ZBB || TARGET_ZBKB)"
- "rorw\t%0,%1,%2"
+ "ror%i2%~\t%0,%1,%2"
[(set_attr "type" "bitmanip")])
(define_insn "rotlsi3"
op1 = SUBREG_REG (op1);
}
- /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
- prefer left rotation, if op1 is from bitsize / 2 + 1 to
- bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
- amount instead. */
- if (rotate
- && CONST_INT_P (op1)
- && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
- GET_MODE_BITSIZE (scalar_mode) - 1))
+ /* Canonicalize rotates by constant amount.  We may canonicalize
+ to reduce the immediate or if the ISA can rotate by constants
+ in only one direction.  */
+ if (rotate && reverse_rotate_by_imm_p (scalar_mode, left, op1))
{
op1 = gen_int_shift_amount (mode, (GET_MODE_BITSIZE (scalar_mode)
                                   - INTVAL (op1)));
unsigned HOST_WIDE_INT);
extern bool val_signbit_known_clear_p (machine_mode,
unsigned HOST_WIDE_INT);
+extern bool reverse_rotate_by_imm_p (machine_mode, unsigned int, rtx);
/* In reginfo.cc */
extern machine_mode choose_hard_reg_mode (unsigned int, unsigned int,
return NULL_RTX;
}
+/* Return TRUE if a rotate in mode MODE with a constant count in OP1
+ should be reversed, i.e. rewritten as a rotate in the opposite
+ direction with the complementary count (the mode precision minus
+ INTVAL (OP1)).  Return FALSE if the rotate should be left alone.
+
+ LEFT is nonzero for a rotate left and zero for a rotate right.  */
+
+bool
+reverse_rotate_by_imm_p (machine_mode mode, unsigned int left, rtx op1)
+{
+ if (!CONST_INT_P (op1))
+ return false;
+
+ /* Some targets may only be able to rotate by a constant
+ in one direction. So we need to query the optab interface
+ to see what is possible. */
+ optab binoptab = left ? rotl_optab : rotr_optab;
+ optab re_binoptab = left ? rotr_optab : rotl_optab;
+ enum insn_code icode = optab_handler (binoptab, mode);
+ enum insn_code re_icode = optab_handler (re_binoptab, mode);
+
+ /* If the target cannot do the reversed rotate at all, there
+ is nothing to do.  */
+ if (re_icode == CODE_FOR_nothing)
+ return false;
+
+ /* If the target does not support the requested rotate-by-immediate,
+ then we want to try reversing the rotate. We also want to try
+ reversing to minimize the count. */
+ if ((icode == CODE_FOR_nothing)
+ || (!insn_operand_matches (icode, 2, op1))
+ || (IN_RANGE (INTVAL (op1),
+ GET_MODE_UNIT_PRECISION (mode) / 2 + left,
+ GET_MODE_UNIT_PRECISION (mode) - 1)))
+ return (insn_operand_matches (re_icode, 2, op1));
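+
+  /* Otherwise the target handles this rotate-by-immediate directly and
+     the count is already no more than half the precision, so keep the
+     rotate as-is.  */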
+ return false;
+}
+
/* Subroutine of simplify_binary_operation. Simplify a binary operation
CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
case ROTATE:
if (trueop1 == CONST0_RTX (mode))
return op0;
- /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
- prefer left rotation, if op1 is from bitsize / 2 + 1 to
- bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
- amount instead. */
+ /* Canonicalize rotates by constant amount.  Reverse the direction
+ when reverse_rotate_by_imm_p says the reversed form is preferable.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
- if (CONST_INT_P (trueop1)
- && IN_RANGE (INTVAL (trueop1),
- GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
- GET_MODE_UNIT_PRECISION (mode) - 1))
+ if (reverse_rotate_by_imm_p (mode, (code == ROTATE), trueop1))
{
int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zbb -mabi=lp64d -fno-lto -O2" } */
+/* { dg-skip-if "" { *-*-* } { "-g" } } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
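+/* Shift/or idioms for rotates.  With Zbb on rv64, the constant-amount
+   cases should collapse to a single rori (DImode) or roriw (SImode),
+   and the variable-amount case to rorw.  */
+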
+/*
+**foo1:
+** rori a0,a0,34
+** ret
+*/
+unsigned long foo1 (unsigned long rs1)
+{ return (rs1 >> (34)) | (rs1 << 30); }
+
+/*
+**foo2:
+** rori a0,a0,54
+** ret
+*/
+unsigned long foo2(unsigned long rs1)
+{
+ return (rs1 << 10) | (rs1 >> 54);
+}
+
+/*
+**foo3:
+** roriw a0,a0,20
+** ret
+*/
+unsigned int foo3(unsigned int rs1)
+{
+ return (rs1 >> 20) | (rs1 << 12);
+}
+
+/*
+**foo4:
+** roriw a0,a0,22
+** ret
+*/
+unsigned int foo4(unsigned int rs1)
+{
+ return (rs1 << 10) | (rs1 >> 22);
+}
+
+/*
+**foo5:
+** rorw a0,a0,a1
+** ret
+*/
+unsigned int foo5(unsigned int rs1, unsigned int rs2)
+{
+ return (rs1 >> rs2) | (rs1 << (32 - rs2));
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_zbb -mabi=ilp32 -fno-lto -O2" } */
+/* { dg-skip-if "" { *-*-* } { "-g" } } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
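+/* Constant-amount rotates on rv32 with Zbb should collapse to a single
+   rori, whichever direction the source expresses.  */
+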
+/*
+**foo1:
+** rori a0,a0,20
+** ret
+*/
+unsigned int foo1(unsigned int rs1)
+{
+ return (rs1 >> 20) | (rs1 << 12);
+}
+
+/*
+**foo2:
+** rori a0,a0,22
+** ret
+*/
+unsigned int foo2(unsigned int rs1)
+{
+ return (rs1 << 10) | (rs1 >> 22);
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zbb -mabi=lp64d -fno-lto -O2" } */
+/* { dg-skip-if "" { *-*-* } { "-g" } } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
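+/* SImode rotates by a constant on rv64 with Zbb should collapse to a
+   single roriw, whether written as a rotate left or a rotate right.  */
+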
+/*
+**foo1:
+** roriw a0,a0,14
+** ret
+*/
+unsigned int foo1 (unsigned int rs1)
+{ return ((rs1 >> 14) | (rs1 << 18)); }
+
+/*
+**foo2:
+** roriw a0,a0,18
+** ret
+*/
+unsigned int foo2 (unsigned int rs1)
+{ return ((rs1 >> 18) | (rs1 << 14)); }
+
+/*
+**foo3:
+** roriw a0,a0,18
+** ret
+*/
+unsigned int foo3 (unsigned int rs1)
+{ return ((rs1 << 14) | (rs1 >> 18)); }
+
+/*
+**foo4:
+** roriw a0,a0,14
+** ret
+*/
+unsigned int foo4 (unsigned int rs1)
+{ return ((rs1 << 18) | (rs1 >> 14)); }
\ No newline at end of file
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gc_zbb -mabi=lp64d -fno-lto -O2" } */
+/* { dg-skip-if "" { *-*-* } { "-g" } } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
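+/* The extra shift pairs below cancel out, so each sequence still forms
+   a full-width rotate and should assemble to a single rori.  */
+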
+/*
+**foo1:
+** rori a0,a0,34
+** ret
+*/
+unsigned long foo1 (unsigned long rs1)
+{
+  unsigned long temp;
+  temp = rs1 >> 30;
+  temp = temp << 2;
+  temp = temp >> 6;
+  rs1 = temp | (rs1 << 30);
+  return rs1;
+}
+
+/*
+**foo2:
+** rori a0,a0,24
+** ret
+*/
+unsigned long foo2 (unsigned long rs1)
+{
+  unsigned long temp;
+  temp = rs1 >> 20;
+  temp = temp << 2;
+  temp = temp >> 6;
+  rs1 = temp | (rs1 << 40);
+  return rs1;
+}
+
+/*
+**foo3:
+** rori a0,a0,40
+** ret
+*/
+unsigned long foo3 (unsigned long rs1)
+{
+  unsigned long temp;
+  temp = rs1 << 20;
+  temp = temp >> 2;
+  temp = temp << 6;
+  rs1 = temp | (rs1 >> 40);
+  return rs1;
+}
+
+/*
+**foo4:
+** rori a0,a0,20
+** ret
+*/
+unsigned long foo4 (unsigned long rs1)
+{
+  unsigned long temp;
+  temp = rs1 << 40;
+  temp = temp >> 2;
+  temp = temp << 6;
+  rs1 = temp | (rs1 >> 20);
+  return rs1;
+}
\ No newline at end of file