i386: Promote {QI,HI}mode x86_mov<mode>cc_0_m1_neg to SImode
author Uros Bizjak <ubizjak@gmail.com>
Mon, 8 Jul 2024 18:47:52 +0000 (20:47 +0200)
committer Uros Bizjak <ubizjak@gmail.com>
Mon, 8 Jul 2024 20:39:59 +0000 (22:39 +0200)
Promote the HImode x86_mov<mode>cc_0_m1_neg insn to SImode to avoid
redundant operand-size prefixes. Also promote the QImode insn when
TARGET_PROMOTE_QImode is set. This is similar to the
promotable_binary_operator splitter, where we promote the result to SImode.
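
As a hedged illustration (not part of the patch; the function name and the
exact assembly are assumptions), a 0/-1 mask in a 16-bit type is typically
materialized by x86_mov<mode>cc_0_m1_neg as an sbb of a register with itself,
where the HImode form carries a 0x66 operand-size prefix that the SImode form
avoids:

  /* Hypothetical example: return -1 when a < b, else 0.  GCC materializes
     the mask with an sbb of a register with itself.  */
  short
  lt_mask (unsigned int a, unsigned int b)
  {
    return a < b ? -1 : 0;
  }

  /* Assumed code-generation sketch:
     before:  cmpl %esi, %edi
              sbbw %ax, %ax      # 66 19 c0  (HImode, operand-size prefix)
     after:   cmpl %esi, %edi
              sbbl %eax, %eax    # 19 c0     (SImode, one byte shorter)  */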

Also correct the insn condition of the splitters that promote NEG and NOT
instructions to SImode. The QImode and SImode forms of these instructions
are always the same size, so there is no need for the
optimize_insn_for_size_p bypass.
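
For reference (ISA encodings, not something the patch changes), the register
forms of neg and not have the same length in byte and doubleword widths, so
the size-optimization bypass could not save anything; a hypothetical example:

  /* Hypothetical example: byte-wide negation that the splitter may widen.  */
  signed char
  neg8 (signed char x)
  {
    return -x;
  }

  /* Register encodings have equal length in both widths:
     negb %al   # f6 d8 (2 bytes)      negl %eax   # f7 d8 (2 bytes)
     notb %al   # f6 d0 (2 bytes)      notl %eax   # f7 d0 (2 bytes)
     so splitting the QImode form to SImode never increases code size.  */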

gcc/ChangeLog:

* config/i386/i386.md (x86_mov<mode>cc_0_m1_neg splitter to SImode):
New splitter.
(NEG and NOT splitter to SImode): Remove optimize_insn_for_size_p
predicate from insn condition.

gcc/config/i386/i386.md

index b24c4fe58750549759407ed54a0b5e4dc139a311..214cb2e239ae1c815392c15e32920cf6333e1b66 100644
    (clobber (reg:CC FLAGS_REG))]
   "! TARGET_PARTIAL_REG_STALL && reload_completed
    && (GET_MODE (operands[0]) == HImode
-       || (GET_MODE (operands[0]) == QImode
-          && (TARGET_PROMOTE_QImode
-              || optimize_insn_for_size_p ())))"
+       || (GET_MODE (operands[0]) == QImode && TARGET_PROMOTE_QImode))"
   [(parallel [(set (match_dup 0)
                   (neg:SI (match_dup 1)))
              (clobber (reg:CC FLAGS_REG))])]
        (not (match_operand 1 "general_reg_operand")))]
   "! TARGET_PARTIAL_REG_STALL && reload_completed
    && (GET_MODE (operands[0]) == HImode
-       || (GET_MODE (operands[0]) == QImode
-          && (TARGET_PROMOTE_QImode
-              || optimize_insn_for_size_p ())))"
+       || (GET_MODE (operands[0]) == QImode && TARGET_PROMOTE_QImode))"
   [(set (match_dup 0)
        (not:SI (match_dup 1)))]
 {
   operands[0] = gen_lowpart (SImode, operands[0]);
   operands[1] = gen_lowpart (SImode, operands[1]);
 })
+
+(define_split
+  [(set (match_operand 0 "general_reg_operand")
+       (neg (match_operator 1 "ix86_carry_flag_operator"
+             [(reg FLAGS_REG) (const_int 0)])))
+   (clobber (reg:CC FLAGS_REG))]
+  "! TARGET_PARTIAL_REG_STALL && reload_completed
+   && (GET_MODE (operands[0]) == HImode
+       || (GET_MODE (operands[0]) == QImode && TARGET_PROMOTE_QImode))"
+  [(parallel [(set (match_dup 0)
+                  (neg:SI (match_dup 1)))
+             (clobber (reg:CC FLAGS_REG))])]
+{
+  operands[0] = gen_lowpart (SImode, operands[0]);
+  operands[1] = shallow_copy_rtx (operands[1]);
+  PUT_MODE (operands[1], SImode);
+})
 \f
 ;; RTL Peephole optimizations, run before sched2.  These primarily look to
 ;; transform a complex memory operation into two memory to register operations.