UNSPEC_SBB
UNSPEC_CC_NE
UNSPEC_STC
+ UNSPEC_PUSHFL
+ UNSPEC_POPFL
;; For SSE/MMX support:
UNSPEC_FIX_NOTRUNC
;; For insn_callee_abi:
UNSPEC_CALLEE_ABI
+ ;; For PUSH2/POP2 support
+ UNSPEC_APXPUSH2
+ UNSPEC_APXPOP2_LOW
+ UNSPEC_APXPOP2_HIGH
])
(define_c_enum "unspecv" [
;; For PREFETCHI support
UNSPECV_PREFETCHI
+
+ ;; For USER_MSR support
+ UNSPECV_URDMSR
+ UNSPECV_UWRMSR
])
;; Constants to represent rounding modes in the ROUND instruction
(MASK5_REG 73)
(MASK6_REG 74)
(MASK7_REG 75)
- (FIRST_PSEUDO_REG 76)
+ (R16_REG 76)
+ (R17_REG 77)
+ (R18_REG 78)
+ (R19_REG 79)
+ (R20_REG 80)
+ (R21_REG 81)
+ (R22_REG 82)
+ (R23_REG 83)
+ (R24_REG 84)
+ (R25_REG 85)
+ (R26_REG 86)
+ (R27_REG 87)
+ (R28_REG 88)
+ (R29_REG 89)
+ (R30_REG 90)
+ (R31_REG 91)
+ (FIRST_PSEUDO_REG 92)
])
;; Insn callee abi index.
\f
;; Processor type.
(define_attr "cpu" "none,pentium,pentiumpro,geode,k6,athlon,k8,core2,nehalem,
- atom,slm,glm,haswell,generic,lujiazui,amdfam10,bdver1,
+ atom,slm,glm,haswell,generic,lujiazui,yongfeng,amdfam10,bdver1,
bdver2,bdver3,bdver4,btver2,znver1,znver2,znver3,znver4"
(const (symbol_ref "ix86_schedule")))
(define_attr "isa" "base,x64,nox64,x64_sse2,x64_sse4,x64_sse4_noavx,
x64_avx,x64_avx512bw,x64_avx512dq,aes,
sse_noavx,sse2,sse2_noavx,sse3,sse3_noavx,sse4,sse4_noavx,
- avx,noavx,avx2,noavx2,bmi,bmi2,fma4,fma,avx512f,noavx512f,
- avx512bw,noavx512bw,avx512dq,noavx512dq,fma_or_avx512vl,
- avx512vl,noavx512vl,avxvnni,avx512vnnivl,avx512fp16,avxifma,
- avx512ifmavl,avxneconvert,avx512bf16vl,vpclmulqdqvl"
+ avx,noavx,avx2,noavx2,bmi,bmi2,fma4,fma,avx512f,avx512f_512,
+ noavx512f,avx512bw,avx512bw_512,noavx512bw,avx512dq,
+ noavx512dq,fma_or_avx512vl,avx512vl,noavx512vl,avxvnni,
+ avx512vnnivl,avx512fp16,avxifma,avx512ifmavl,avxneconvert,
+ avx512bf16vl,vpclmulqdqvl,avx_noavx512f,avx_noavx512vl"
(const_string "base"))
;; The (bounding maximum) length of an instruction immediate.
;; Define attribute to indicate unaligned ssemov insns
(define_attr "movu" "0,1" (const_string "0"))
+;; Define attribute to limit memory address register set.
+(define_attr "addr" "gpr8,gpr16,gpr32" (const_string "gpr32"))
+
;; Define instruction set of MMX instructions
(define_attr "mmx_isa" "base,native,sse,sse_noavx,avx"
(const_string "base"))
(eq_attr "isa" "sse4_noavx")
(symbol_ref "TARGET_SSE4_1 && !TARGET_AVX")
(eq_attr "isa" "avx") (symbol_ref "TARGET_AVX")
+ (eq_attr "isa" "avx_noavx512f")
+ (symbol_ref "TARGET_AVX && !TARGET_AVX512F")
+ (eq_attr "isa" "avx_noavx512vl")
+ (symbol_ref "TARGET_AVX && !TARGET_AVX512VL")
(eq_attr "isa" "noavx") (symbol_ref "!TARGET_AVX")
(eq_attr "isa" "avx2") (symbol_ref "TARGET_AVX2")
(eq_attr "isa" "noavx2") (symbol_ref "!TARGET_AVX2")
(eq_attr "isa" "fma_or_avx512vl")
(symbol_ref "TARGET_FMA || TARGET_AVX512VL")
(eq_attr "isa" "avx512f") (symbol_ref "TARGET_AVX512F")
+ (eq_attr "isa" "avx512f_512")
+ (symbol_ref "TARGET_AVX512F && TARGET_EVEX512")
(eq_attr "isa" "noavx512f") (symbol_ref "!TARGET_AVX512F")
(eq_attr "isa" "avx512bw") (symbol_ref "TARGET_AVX512BW")
+ (eq_attr "isa" "avx512bw_512")
+ (symbol_ref "TARGET_AVX512BW && TARGET_EVEX512")
(eq_attr "isa" "noavx512bw") (symbol_ref "!TARGET_AVX512BW")
(eq_attr "isa" "avx512dq") (symbol_ref "TARGET_AVX512DQ")
(eq_attr "isa" "noavx512dq") (symbol_ref "!TARGET_AVX512DQ")
(define_code_iterator any_shift [ashift lshiftrt ashiftrt])
;; Base name for insn mnemonic.
-(define_code_attr shift [(ashift "sll") (lshiftrt "shr") (ashiftrt "sar")])
+(define_code_attr shift [(ashift "sal") (lshiftrt "shr") (ashiftrt "sar")])
(define_code_attr vshift [(ashift "sll") (lshiftrt "srl") (ashiftrt "sra")])
;; Mapping of rotate operators
(include "core2.md")
(include "haswell.md")
(include "lujiazui.md")
+(include "yongfeng.md")
\f
;; Operand and operator predicates and constraints
DONE;
})
+;; Conditional branch on a 512-bit (XImode) integer comparison.
+;; Only enabled for AVX512F with 512-bit EVEX support (TARGET_EVEX512)
+;; and when 256-bit vectors are not preferred; the actual comparison
+;; and jump sequence is produced by ix86_expand_branch.
+(define_expand "cbranchxi4"
+ [(set (reg:CC FLAGS_REG)
+ (compare:CC (match_operand:XI 1 "nonimmediate_operand")
+ (match_operand:XI 2 "nonimmediate_operand")))
+ (set (pc) (if_then_else
+ (match_operator 0 "bt_comparison_operator"
+ [(reg:CC FLAGS_REG) (const_int 0)])
+ (label_ref (match_operand 3))
+ (pc)))]
+ "TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256"
+{
+ ix86_expand_branch (GET_CODE (operands[0]),
+ operands[1], operands[2], operands[3]);
+ DONE;
+})
+
(define_expand "cstore<mode>4"
[(set (reg:CC FLAGS_REG)
(compare:CC (match_operand:SDWIM 2 "nonimmediate_operand")
;; Scalar modes gated on the mask-register ISA level: QImode needs
;; AVX512DQ, SImode needs AVX512BW, and DImode additionally requires
;; 512-bit EVEX support and 64-bit mode.  NOTE(review): DImode gaining
;; the TARGET_EVEX512 condition presumably reflects 64-bit mask
;; operations being part of the 512-bit EVEX ISA -- confirm.
(define_mode_iterator SWI1248_AVX512BWDQ_64
 [(QI "TARGET_AVX512DQ") HI
- (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW && TARGET_64BIT")])
+ (SI "TARGET_AVX512BW")
+ (DI "TARGET_AVX512BW && TARGET_EVEX512 && TARGET_64BIT")])
(define_insn "*cmp<mode>_ccz_1"
[(set (reg FLAGS_REG)
[(set_attr "type" "icmp")
(set_attr "mode" "<MODE>")])
-(define_insn "*cmpqi_ext<mode>_1_mem_rex64"
- [(set (reg FLAGS_REG)
- (compare
- (match_operand:QI 0 "norex_memory_operand" "Bn")
- (subreg:QI
- (match_operator:SWI248 2 "extract_operator"
- [(match_operand 1 "int248_register_operand" "Q")
- (const_int 8)
- (const_int 8)]) 0)))]
- "TARGET_64BIT && reload_completed
- && ix86_match_ccmode (insn, CCmode)"
- "cmp{b}\t{%h1, %0|%0, %h1}"
- [(set_attr "type" "icmp")
- (set_attr "mode" "QI")])
-
;; Compare operand 0 with the high byte (%h1) of a wider register.
;; The old nox64 memory alternative is replaced by the Bn constraint
;; plus "addr" gpr8, which limits the registers usable in the memory
;; address (high-byte %h operands are presumably incompatible with
;; extended-register address encodings -- confirm against ix86 backend).
(define_insn "*cmpqi_ext<mode>_1"
 [(set (reg FLAGS_REG)
 (compare
- (match_operand:QI 0 "nonimmediate_operand" "QBc,m")
+ (match_operand:QI 0 "nonimmediate_operand" "QBn")
 (subreg:QI
 (match_operator:SWI248 2 "extract_operator"
- [(match_operand 1 "int248_register_operand" "Q,Q")
+ [(match_operand 1 "int248_register_operand" "Q")
 (const_int 8)
 (const_int 8)]) 0)))]
 "ix86_match_ccmode (insn, CCmode)"
 "cmp{b}\t{%h1, %0|%0, %h1}"
- [(set_attr "isa" "*,nox64")
+ [(set_attr "addr" "gpr8")
 (set_attr "type" "icmp")
 (set_attr "mode" "QI")])
-(define_peephole2
- [(set (match_operand:QI 0 "register_operand")
- (match_operand:QI 1 "norex_memory_operand"))
- (set (match_operand 3 "flags_reg_operand")
- (match_operator 4 "compare_operator"
- [(match_dup 0)
- (subreg:QI
- (match_operator:SWI248 5 "extract_operator"
- [(match_operand 2 "int248_register_operand")
- (const_int 8)
- (const_int 8)]) 0)]))]
- "TARGET_64BIT
- && peep2_reg_dead_p (2, operands[0])"
- [(set (match_dup 3)
- (match_op_dup 4
- [(match_dup 1)
- (subreg:QI
- (match_op_dup 5
- [(match_dup 2)
- (const_int 8)
- (const_int 8)]) 0)]))])
-
(define_insn "*cmpqi_ext<mode>_2"
[(set (reg FLAGS_REG)
(compare
(const_int 8)) 0)
(match_operand:QI 1 "const_int_operand")))])
-(define_insn "*cmpqi_ext<mode>_3_mem_rex64"
- [(set (reg FLAGS_REG)
- (compare
- (subreg:QI
- (match_operator:SWI248 2 "extract_operator"
- [(match_operand 0 "int248_register_operand" "Q")
- (const_int 8)
- (const_int 8)]) 0)
- (match_operand:QI 1 "norex_memory_operand" "Bn")))]
- "TARGET_64BIT && reload_completed
- && ix86_match_ccmode (insn, CCmode)"
- "cmp{b}\t{%1, %h0|%h0, %1}"
- [(set_attr "type" "icmp")
- (set_attr "mode" "QI")])
-
;; Mirror of *cmpqi_ext<mode>_1 with the operands swapped: compare the
;; high byte (%h0) of a wider register against operand 1.  As above,
;; the separate nox64 memory alternative is folded into one alternative
;; with the Bn constraint and "addr" gpr8 address restriction.
(define_insn "*cmpqi_ext<mode>_3"
 [(set (reg FLAGS_REG)
 (compare
 (subreg:QI
 (match_operator:SWI248 2 "extract_operator"
- [(match_operand 0 "int248_register_operand" "Q,Q")
+ [(match_operand 0 "int248_register_operand" "Q")
 (const_int 8)
 (const_int 8)]) 0)
- (match_operand:QI 1 "general_operand" "QnBc,m")))]
+ (match_operand:QI 1 "general_operand" "QnBn")))]
 "ix86_match_ccmode (insn, CCmode)"
 "cmp{b}\t{%1, %h0|%h0, %1}"
- [(set_attr "isa" "*,nox64")
+ [(set_attr "addr" "gpr8")
 (set_attr "type" "icmp")
 (set_attr "mode" "QI")])
-(define_peephole2
- [(set (match_operand:QI 0 "register_operand")
- (match_operand:QI 1 "norex_memory_operand"))
- (set (match_operand 3 "flags_reg_operand")
- (match_operator 4 "compare_operator"
- [(subreg:QI
- (match_operator:SWI248 5 "extract_operator"
- [(match_operand 2 "int248_register_operand")
- (const_int 8)
- (const_int 8)]) 0)
- (match_dup 0)]))]
- "TARGET_64BIT
- && peep2_reg_dead_p (2, operands[0])"
- [(set (match_dup 3)
- (match_op_dup 4
- [(subreg:QI
- (match_op_dup 5
- [(match_dup 2)
- (const_int 8)
- (const_int 8)]) 0)
- (match_dup 1)]))])
-
(define_insn "*cmpqi_ext<mode>_4"
[(set (reg FLAGS_REG)
(compare
[(set_attr "type" "pop")
(set_attr "mode" "<MODE>")])
-(define_insn "*pushfl<mode>2"
+;; Push the flags register.  The CCmode flags operand is now wrapped in
+;; UNSPEC_PUSHFL so the word-mode push has a well-formed source RTL,
+;; and the '@' prefix makes gen_pushfl2 () callable from C code.
+(define_insn "@pushfl<mode>2"
 [(set (match_operand:W 0 "push_operand" "=<")
- (match_operand:W 1 "flags_reg_operand"))]
+ (unspec:W [(match_operand:CC 1 "flags_reg_operand")]
+ UNSPEC_PUSHFL))]
 ""
 "pushf{<imodesuffix>}"
 [(set_attr "type" "push")
 (set_attr "mode" "<MODE>")])
-(define_insn "*popfl<mode>1"
- [(set (match_operand:W 0 "flags_reg_operand")
- (match_operand:W 1 "pop_operand" ">"))]
+(define_insn "@popfl<mode>1"
+ [(set (match_operand:CC 0 "flags_reg_operand")
+ (unspec:CC [(match_operand:W 1 "pop_operand" ">")]
+ UNSPEC_POPFL))]
""
"popf{<imodesuffix>}"
[(set_attr "type" "pop")
(define_expand "movxi"
[(set (match_operand:XI 0 "nonimmediate_operand")
(match_operand:XI 1 "general_operand"))]
- "TARGET_AVX512F"
+ "TARGET_AVX512F && TARGET_EVEX512"
"ix86_expand_vector_move (XImode, operands); DONE;")
(define_expand "movoi"
(define_insn "*movxi_internal_avx512f"
[(set (match_operand:XI 0 "nonimmediate_operand" "=v,v ,v ,m")
(match_operand:XI 1 "nonimmediate_or_sse_const_operand" " C,BC,vm,v"))]
- "TARGET_AVX512F
+ "TARGET_AVX512F && TARGET_EVEX512
&& (register_operand (operands[0], XImode)
|| register_operand (operands[1], XImode))"
{
(set_attr "mode" "OI")])
(define_insn "*movti_internal"
- [(set (match_operand:TI 0 "nonimmediate_operand" "=!r ,o ,v,v ,v ,m,?r,?Yd")
- (match_operand:TI 1 "general_operand" "riFo,re,C,BC,vm,v,Yd,r"))]
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=!r ,o ,v,v ,v ,m,?jc,?Yd")
+ (match_operand:TI 1 "general_operand" "riFo,re,C,BC,vm,v,Yd,jc"))]
"(TARGET_64BIT
&& !(MEM_P (operands[0]) && MEM_P (operands[1])))
|| (TARGET_SSE
(define_insn "*movdi_internal"
[(set (match_operand:DI 0 "nonimmediate_operand"
- "=r ,o ,r,r ,r,m ,*y,*y,?*y,?m,?r,?*y,?v,?v,?v,m ,m,?r ,?*Yd,?r,?v,?*y,?*x,*k,*k ,*r,*m,*k")
+ "=r ,o ,r,r ,r,m ,*y,*y,?*y,?m,?r,?*y,?Yv,?v,?v,m ,m,?jc,?*Yd,?r,?v,?*y,?*x,*k,*k ,*r,*m,*k")
(match_operand:DI 1 "general_operand"
- "riFo,riF,Z,rem,i,re,C ,*y,Bk ,*y,*y,r ,C ,?v,Bk,?v,v,*Yd,r ,?v,r ,*x ,*y ,*r,*kBk,*k,*k,CBC"))]
+ "riFo,riF,Z,rem,i,re,C ,*y,Bk ,*y,*y,r ,C ,?v,Bk,?v,v,*Yd,jc ,?v,r ,*x ,*y ,*r,*kBk,*k,*k,CBC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& ix86_hardreg_mov_ok (operands[0], operands[1])"
{
(set (attr "mode")
(cond [(eq_attr "alternative" "2")
(const_string "SI")
- (eq_attr "alternative" "12,13")
+ (eq_attr "alternative" "12")
(cond [(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
(const_string "V4SF")
]
(const_string "TI"))
+ (eq_attr "alternative" "13")
+ (cond [(match_test "TARGET_AVX512VL")
+ (const_string "TI")
+ (match_test "TARGET_AVX512F")
+ (const_string "DF")
+ (match_test "TARGET_AVX")
+ (const_string "TI")
+ (ior (not (match_test "TARGET_SSE2"))
+ (match_test "optimize_function_for_size_p (cfun)"))
+ (const_string "V4SF")
+ ]
+ (const_string "TI"))
(and (eq_attr "alternative" "14,15,16")
(not (match_test "TARGET_SSE2")))
(define_insn "*movsi_internal"
[(set (match_operand:SI 0 "nonimmediate_operand"
- "=r,m ,*y,*y,?*y,?m,?r,?*y,?v,?v,?v,m ,?r,?v,*k,*k ,*rm,*k")
+ "=r,m ,*y,*y,?*y,?m,?r,?*y,?Yv,?v,?v,m ,?r,?v,*k,*k ,*rm,*k")
(match_operand:SI 1 "general_operand"
- "g ,re,C ,*y,Bk ,*y,*y,r ,C ,?v,Bk,?v,?v,r ,*r,*kBk,*k ,CBC"))]
+ "g ,re,C ,*y,Bk ,*y,*y,r ,C ,?v,Bk,?v,?v,r ,*r,*kBk,*k ,CBC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& ix86_hardreg_mov_ok (operands[0], operands[1])"
{
(set (attr "mode")
(cond [(eq_attr "alternative" "2,3")
(const_string "DI")
- (eq_attr "alternative" "8,9")
+ (eq_attr "alternative" "8")
(cond [(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
(const_string "V4SF")
]
(const_string "TI"))
+ (eq_attr "alternative" "9")
+ (cond [(match_test "TARGET_AVX512VL")
+ (const_string "TI")
+ (match_test "TARGET_AVX512F")
+ (const_string "SF")
+ (match_test "TARGET_AVX")
+ (const_string "TI")
+ (ior (not (match_test "TARGET_SSE2"))
+ (match_test "optimize_function_for_size_p (cfun)"))
+ (const_string "V4SF")
+ ]
+ (const_string "TI"))
(and (eq_attr "alternative" "10,11")
(not (match_test "TARGET_SSE2")))
(define_insn "*movhi_internal"
[(set (match_operand:HI 0 "nonimmediate_operand"
- "=r,r,r,m ,*k,*k ,r ,m ,*k ,?r,?*v,*v,*v,*v,m")
+ "=r,r,r,m ,*k,*k ,r ,m ,*k ,?r,?*v,*Yv,*v,*v,jm,m")
(match_operand:HI 1 "general_operand"
- "r ,n,m,rn,r ,*km,*k,*k,CBC,*v,r ,C ,*v,m ,*v"))]
+ "r ,n,m,rn,r ,*km,*k,*k,CBC,*v,r ,C ,*v,m ,*x,*v"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& ix86_hardreg_mov_ok (operands[0], operands[1])"
{
(cond [(eq_attr "alternative" "9,10,11,12,13")
(const_string "sse2")
(eq_attr "alternative" "14")
- (const_string "sse4")
+ (const_string "sse4_noavx")
+ (eq_attr "alternative" "15")
+ (const_string "avx")
]
(const_string "*")))
+ (set (attr "addr")
+ (if_then_else (eq_attr "alternative" "14")
+ (const_string "gpr16")
+ (const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "4,5,6,7")
(const_string "mskmov")
(eq_attr "alternative" "8")
(const_string "msklog")
- (eq_attr "alternative" "13,14")
+ (eq_attr "alternative" "13,14,15")
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "ssemov")
(const_string "sselog1"))
(set (attr "prefix")
(cond [(eq_attr "alternative" "4,5,6,7,8")
(const_string "vex")
- (eq_attr "alternative" "9,10,11,12,13,14")
+ (eq_attr "alternative" "9,10,11,12,13,14,15")
(const_string "maybe_evex")
]
(const_string "orig")))
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "SI"))
- (eq_attr "alternative" "13,14")
+ (eq_attr "alternative" "13,14,15")
(if_then_else (match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "TI"))
[(set_attr "type" "imovx")
(set_attr "mode" "SI")])
+;; Split sign-extension of single least significant bit as and x,$1;neg x
+;; (bit 0 set yields -1, clear yields 0; both emitted insns clobber
+;; the flags register, matching the clobber on the combined pattern).
+(define_insn_and_split "*extv<mode>_1_0"
+ [(set (match_operand:SWI48 0 "register_operand" "=r")
+ (sign_extract:SWI48 (match_operand:SWI48 1 "register_operand" "0")
+ (const_int 1)
+ (const_int 0)))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "#"
+ ""
+ [(parallel [(set (match_dup 0) (and:SWI48 (match_dup 1) (const_int 1)))
+ (clobber (reg:CC FLAGS_REG))])
+ (parallel [(set (match_dup 0) (neg:SWI48 (match_dup 0)))
+ (clobber (reg:CC FLAGS_REG))])])
+
(define_expand "extzv<mode>"
[(set (match_operand:SWI248 0 "register_operand")
(zero_extract:SWI248 (match_operand:SWI248 1 "register_operand")
[(set_attr "type" "imovx")
(set_attr "mode" "SI")])
-(define_insn "*extzvqi_mem_rex64"
- [(set (match_operand:QI 0 "norex_memory_operand" "=Bn")
- (subreg:QI
- (match_operator:SWI248 2 "extract_operator"
- [(match_operand 1 "int248_register_operand" "Q")
- (const_int 8)
- (const_int 8)]) 0))]
- "TARGET_64BIT && reload_completed"
- "mov{b}\t{%h1, %0|%0, %h1}"
- [(set_attr "type" "imov")
- (set_attr "mode" "QI")])
-
(define_insn "*extzvqi"
- [(set (match_operand:QI 0 "nonimmediate_operand" "=QBc,?R,m")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=QBn,?R")
(subreg:QI
(match_operator:SWI248 2 "extract_operator"
- [(match_operand 1 "int248_register_operand" "Q,Q,Q")
+ [(match_operand 1 "int248_register_operand" "Q,Q")
(const_int 8)
(const_int 8)]) 0))]
""
return "mov{b}\t{%h1, %0|%0, %h1}";
}
}
- [(set_attr "isa" "*,*,nox64")
+ [(set_attr "addr" "gpr8,*")
(set (attr "type")
(if_then_else (and (match_operand:QI 0 "register_operand")
(ior (not (match_operand:QI 0 "QIreg_operand"))
(const_string "SI")
(const_string "QI")))])
-(define_peephole2
- [(set (match_operand:QI 0 "register_operand")
- (subreg:QI
- (match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand")
- (const_int 8)
- (const_int 8)]) 0))
- (set (match_operand:QI 2 "norex_memory_operand") (match_dup 0))]
- "TARGET_64BIT
- && peep2_reg_dead_p (2, operands[0])"
- [(set (match_dup 2)
- (subreg:QI
- (match_op_dup 3
- [(match_dup 1)
- (const_int 8)
- (const_int 8)]) 0))])
-
(define_expand "insv<mode>"
[(set (zero_extract:SWI248 (match_operand:SWI248 0 "register_operand")
(match_operand:QI 1 "const_int_operand")
DONE;
})
-(define_insn "*insvqi_1_mem_rex64"
- [(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q")
- (const_int 8)
- (const_int 8))
- (subreg:SWI248
- (match_operand:QI 1 "norex_memory_operand" "Bn") 0))]
- "TARGET_64BIT && reload_completed"
- "mov{b}\t{%1, %h0|%h0, %1}"
- [(set_attr "type" "imov")
- (set_attr "mode" "QI")])
-
;; Insert operand 1 (register, immediate, or gpr8-addressable memory)
;; into the high byte (%h0) of a wider register.  Constant inputs are
;; narrowed to QImode so %b1 prints the low byte.  The former nox64
;; memory alternative is replaced by Bn plus the gpr8 "addr" limit.
(define_insn "@insv<mode>_1"
 [(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q,Q")
+ (match_operand 0 "int248_register_operand" "+Q")
 (const_int 8)
 (const_int 8))
- (match_operand:SWI248 1 "general_operand" "QnBc,m"))]
+ (match_operand:SWI248 1 "general_operand" "QnBn"))]
 ""
{
 if (CONST_INT_P (operands[1]))
 operands[1] = gen_int_mode (INTVAL (operands[1]), QImode);
 return "mov{b}\t{%b1, %h0|%h0, %b1}";
}
- [(set_attr "isa" "*,nox64")
+ [(set_attr "addr" "gpr8")
 (set_attr "type" "imov")
 (set_attr "mode" "QI")])
;; Same insertion into the high byte, but with a QImode source wrapped
;; in a paradoxical subreg; single alternative with the gpr8 address
;; restriction replaces the old */nox64 alternative pair.
(define_insn "*insvqi_1"
 [(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q,Q")
+ (match_operand 0 "int248_register_operand" "+Q")
 (const_int 8)
 (const_int 8))
 (subreg:SWI248
- (match_operand:QI 1 "general_operand" "QnBc,m") 0))]
+ (match_operand:QI 1 "general_operand" "QnBn") 0))]
 ""
 "mov{b}\t{%1, %h0|%h0, %1}"
- [(set_attr "isa" "*,nox64")
+ [(set_attr "addr" "gpr8")
 (set_attr "type" "imov")
 (set_attr "mode" "QI")])
-(define_peephole2
- [(set (match_operand:QI 0 "register_operand")
- (match_operand:QI 1 "norex_memory_operand"))
- (set (zero_extract:SWI248 (match_operand 2 "int248_register_operand")
- (const_int 8)
- (const_int 8))
- (subreg:SWI248 (match_dup 0) 0))]
- "TARGET_64BIT
- && peep2_reg_dead_p (2, operands[0])"
- [(set (zero_extract:SWI248 (match_dup 2)
- (const_int 8)
- (const_int 8))
- (subreg:SWI248 (match_dup 1) 0))])
-
;; Eliminate redundant insv, e.g. xorl %eax,%eax; movb $0, %ah
(define_peephole2
[(parallel [(set (match_operand:SWI48 0 "general_reg_operand")
(set_attr "type" "push,multi")
(set_attr "mode" "SI,TI")])
+;; APX PUSH2: push two 64-bit registers with one instruction.  The
+;; TImode push operand models the combined 16-byte stack adjustment;
+;; the two DImode sources are carried in UNSPEC_APXPUSH2.
+(define_insn "push2_di"
+ [(set (match_operand:TI 0 "push_operand" "=<")
+ (unspec:TI [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "register_operand" "r")]
+ UNSPEC_APXPUSH2))]
+ "TARGET_APX_PUSH2POP2"
+ "push2\t%1, %2"
+ [(set_attr "mode" "TI")
+ (set_attr "type" "multi")
+ (set_attr "prefix" "evex")])
+
+;; APX POP2: pop two 64-bit registers with one instruction.  The TImode
+;; pop operand models the 16-byte stack read; the second destination is
+;; tied to it via UNSPEC_APXPOP2_HIGH inside the same parallel.
+(define_insn "pop2_di"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec:DI [(match_operand:TI 1 "pop_operand" ">")]
+ UNSPEC_APXPOP2_LOW))
+ (set (match_operand:DI 2 "register_operand" "=r")
+ (unspec:DI [(const_int 0)] UNSPEC_APXPOP2_HIGH))])]
+ "TARGET_APX_PUSH2POP2"
+ "pop2\t%0, %2"
+ [(set_attr "mode" "TI")
+ (set_attr "prefix" "evex")])
+
(define_insn "*pushsf_rex64"
[(set (match_operand:SF 0 "push_operand" "=X,X,X")
(match_operand:SF 1 "nonmemory_no_elim_operand" "f,rF,v"))]
;; Possible store forwarding (partial memory) stall in alternatives 4, 6 and 7.
(define_insn "*movdf_internal"
[(set (match_operand:DF 0 "nonimmediate_operand"
- "=Yf*f,m ,Yf*f,?r ,!o,?*r ,!o,!o,?r,?m,?r,?r,v,v,v,m,*x,*x,*x,m ,?r,?v,r ,o ,r ,m")
+ "=Yf*f,m ,Yf*f,?r ,!o,?*r ,!o,!o,?r,?m,?r,?r,Yv,v,v,m,*x,*x,*x,m ,?r,?v,r ,o ,r ,m")
(match_operand:DF 1 "general_operand"
- "Yf*fm,Yf*f,G ,roF,r ,*roF,*r,F ,rm,rC,C ,F ,C,v,m,v,C ,*x,m ,*x, v, r,roF,rF,rmF,rC"))]
+ "Yf*fm,Yf*f,G ,roF,r ,*roF,*r,F ,rm,rC,C ,F ,C ,v,m,v,C ,*x,m ,*x, v, r,roF,rF,rmF,rC"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress || reload_completed
|| !CONST_DOUBLE_P (operands[1])
(define_insn "*movsf_internal"
[(set (match_operand:SF 0 "nonimmediate_operand"
- "=Yf*f,m ,Yf*f,?r ,?m,v,v,v,m,?r,?v,!*y,!*y,!m,!r,!*y,r ,m")
+ "=Yf*f,m ,Yf*f,?r ,?m,Yv,v,v,m,?r,?v,!*y,!*y,!m,!r,!*y,r ,m")
(match_operand:SF 1 "general_operand"
- "Yf*fm,Yf*f,G ,rmF,rF,C,v,m,v,v ,r ,*y ,m ,*y,*y,r ,rmF,rF"))]
+ "Yf*fm,Yf*f,G ,rmF,rF,C ,v,m,v,v ,r ,*y ,m ,*y,*y,r ,rmF,rF"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress || reload_completed
|| !CONST_DOUBLE_P (operands[1])
(eq_attr "alternative" "11")
(const_string "DI")
(eq_attr "alternative" "5")
- (cond [(and (match_test "TARGET_AVX512F")
+ (cond [(and (match_test "TARGET_AVX512F && TARGET_EVEX512")
(not (match_test "TARGET_PREFER_AVX256")))
(const_string "V16SF")
(match_test "TARGET_AVX")
better to maintain the whole registers in single format
to avoid problems on using packed logical operations. */
(eq_attr "alternative" "6")
- (cond [(ior (match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
+ (cond [(match_test "TARGET_AVX512VL")
+ (const_string "V4SF")
+ (match_test "TARGET_AVX512F")
+ (const_string "SF")
+ (ior (match_test "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
(match_test "TARGET_SSE_SPLIT_REGS"))
(const_string "V4SF")
]
(define_insn "*mov<mode>_internal"
[(set (match_operand:HFBF 0 "nonimmediate_operand"
- "=?r,?r,?r,?m,v,v,?r,m,?v,v")
+ "=?r,?r,?r,?m ,Yv,v,?r,jm,m,?v,v")
(match_operand:HFBF 1 "general_operand"
- "r ,F ,m ,r<hfbfconstf>,C,v, v,v,r ,m"))]
+ "r ,F ,m ,r<hfbfconstf>,C ,v, v,v ,v,r ,m"))]
"!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (lra_in_progress
|| reload_completed
}
}
[(set (attr "isa")
- (cond [(eq_attr "alternative" "4,5,6,8,9")
+ (cond [(eq_attr "alternative" "4,5,6,9,10")
(const_string "sse2")
(eq_attr "alternative" "7")
- (const_string "sse4")
+ (const_string "sse4_noavx")
+ (eq_attr "alternative" "8")
+ (const_string "avx")
]
(const_string "*")))
+ (set (attr "addr")
+ (if_then_else (eq_attr "alternative" "7")
+ (const_string "gpr16")
+ (const_string "*")))
(set (attr "type")
(cond [(eq_attr "alternative" "4")
(const_string "sselog1")
- (eq_attr "alternative" "5,6,8")
+ (eq_attr "alternative" "5,6,9")
(const_string "ssemov")
- (eq_attr "alternative" "7,9")
+ (eq_attr "alternative" "7,8,10")
(if_then_else
(match_test ("TARGET_AVX512FP16"))
(const_string "ssemov")
]
(const_string "imov")))
(set (attr "prefix")
- (cond [(eq_attr "alternative" "4,5,6,7,8,9")
+ (cond [(eq_attr "alternative" "4,5,6,7,8,9,10")
(const_string "maybe_vex")
]
(const_string "orig")))
(set (attr "mode")
(cond [(eq_attr "alternative" "4")
(const_string "V4SF")
- (eq_attr "alternative" "6,8")
+ (eq_attr "alternative" "6,9")
(if_then_else
(match_test "TARGET_AVX512FP16")
(const_string "HI")
(const_string "SI"))
- (eq_attr "alternative" "7,9")
+ (eq_attr "alternative" "7,8,10")
(if_then_else
(match_test "TARGET_AVX512FP16")
(const_string "HI")
(eq_attr "alternative" "12")
(const_string "x64_avx512bw")
(eq_attr "alternative" "13")
- (const_string "avx512bw")
+ (const_string "avx512bw_512")
]
(const_string "*")))
(set (attr "mmx_isa")
"split_double_mode (DImode, &operands[0], 1, &operands[3], &operands[4]);")
(define_mode_attr kmov_isa
- [(QI "avx512dq") (HI "avx512f") (SI "avx512bw") (DI "avx512bw")])
+ [(QI "avx512dq") (HI "avx512f") (SI "avx512bw") (DI "avx512bw_512")])
(define_insn "zero_extend<mode>di2"
[(set (match_operand:DI 0 "register_operand" "=r,*r,*k")
&& optimize_insn_for_speed_p ()
&& reload_completed
&& (!EXT_REX_SSE_REG_P (operands[0])
- || TARGET_AVX512VL)"
+ || TARGET_AVX512VL || TARGET_EVEX512)"
[(set (match_dup 2)
(float_extend:V2DF
(vec_select:V2SF
(set_attr "memory" "none")
(set (attr "enabled")
(if_then_else (eq_attr "alternative" "2")
- (symbol_ref "TARGET_AVX512F && !TARGET_AVX512VL
- && !TARGET_PREFER_AVX256")
+ (symbol_ref "TARGET_AVX512F && TARGET_EVEX512
+ && !TARGET_AVX512VL && !TARGET_PREFER_AVX256")
(const_string "*")))])
(define_expand "extend<mode>xf2"
gcc_assert (TARGET_64BIT);
return "lea{l}\t{%E1, %k0|%k0, %E1}";
}
- else
+ else
return "lea{<imodesuffix>}\t{%E1, %0|%0, %E1}";
}
[(set_attr "type" "lea")
return "add{<imodesuffix>}\t{%2, %0|%0, %2}";
}
}
- "&& reload_completed"
+ "&& reload_completed
+ && !(rtx_equal_p (operands[0], operands[1])
+ || rtx_equal_p (operands[0], operands[2]))"
[(set (strict_low_part (match_dup 0)) (match_dup 1))
(parallel
[(set (strict_low_part (match_dup 0))
(const_string "alu")))
(set_attr "mode" "<MODE>")])
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Add the high byte (%h2) of a wider register into the low byte of
+;; operand 0 (strict_low_part).  When the destination does not overlap
+;; operand 1, alternative 1 splits into a low-byte copy followed by the
+;; destructive add after reload.
+(define_insn_and_split "*addqi_ext<mode>_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "register_operand" "+Q,&Q"))
+ (plus:QI
+ (subreg:QI
+ (match_operator:SWI248 3 "extract_operator"
+ [(match_operand 2 "int248_register_operand" "Q,Q")
+ (const_int 8)
+ (const_int 8)]) 0)
+ (match_operand:QI 1 "nonimmediate_operand" "0,!qm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_PARTIAL_REG_STALL || optimize_function_for_size_p (cfun)"
+ "@
+ add{b}\t{%h2, %0|%0, %h2}
+ #"
+ "&& reload_completed
+ && !rtx_equal_p (operands[0], operands[1])"
+ [(set (strict_low_part (match_dup 0)) (match_dup 1))
+ (parallel
+ [(set (strict_low_part (match_dup 0))
+ (plus:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 2) (const_int 8) (const_int 8)]) 0)
+ (match_dup 0)))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
;; Split non destructive adds if we cannot use lea.
(define_split
[(set (match_operand:SWI48 0 "register_operand")
(set_attr "mode" "<MODE>")])
;; Add the high byte (%h2) of a wider register into operand 0 (register
;; or memory).  The separate nox64 constant-memory alternative is folded
;; into one alternative with Bn plus the gpr8 "addr" restriction.
(define_insn "*addqi_ext<mode>_0"
- [(set (match_operand:QI 0 "nonimm_x64constmem_operand" "=QBc,m")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=QBn")
 (plus:QI
 (subreg:QI
 (match_operator:SWI248 3 "extract_operator"
- [(match_operand 2 "int248_register_operand" "Q,Q")
+ [(match_operand 2 "int248_register_operand" "Q")
 (const_int 8)
 (const_int 8)]) 0)
- (match_operand:QI 1 "nonimm_x64constmem_operand" "0,0")))
+ (match_operand:QI 1 "nonimmediate_operand" "0")))
 (clobber (reg:CC FLAGS_REG))]
 ""
 "add{b}\t{%h2, %0|%0, %h2}"
- [(set_attr "isa" "*,nox64")
+ [(set_attr "addr" "gpr8")
 (set_attr "type" "alu")
 (set_attr "mode" "QI")])
(match_operand:QI 2 "const_int_operand")) 0))
(clobber (reg:CC FLAGS_REG))])])
-(define_insn "*addqi_ext<mode>_1"
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Add operand 2 into the high byte (%h0) of a wider register.  The old
+;; insn required operands[0] == operands[1]; the insn_and_split instead
+;; accepts a distinct input in alternative 1 and, after reload, first
+;; copies operand 1's high byte into operand 0's high byte and then
+;; performs the destructive add.
+(define_insn_and_split "*addqi_ext<mode>_1"
 [(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q,Q")
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
 (const_int 8)
 (const_int 8))
 (subreg:SWI248
 (plus:QI
 (subreg:QI
 (match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "0,0")
+ [(match_operand 1 "int248_register_operand" "0,!Q")
 (const_int 8)
 (const_int 8)]) 0)
- (match_operand:QI 2 "general_x64constmem_operand" "QnBc,m")) 0))
+ (match_operand:QI 2 "general_operand" "QnBn,QnBn")) 0))
 (clobber (reg:CC FLAGS_REG))]
- "/* FIXME: without this LRA can't reload this pattern, see PR82524. */
- rtx_equal_p (operands[0], operands[1])"
+ ""
{
+ if (which_alternative)
+ return "#";
+
 switch (get_attr_type (insn))
 {
 case TYPE_INCDEC:
 if (operands[2] == const1_rtx)
 return "inc{b}\t%h0";
 else
- {
+ {
 gcc_assert (operands[2] == constm1_rtx);
- return "dec{b}\t%h0";
- }
+ return "dec{b}\t%h0";
+ }
 default:
 return "add{b}\t{%2, %h0|%h0, %2}";
 }
}
- [(set_attr "isa" "*,nox64")
+ "reload_completed
+ && !rtx_equal_p (operands[0], operands[1])"
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (zero_extract:SWI248
+ (match_dup 1) (const_int 8) (const_int 8)))
+ (parallel
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (subreg:SWI248
+ (plus:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)
+ (match_dup 2)) 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ [(set_attr "addr" "gpr8")
 (set (attr "type")
 (if_then_else (match_operand:QI 2 "incdec_operand")
 (const_string "incdec")
 (const_string "alu")))
 (set_attr "mode" "QI")])
-(define_insn "*addqi_ext<mode>_2"
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; High-byte op high-byte form, generalized from add-only to plusminus.
+;; <comm> makes operand 1 commutative for PLUS only, and the split
+;; condition likewise permits operands[0] == operands[2] only for PLUS
+;; (for MINUS the operand order is not interchangeable).
+(define_insn_and_split "*<insn>qi_ext<mode>_2"
 [(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q")
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
 (const_int 8)
 (const_int 8))
 (subreg:SWI248
- (plus:QI
+ (plusminus:QI
 (subreg:QI
 (match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "%0")
+ [(match_operand 1 "int248_register_operand" "<comm>0,!Q")
 (const_int 8)
 (const_int 8)]) 0)
 (subreg:QI
 (match_operator:SWI248 4 "extract_operator"
- [(match_operand 2 "int248_register_operand" "Q")
+ [(match_operand 2 "int248_register_operand" "Q,Q")
 (const_int 8)
 (const_int 8)]) 0)) 0))
- (clobber (reg:CC FLAGS_REG))]
- "/* FIXME: without this LRA can't reload this pattern, see PR82524. */
- rtx_equal_p (operands[0], operands[1])
- || rtx_equal_p (operands[0], operands[2])"
- "add{b}\t{%h2, %h0|%h0, %h2}"
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "@
+ <insn>{b}\t{%h2, %h0|%h0, %h2}
+ #"
+ "reload_completed
+ && !(rtx_equal_p (operands[0], operands[1])
+ || (<CODE> == PLUS && rtx_equal_p (operands[0], operands[2])))"
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (zero_extract:SWI248
+ (match_dup 1) (const_int 8) (const_int 8)))
+ (parallel
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (subreg:SWI248
+ (plusminus:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)
+ (subreg:QI
+ (match_op_dup 4
+ [(match_dup 2) (const_int 8) (const_int 8)]) 0)) 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
 [(set_attr "type" "alu")
 (set_attr "mode" "QI")])
"@
sub{<imodesuffix>}\t{%2, %0|%0, %2}
#"
- "&& reload_completed"
+ "&& reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
[(set (strict_low_part (match_dup 0)) (match_dup 1))
(parallel
[(set (strict_low_part (match_dup 0))
[(set_attr "type" "alu")
(set_attr "mode" "<MODE>")])
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Subtract the high byte (%h2) of a wider register from the low byte
+;; of operand 0 (strict_low_part).  Mirrors *addqi_ext<mode>_1_slp:
+;; when the destination differs from operand 1, split into a low-byte
+;; copy followed by the destructive subtract after reload.
+(define_insn_and_split "*subqi_ext<mode>_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "register_operand" "+Q,&Q"))
+ (minus:QI
+ (match_operand:QI 1 "nonimmediate_operand" "0,!qm")
+ (subreg:QI
+ (match_operator:SWI248 3 "extract_operator"
+ [(match_operand 2 "int248_register_operand" "Q,Q")
+ (const_int 8)
+ (const_int 8)]) 0)))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_PARTIAL_REG_STALL || optimize_function_for_size_p (cfun)"
+ "@
+ sub{b}\t{%h2, %0|%0, %h2}
+ #"
+ "&& reload_completed
+ && !rtx_equal_p (operands[0], operands[1])"
+ [(set (strict_low_part (match_dup 0)) (match_dup 1))
+ (parallel
+ [(set (strict_low_part (match_dup 0))
+ (minus:QI
+ (match_dup 0)
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 2) (const_int 8) (const_int 8)]) 0)))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
(define_insn "*sub<mode>_2"
[(set (reg FLAGS_REG)
(compare
(set_attr "mode" "SI")])
;; Subtract the high byte (%h2) of a wider register from operand 0
;; (register or memory).  As with *addqi_ext<mode>_0, the nox64 memory
;; alternative is folded into Bn plus the gpr8 "addr" restriction.
(define_insn "*subqi_ext<mode>_0"
- [(set (match_operand:QI 0 "nonimm_x64constmem_operand" "=QBc,m")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=QBn")
 (minus:QI
- (match_operand:QI 1 "nonimm_x64constmem_operand" "0,0")
+ (match_operand:QI 1 "nonimmediate_operand" "0")
 (subreg:QI
 (match_operator:SWI248 3 "extract_operator"
- [(match_operand 2 "int248_register_operand" "Q,Q")
+ [(match_operand 2 "int248_register_operand" "Q")
 (const_int 8)
 (const_int 8)]) 0)))
 (clobber (reg:CC FLAGS_REG))]
 ""
 "sub{b}\t{%h2, %0|%0, %h2}"
- [(set_attr "isa" "*,nox64")
+ [(set_attr "addr" "gpr8")
 (set_attr "type" "alu")
 (set_attr "mode" "QI")])
-(define_insn "*subqi_ext<mode>_2"
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Subtract operand 2 from the high byte (%h0) of a wider register.
+;; Replaces the old two-highpart *subqi_ext<mode>_2 (now covered by the
+;; plusminus *<insn>qi_ext<mode>_2 pattern); alternative 1 splits after
+;; reload into a high-byte copy followed by the destructive subtract.
+(define_insn_and_split "*subqi_ext<mode>_1"
 [(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q")
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
 (const_int 8)
 (const_int 8))
 (subreg:SWI248
 (minus:QI
 (subreg:QI
 (match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "0")
+ [(match_operand 1 "int248_register_operand" "0,!Q")
 (const_int 8)
 (const_int 8)]) 0)
- (subreg:QI
- (match_operator:SWI248 4 "extract_operator"
- [(match_operand 2 "int248_register_operand" "Q")
- (const_int 8)
- (const_int 8)]) 0)) 0))
- (clobber (reg:CC FLAGS_REG))]
- "/* FIXME: without this LRA can't reload this pattern, see PR82524. */
- rtx_equal_p (operands[0], operands[1])"
- "sub{b}\t{%h2, %h0|%h0, %h2}"
- [(set_attr "type" "alu")
+ (match_operand:QI 2 "general_operand" "QnBn,QnBn")) 0))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "@
+ sub{b}\t{%2, %h0|%h0, %2}
+ #"
+ "reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (zero_extract:SWI248
+ (match_dup 1) (const_int 8) (const_int 8)))
+ (parallel
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (subreg:SWI248
+ (minus:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)
+ (match_dup 2)) 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ [(set_attr "addr" "gpr8")
+ (set_attr "type" "alu")
 (set_attr "mode" "QI")])
;; Subtract with jump on overflow.
[(set_attr "type" "alu")
(set_attr "mode" "<MODE>")])
-(define_insn "*add<mode>3_cc_overflow_1"
+(define_insn "@add<mode>3_cc_overflow_1"
[(set (reg:CCC FLAGS_REG)
(compare:CCC
(plus:SWI
[(parallel [(set (match_operand:<DWI> 0 "register_operand")
(mult:<DWI>
(any_extend:<DWI>
- (match_operand:DWIH 1 "nonimmediate_operand"))
+ (match_operand:DWIH 1 "register_operand"))
(any_extend:<DWI>
- (match_operand:DWIH 2 "register_operand"))))
+ (match_operand:DWIH 2 "nonimmediate_operand"))))
(clobber (reg:CC FLAGS_REG))])])
+;; Operand 1 must be a register and operand 2 may be memory, matching the
+;; "%0"/"qm" constraints of the *<u>mulqihi3 insn this expands to.
(define_expand "<u>mulqihi3"
[(parallel [(set (match_operand:HI 0 "register_operand")
(mult:HI
(any_extend:HI
- (match_operand:QI 1 "nonimmediate_operand"))
+ (match_operand:QI 1 "register_operand"))
(any_extend:HI
- (match_operand:QI 2 "register_operand"))))
+ (match_operand:QI 2 "nonimmediate_operand"))))
(clobber (reg:CC FLAGS_REG))]
"TARGET_QIMODE_MATH")
+;; BMI2 mulx: operand 2 is tied to the implicit %rdx source (constraint
+;; "d", commutative with operand 3 via "%"); the high half of the product
+;; is now expressed with the canonical umul_highpart RTX instead of the
+;; truncate/lshiftrt/zero_extend combination.
(define_insn "*bmi2_umul<mode><dwi>3_1"
[(set (match_operand:DWIH 0 "register_operand" "=r")
(mult:DWIH
- (match_operand:DWIH 2 "nonimmediate_operand" "%d")
+ (match_operand:DWIH 2 "register_operand" "%d")
(match_operand:DWIH 3 "nonimmediate_operand" "rm")))
(set (match_operand:DWIH 1 "register_operand" "=r")
- (truncate:DWIH
- (lshiftrt:<DWI>
- (mult:<DWI> (zero_extend:<DWI> (match_dup 2))
- (zero_extend:<DWI> (match_dup 3)))
- (match_operand:QI 4 "const_int_operand"))))]
- "TARGET_BMI2 && INTVAL (operands[4]) == <MODE_SIZE> * BITS_PER_UNIT
- && !(MEM_P (operands[2]) && MEM_P (operands[3]))"
+ (umul_highpart:DWIH (match_dup 2) (match_dup 3)))]
+ "TARGET_BMI2"
"mulx\t{%3, %0, %1|%1, %0, %3}"
[(set_attr "type" "imulx")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
+;; Tweak *bmi2_umul<mode><dwi>3_1 to eliminate following mov.
+;; If the insn after mulx copies one of its two results into another
+;; register and the source dies there, retarget that mulx output
+;; directly (guarded so the copy does not alias the other output).
+(define_peephole2
+ [(parallel [(set (match_operand:DWIH 0 "general_reg_operand")
+ (mult:DWIH (match_operand:DWIH 2 "register_operand")
+ (match_operand:DWIH 3 "nonimmediate_operand")))
+ (set (match_operand:DWIH 1 "general_reg_operand")
+ (umul_highpart:DWIH (match_dup 2) (match_dup 3)))])
+ (set (match_operand:DWIH 4 "general_reg_operand")
+ (match_operand:DWIH 5 "general_reg_operand"))]
+ "TARGET_BMI2
+ && ((REGNO (operands[5]) == REGNO (operands[0])
+ && REGNO (operands[1]) != REGNO (operands[4]))
+ || (REGNO (operands[5]) == REGNO (operands[1])
+ && REGNO (operands[0]) != REGNO (operands[4])))
+ && peep2_reg_dead_p (2, operands[5])"
+ [(parallel [(set (match_dup 0) (mult:DWIH (match_dup 2) (match_dup 3)))
+ (set (match_dup 1)
+ (umul_highpart:DWIH (match_dup 2) (match_dup 3)))])]
+{
+ /* Replace whichever mulx output was being copied by the copy's dest.  */
+ if (REGNO (operands[5]) == REGNO (operands[0]))
+ operands[0] = operands[4];
+ else
+ operands[1] = operands[4];
+})
+
(define_insn "*umul<mode><dwi>3_1"
[(set (match_operand:<DWI> 0 "register_operand" "=r,A")
(mult:<DWI>
(zero_extend:<DWI>
- (match_operand:DWIH 1 "nonimmediate_operand" "%d,0"))
+ (match_operand:DWIH 1 "register_operand" "%d,a"))
(zero_extend:<DWI>
(match_operand:DWIH 2 "nonimmediate_operand" "rm,rm"))))
(clobber (reg:CC FLAGS_REG))]
[(parallel [(set (match_dup 3)
(mult:DWIH (match_dup 1) (match_dup 2)))
(set (match_dup 4)
- (truncate:DWIH
- (lshiftrt:<DWI>
- (mult:<DWI> (zero_extend:<DWI> (match_dup 1))
- (zero_extend:<DWI> (match_dup 2)))
- (match_dup 5))))])]
+ (umul_highpart:DWIH (match_dup 1) (match_dup 2)))])]
{
split_double_mode (<DWI>mode, &operands[0], 1, &operands[3], &operands[4]);
[(set (match_operand:<DWI> 0 "register_operand" "=A")
(mult:<DWI>
(sign_extend:<DWI>
- (match_operand:DWIH 1 "nonimmediate_operand" "%0"))
+ (match_operand:DWIH 1 "register_operand" "%a"))
(sign_extend:<DWI>
(match_operand:DWIH 2 "nonimmediate_operand" "rm"))))
(clobber (reg:CC FLAGS_REG))]
[(set (match_operand:HI 0 "register_operand" "=a")
(mult:HI
(any_extend:HI
- (match_operand:QI 1 "nonimmediate_operand" "%0"))
+ (match_operand:QI 1 "register_operand" "%0"))
(any_extend:HI
(match_operand:QI 2 "nonimmediate_operand" "qm"))))
(clobber (reg:CC FLAGS_REG))]
(set_attr "bdver1_decode" "direct")
(set_attr "mode" "QI")])
-;; Highpart multiplication patterns
-(define_insn "<s>mul<mode>3_highpart"
- [(set (match_operand:DWIH 0 "register_operand" "=d")
- (any_mul_highpart:DWIH
- (match_operand:DWIH 1 "register_operand" "%a")
- (match_operand:DWIH 2 "nonimmediate_operand" "rm")))
- (clobber (match_scratch:DWIH 3 "=1"))
- (clobber (reg:CC FLAGS_REG))]
+;; Widening multiplication peephole2s to tweak register allocation.
+;; mov imm,%rdx; mov %rdi,%rax; mulq %rdx -> mov imm,%rax; mulq %rdi
+;; Load the immediate straight into the register feeding the widening
+;; multiply, dropping the reg-reg copy; only valid when the old immediate
+;; register is dead or overwritten afterwards (see condition).
+(define_peephole2
+ [(set (match_operand:DWIH 0 "general_reg_operand")
+ (match_operand:DWIH 1 "immediate_operand"))
+ (set (match_operand:DWIH 2 "general_reg_operand")
+ (match_operand:DWIH 3 "general_reg_operand"))
+ (parallel [(set (match_operand:<DWI> 4 "general_reg_operand")
+ (mult:<DWI> (zero_extend:<DWI> (match_dup 2))
+ (zero_extend:<DWI> (match_dup 0))))
+ (clobber (reg:CC FLAGS_REG))])]
+ "REGNO (operands[3]) != AX_REG
+ && REGNO (operands[0]) != REGNO (operands[2])
+ && REGNO (operands[0]) != REGNO (operands[3])
+ && (REGNO (operands[0]) == REGNO (operands[4])
+ || REGNO (operands[0]) == DX_REG
+ || peep2_reg_dead_p (3, operands[0]))"
+ [(set (match_dup 2) (match_dup 1))
+ (parallel [(set (match_dup 4)
+ (mult:<DWI> (zero_extend:<DWI> (match_dup 2))
+ (zero_extend:<DWI> (match_dup 3))))
+ (clobber (reg:CC FLAGS_REG))])])
+
+;; mov imm,%rax; mov %rdi,%rdx; mulx %rax -> mov imm,%rdx; mulx %rdi
+;; Same trick as above for the BMI2 mulx (mult + umul_highpart) form.
+(define_peephole2
+ [(set (match_operand:DWIH 0 "general_reg_operand")
+ (match_operand:DWIH 1 "immediate_operand"))
+ (set (match_operand:DWIH 2 "general_reg_operand")
+ (match_operand:DWIH 3 "general_reg_operand"))
+ (parallel [(set (match_operand:DWIH 4 "general_reg_operand")
+ (mult:DWIH (match_dup 2) (match_dup 0)))
+ (set (match_operand:DWIH 5 "general_reg_operand")
+ (umul_highpart:DWIH (match_dup 2) (match_dup 0)))])]
+ "REGNO (operands[3]) != DX_REG
+ && REGNO (operands[0]) != REGNO (operands[2])
+ && REGNO (operands[0]) != REGNO (operands[3])
+ && (REGNO (operands[0]) == REGNO (operands[4])
+ || REGNO (operands[0]) == REGNO (operands[5])
+ || peep2_reg_dead_p (3, operands[0]))"
+ [(set (match_dup 2) (match_dup 1))
+ (parallel [(set (match_dup 4)
+ (mult:DWIH (match_dup 2) (match_dup 3)))
+ (set (match_dup 5)
+ (umul_highpart:DWIH (match_dup 2) (match_dup 3)))])])
+
+;; Highpart multiplication patterns
+(define_insn "<s>mul<mode>3_highpart"
+ [(set (match_operand:DWIH 0 "register_operand" "=d")
+ (any_mul_highpart:DWIH
+ (match_operand:DWIH 1 "register_operand" "%a")
+ (match_operand:DWIH 2 "nonimmediate_operand" "rm")))
+ (clobber (match_scratch:DWIH 3 "=1"))
+ (clobber (reg:CC FLAGS_REG))]
""
"<sgnprefix>mul{<imodesuffix>}\t%2"
[(set_attr "type" "imul")
(and:QI
(subreg:QI
(match_operator:SWI248 2 "extract_operator"
- [(match_operand 0 "int248_register_operand" "Q,Q")
+ [(match_operand 0 "int248_register_operand" "Q")
(const_int 8)
(const_int 8)]) 0)
- (match_operand:QI 1 "general_x64constmem_operand" "QnBc,m"))
+ (match_operand:QI 1 "general_operand" "QnBn"))
(const_int 0)))]
"ix86_match_ccmode (insn, CCNOmode)"
"test{b}\t{%1, %h0|%h0, %1}"
- [(set_attr "isa" "*,nox64")
+ [(set_attr "addr" "gpr8")
(set_attr "type" "test")
(set_attr "mode" "QI")])
operands[3] = gen_int_mode (INTVAL (operands[3]), QImode);
})
+;; Narrow test instructions with immediate operands that test
+;; memory locations for zero. E.g. testl $0x00aa0000, mem can be
+;; converted to testb $0xaa, mem+2. Reject volatile locations and
+;; targets where reading (possibly unaligned) part of memory
+;; location after a large write to the same address causes
+;; store-to-load forwarding stall.
+(define_peephole2
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ
+ (and:SWI248 (match_operand:SWI248 0 "memory_operand")
+ (match_operand 1 "const_int_operand"))
+ (const_int 0)))]
+ "!TARGET_PARTIAL_MEMORY_READ_STALL && !MEM_VOLATILE_P (operands[0])"
+ [(set (reg:CCZ FLAGS_REG)
+ (compare:CCZ (match_dup 2) (const_int 0)))]
+{
+ unsigned HOST_WIDE_INT ival = UINTVAL (operands[1]);
+ int first_nonzero_byte, bitsize;
+ rtx new_addr, new_const;
+ machine_mode new_mode;
+
+ if (ival == 0)
+ FAIL;
+
+ /* Clear bits outside mode width. */
+ ival &= GET_MODE_MASK (<MODE>mode);
+
+ /* Byte offset of the lowest nonzero byte of the mask.  */
+ first_nonzero_byte = ctz_hwi (ival) / BITS_PER_UNIT;
+
+ ival >>= first_nonzero_byte * BITS_PER_UNIT;
+
+ /* Number of significant bits left after dropping low zero bytes.  */
+ bitsize = sizeof (ival) * BITS_PER_UNIT - clz_hwi (ival);
+
+ if (bitsize <= GET_MODE_BITSIZE (QImode))
+ new_mode = QImode;
+ else if (bitsize <= GET_MODE_BITSIZE (HImode))
+ new_mode = HImode;
+ else if (bitsize <= GET_MODE_BITSIZE (SImode))
+ new_mode = SImode;
+ else
+ new_mode = DImode;
+
+ /* Require a strictly narrower access, otherwise keep the original.  */
+ if (GET_MODE_SIZE (new_mode) >= GET_MODE_SIZE (<MODE>mode))
+ FAIL;
+
+ new_addr = adjust_address (operands[0], new_mode, first_nonzero_byte);
+ new_const = gen_int_mode (ival, new_mode);
+
+ operands[2] = gen_rtx_AND (new_mode, new_addr, new_const);
+})
+
;; %%% This used to optimize known byte-wide and operations to memory,
;; and sometimes to QImode registers. If this is considered useful,
;; it should be done with splitters.
and{q}\t{%2, %0|%0, %2}
#
#"
- [(set_attr "isa" "x64,x64,x64,x64,avx512bw")
+ [(set_attr "isa" "x64,x64,x64,x64,avx512bw_512")
(set_attr "type" "alu,alu,alu,imovx,msklog")
(set_attr "length_immediate" "*,*,*,0,*")
(set (attr "prefix_rex")
(symbol_ref "true")))])
;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; NB: the split must not fire when operand 0 overlaps operand 1 or 2 --
+;; the leading low-part copy would clobber the other input first.
-(define_insn_and_split "*and<mode>_1_slp"
+(define_insn_and_split "*<code><mode>_1_slp"
[(set (strict_low_part (match_operand:SWI12 0 "register_operand" "+<r>,&<r>"))
- (and:SWI12 (match_operand:SWI12 1 "nonimmediate_operand" "%0,!<r>")
- (match_operand:SWI12 2 "general_operand" "<r>mn,<r>mn")))
+ (any_logic:SWI12 (match_operand:SWI12 1 "nonimmediate_operand" "%0,!<r>")
+ (match_operand:SWI12 2 "general_operand" "<r>mn,<r>mn")))
(clobber (reg:CC FLAGS_REG))]
"!TARGET_PARTIAL_REG_STALL || optimize_function_for_size_p (cfun)"
"@
- and{<imodesuffix>}\t{%2, %0|%0, %2}
+ <logic>{<imodesuffix>}\t{%2, %0|%0, %2}
#"
- "&& reload_completed"
+ "&& reload_completed
+ && !(rtx_equal_p (operands[0], operands[1])
+ || rtx_equal_p (operands[0], operands[2]))"
[(set (strict_low_part (match_dup 0)) (match_dup 1))
(parallel
[(set (strict_low_part (match_dup 0))
- (and:SWI12 (match_dup 0) (match_dup 2)))
+ (any_logic:SWI12 (match_dup 0) (match_dup 2)))
(clobber (reg:CC FLAGS_REG))])]
""
[(set_attr "type" "alu")
(set_attr "mode" "<MODE>")])
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Logic op between the high byte of operand 2 (%h2) and the low byte
+;; of operand 0.  For the "&Q"/"!qm" alternative, split into a low-part
+;; copy of operand 1 followed by the two-address form on operand 0.
+(define_insn_and_split "*<code>qi_ext<mode>_1_slp"
+ [(set (strict_low_part (match_operand:QI 0 "register_operand" "+Q,&Q"))
+ (any_logic:QI
+ (subreg:QI
+ (match_operator:SWI248 3 "extract_operator"
+ [(match_operand 2 "int248_register_operand" "Q,Q")
+ (const_int 8)
+ (const_int 8)]) 0)
+ (match_operand:QI 1 "nonimmediate_operand" "0,!qm")))
+ (clobber (reg:CC FLAGS_REG))]
+ "!TARGET_PARTIAL_REG_STALL || optimize_function_for_size_p (cfun)"
+ "@
+ <logic>{b}\t{%h2, %0|%0, %h2}
+ #"
+ "&& reload_completed
+ && !rtx_equal_p (operands[0], operands[1])"
+ [(set (strict_low_part (match_dup 0)) (match_dup 1))
+ (parallel
+ [(set (strict_low_part (match_dup 0))
+ (any_logic:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 2) (const_int 8) (const_int 8)]) 0)
+ (match_dup 0)))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
+
(define_split
[(set (match_operand:SWI248 0 "register_operand")
(and:SWI248 (match_operand:SWI248 1 "nonimmediate_operand")
[(set_attr "type" "alu")
(set_attr "mode" "<MODE>")])
-(define_insn "*andqi_ext<mode>_0"
- [(set (match_operand:QI 0 "nonimm_x64constmem_operand" "=QBc,m")
- (and:QI
+;; Logic op between the high byte of operand 2 (%h2) and a QImode
+;; reg/mem destination; now iterated over and/ior/xor via any_logic.
+(define_insn "*<code>qi_ext<mode>_0"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=QBn")
+ (any_logic:QI
(subreg:QI
(match_operator:SWI248 3 "extract_operator"
- [(match_operand 2 "int248_register_operand" "Q,Q")
+ [(match_operand 2 "int248_register_operand" "Q")
(const_int 8)
(const_int 8)]) 0)
- (match_operand:QI 1 "nonimm_x64constmem_operand" "0,0")))
+ (match_operand:QI 1 "nonimmediate_operand" "0")))
(clobber (reg:CC FLAGS_REG))]
""
- "and{b}\t{%h2, %0|%0, %h2}"
- [(set_attr "isa" "*,nox64")
+ "<logic>{b}\t{%h2, %0|%0, %h2}"
+ [(set_attr "addr" "gpr8")
(set_attr "type" "alu")
(set_attr "mode" "QI")])
(match_operand:QI 2 "const_int_operand")) 0))
(clobber (reg:CC FLAGS_REG))])])
-(define_insn "*andqi_ext<mode>_1"
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Logic op of operand 2 into the high byte (%h0) of operand 0.
+(define_insn_and_split "*<code>qi_ext<mode>_1"
[(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q,Q")
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
(const_int 8)
(const_int 8))
(subreg:SWI248
- (and:QI
+ (any_logic:QI
(subreg:QI
(match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "0,0")
+ [(match_operand 1 "int248_register_operand" "0,!Q")
(const_int 8)
(const_int 8)]) 0)
- (match_operand:QI 2 "general_x64constmem_operand" "QnBc,m")) 0))
+ (match_operand:QI 2 "general_operand" "QnBn,QnBn")) 0))
(clobber (reg:CC FLAGS_REG))]
- "/* FIXME: without this LRA can't reload this pattern, see PR82524. */
- rtx_equal_p (operands[0], operands[1])"
- "and{b}\t{%2, %h0|%h0, %2}"
- [(set_attr "isa" "*,nox64")
+ ""
+ "@
+ <logic>{b}\t{%2, %h0|%h0, %2}
+ #"
+ "reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
+;; Copy %h1 into %h0, then operate on %h0 (re-matches alternative 0).
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (zero_extract:SWI248
+ (match_dup 1) (const_int 8) (const_int 8)))
+ (parallel
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (subreg:SWI248
+ (any_logic:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)
+ (match_dup 2)) 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
+ [(set_attr "addr" "gpr8")
(set_attr "type" "alu")
(set_attr "mode" "QI")])
-;; Generated by peephole translating test to and. This shows up
-;; often in fp comparisons.
-(define_insn "*andqi_ext<mode>_1_cc"
- [(set (reg FLAGS_REG)
- (compare
- (and:QI
- (subreg:QI
- (match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "0,0")
- (const_int 8)
- (const_int 8)]) 0)
- (match_operand:QI 2 "general_x64constmem_operand" "QnBc,m"))
- (const_int 0)))
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Flag-setting variant: sets both %h0 and the flags from the result.
+(define_insn_and_split "*<code>qi_ext<mode>_1_cc"
+ [(set (match_operand 4 "flags_reg_operand")
+ (match_operator 5 "compare_operator"
+ [(any_logic:QI
+ (subreg:QI
+ (match_operator:SWI248 3 "extract_operator"
+ [(match_operand 1 "int248_register_operand" "0,!Q")
+ (const_int 8)
+ (const_int 8)]) 0)
+ (match_operand:QI 2 "general_operand" "QnBn,QnBn"))
+ (const_int 0)]))
(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q,Q")
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
(const_int 8)
(const_int 8))
(subreg:SWI248
- (and:QI
+ (any_logic:QI
(subreg:QI
(match_op_dup 3
- [(match_dup 1)
- (const_int 8)
- (const_int 8)]) 0)
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)
(match_dup 2)) 0))]
- "ix86_match_ccmode (insn, CCNOmode)
- /* FIXME: without this LRA can't reload this pattern, see PR82524. */
- && rtx_equal_p (operands[0], operands[1])"
- "and{b}\t{%2, %h0|%h0, %2}"
- [(set_attr "isa" "*,nox64")
+ "ix86_match_ccmode (insn, CCNOmode)"
+ "@
+ <logic>{b}\t{%2, %h0|%h0, %2}
+ #"
+ "&& reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
+;; Copy %h1 into %h0 first; both sets of the resulting parallel must
+;; then read %h0 so it re-matches this pattern (alternative 0).
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (zero_extract:SWI248
+ (match_dup 1) (const_int 8) (const_int 8)))
+ (parallel
+ [(set (match_dup 4)
+ (match_op_dup 5
+ [(any_logic:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)
+ (match_dup 2))
+ (const_int 0)]))
+ (set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (subreg:SWI248
+ (any_logic:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)
+ (match_dup 2)) 0))])]
+ ""
+ [(set_attr "addr" "gpr8")
+ (set_attr "type" "alu")
(set_attr "mode" "QI")])
-(define_insn "*andqi_ext<mode>_2"
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Logic op between two high bytes, %h1 op %h2 -> %h0.  The split guard
+;; also excludes operand 0 == operand 2, since the initial %h1 -> %h0
+;; copy would clobber %h2 in that case.
+(define_insn_and_split "*<code>qi_ext<mode>_2"
[(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q")
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
(const_int 8)
(const_int 8))
(subreg:SWI248
- (and:QI
+ (any_logic:QI
(subreg:QI
(match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "%0")
+ [(match_operand 1 "int248_register_operand" "%0,!Q")
(const_int 8)
(const_int 8)]) 0)
(subreg:QI
(match_operator:SWI248 4 "extract_operator"
- [(match_operand 2 "int248_register_operand" "Q")
+ [(match_operand 2 "int248_register_operand" "Q,Q")
(const_int 8)
(const_int 8)]) 0)) 0))
(clobber (reg:CC FLAGS_REG))]
- "/* FIXME: without this LRA can't reload this pattern, see PR82524. */
- rtx_equal_p (operands[0], operands[1])
- || rtx_equal_p (operands[0], operands[2])"
- "and{b}\t{%h2, %h0|%h0, %h2}"
+ ""
+ "@
+ <logic>{b}\t{%h2, %h0|%h0, %h2}
+ #"
+ "reload_completed
+ && !(rtx_equal_p (operands[0], operands[1])
+ || rtx_equal_p (operands[0], operands[2]))"
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (zero_extract:SWI248
+ (match_dup 1) (const_int 8) (const_int 8)))
+ (parallel
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (subreg:SWI248
+ (any_logic:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)
+ (subreg:QI
+ (match_op_dup 4
+ [(match_dup 2) (const_int 8) (const_int 8)]) 0)) 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
[(set_attr "type" "alu")
(set_attr "mode" "QI")])
-;; *andqi_ext<mode>_3 is defined via *<code>qi_ext<mode>_3 below.
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; High byte of a full-width logic op: %h0 = high byte of (op1 op op2).
+(define_insn_and_split "*<code>qi_ext<mode>_3"
+ [(set (zero_extract:SWI248
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
+ (const_int 8)
+ (const_int 8))
+ (match_operator:SWI248 3 "extract_operator"
+ [(any_logic
+ (match_operand 1 "int248_register_operand" "%0,!Q")
+ (match_operand 2 "int248_register_operand" "Q,Q"))
+ (const_int 8)
+ (const_int 8)]))
+ (clobber (reg:CC FLAGS_REG))]
+ "GET_MODE (operands[1]) == GET_MODE (operands[2])"
+ "@
+ <logic>{b}\t{%h2, %h0|%h0, %h2}
+ #"
+ "&& reload_completed
+ && !(rtx_equal_p (operands[0], operands[1])
+ || rtx_equal_p (operands[0], operands[2]))"
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (zero_extract:SWI248
+ (match_dup 1) (const_int 8) (const_int 8)))
+ (parallel
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (match_op_dup 3
+ [(any_logic (match_dup 4) (match_dup 2))
+ (const_int 8) (const_int 8)]))
+ (clobber (reg:CC FLAGS_REG))])]
+;; Operand 4 is operand 0 re-expressed in operand 1's mode so the modes
+;; inside the modeless any_logic stay consistent after the split.
+ "operands[4] = gen_lowpart (GET_MODE (operands[1]), operands[0]);"
+ [(set_attr "type" "alu")
+ (set_attr "mode" "QI")])
;; Convert wide AND instructions with immediate operand to shorter QImode
;; equivalents when possible.
(not:SWI48 (match_operand:SWI48 1 "register_operand" "r,r,k"))
(match_operand:SWI48 2 "nonimmediate_operand" "r,m,k")))
(clobber (reg:CC FLAGS_REG))]
- "TARGET_BMI || TARGET_AVX512BW"
+ "TARGET_BMI
+ || (TARGET_AVX512BW && (<MODE>mode == SImode || TARGET_EVEX512))"
"@
andn\t{%2, %1, %0|%0, %1, %2}
andn\t{%2, %1, %0|%0, %1, %2}
#"
- [(set_attr "isa" "bmi,bmi,avx512bw")
+ [(set_attr "isa" "bmi,bmi,<kmov_isa>")
(set_attr "type" "bitmanip,bitmanip,msklog")
(set_attr "btver2_decode" "direct, double,*")
(set_attr "mode" "<MODE>")])
<logic>{<imodesuffix>}\t{%2, %0|%0, %2}
<logic>{<imodesuffix>}\t{%2, %0|%0, %2}
#"
- [(set (attr "isa")
- (cond [(eq_attr "alternative" "2")
- (if_then_else (eq_attr "mode" "SI,DI")
- (const_string "avx512bw")
- (const_string "avx512f"))
- ]
- (const_string "*")))
+ [(set_attr "isa" "*,*,<kmov_isa>")
(set_attr "type" "alu, alu, msklog")
(set_attr "mode" "<MODE>")])
DONE;
}
}
- [(set (attr "isa")
- (cond [(eq_attr "alternative" "2")
- (if_then_else (eq_attr "mode" "SI,DI")
- (const_string "avx512bw")
- (const_string "avx512f"))
- ]
- (const_string "*")))
+ [(set_attr "isa" "*,*,<kmov_isa>")
(set_attr "type" "alu, alu, msklog")
(set_attr "mode" "<MODE>")])
(symbol_ref "!TARGET_PARTIAL_REG_STALL")]
(symbol_ref "true")))])
-;; Alternative 1 is needed to work around LRA limitation, see PR82524.
-(define_insn_and_split "*<code><mode>_1_slp"
- [(set (strict_low_part (match_operand:SWI12 0 "register_operand" "+<r>,&<r>"))
- (any_or:SWI12 (match_operand:SWI12 1 "nonimmediate_operand" "%0,!<r>")
- (match_operand:SWI12 2 "general_operand" "<r>mn,<r>mn")))
- (clobber (reg:CC FLAGS_REG))]
- "!TARGET_PARTIAL_REG_STALL || optimize_function_for_size_p (cfun)"
- "@
- <logic>{<imodesuffix>}\t{%2, %0|%0, %2}
- #"
- "&& reload_completed"
- [(set (strict_low_part (match_dup 0)) (match_dup 1))
- (parallel
- [(set (strict_low_part (match_dup 0))
- (any_or:SWI12 (match_dup 0) (match_dup 2)))
- (clobber (reg:CC FLAGS_REG))])]
- ""
- [(set_attr "type" "alu")
- (set_attr "mode" "<MODE>")])
-
;; convert (sign_extend:WIDE (any_logic:NARROW (memory, immediate)))
;; to (any_logic:WIDE (sign_extend (memory)), (sign_extend (immediate))).
;; This eliminates sign extension after logic operation.
[(set_attr "type" "alu")
(set_attr "mode" "<MODE>")])
-(define_insn "*<code>qi_ext<mode>_0"
- [(set (match_operand:QI 0 "nonimm_x64constmem_operand" "=QBc,m")
- (any_or:QI
- (subreg:QI
- (match_operator:SWI248 3 "extract_operator"
- [(match_operand 2 "int248_register_operand" "Q,Q")
- (const_int 8)
- (const_int 8)]) 0)
- (match_operand:QI 1 "nonimm_x64constmem_operand" "0,0")))
- (clobber (reg:CC FLAGS_REG))]
- ""
- "<logic>{b}\t{%h2, %0|%0, %h2}"
- [(set_attr "isa" "*,nox64")
- (set_attr "type" "alu")
- (set_attr "mode" "QI")])
-
-(define_insn "*<code>qi_ext<mode>_1"
- [(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q,Q")
- (const_int 8)
- (const_int 8))
- (subreg:SWI248
- (any_or:QI
- (subreg:QI
- (match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "0,0")
- (const_int 8)
- (const_int 8)]) 0)
- (match_operand:QI 2 "general_x64constmem_operand" "QnBc,m")) 0))
- (clobber (reg:CC FLAGS_REG))]
- "(!TARGET_PARTIAL_REG_STALL || optimize_function_for_size_p (cfun))
- /* FIXME: without this LRA can't reload this pattern, see PR82524. */
- && rtx_equal_p (operands[0], operands[1])"
- "<logic>{b}\t{%2, %h0|%h0, %2}"
- [(set_attr "isa" "*,nox64")
- (set_attr "type" "alu")
- (set_attr "mode" "QI")])
-
-(define_insn "*<code>qi_ext<mode>_2"
- [(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q")
- (const_int 8)
- (const_int 8))
- (subreg:SWI248
- (any_or:QI
- (subreg:QI
- (match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "%0")
- (const_int 8)
- (const_int 8)]) 0)
- (subreg:QI
- (match_operator:SWI248 4 "extract_operator"
- [(match_operand 2 "int248_register_operand" "Q")
- (const_int 8)
- (const_int 8)]) 0)) 0))
- (clobber (reg:CC FLAGS_REG))]
- "(!TARGET_PARTIAL_REG_STALL || optimize_function_for_size_p (cfun))
- /* FIXME: without this LRA can't reload this pattern, see PR82524. */
- && (rtx_equal_p (operands[0], operands[1])
- || rtx_equal_p (operands[0], operands[2]))"
- "<logic>{b}\t{%h2, %h0|%h0, %h2}"
- [(set_attr "type" "alu")
- (set_attr "mode" "QI")])
-
-(define_insn "*<code>qi_ext<mode>_3"
- [(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q")
- (const_int 8)
- (const_int 8))
- (zero_extract:SWI248
- (any_logic:SWI248
- (match_operand 1 "int248_register_operand" "%0")
- (match_operand 2 "int248_register_operand" "Q"))
- (const_int 8)
- (const_int 8)))
- (clobber (reg:CC FLAGS_REG))]
- "(!TARGET_PARTIAL_REG_STALL || optimize_function_for_size_p (cfun))
- /* FIXME: without this LRA can't reload this pattern, see PR82524. */
- && (rtx_equal_p (operands[0], operands[1])
- || rtx_equal_p (operands[0], operands[2]))"
- "<logic>{b}\t{%h2, %h0|%h0, %h2}"
- [(set_attr "type" "alu")
- (set_attr "mode" "QI")])
-
;; Convert wide OR instructions with immediate operand to shorter QImode
;; equivalents when possible.
;; Don't do the splitting with memory operands, since it introduces risk
(const_int 8)) 0)
(match_dup 2)) 0))])])
-(define_insn "*xorqi_ext<mode>_1_cc"
- [(set (reg FLAGS_REG)
- (compare
- (xor:QI
- (subreg:QI
- (match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "0,0")
- (const_int 8)
- (const_int 8)]) 0)
- (match_operand:QI 2 "general_x64constmem_operand" "QnBc,m"))
- (const_int 0)))
- (set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q,Q")
- (const_int 8)
- (const_int 8))
- (subreg:SWI248
- (xor:QI
- (subreg:QI
- (match_op_dup 3
- [(match_dup 1)
- (const_int 8)
- (const_int 8)]) 0)
- (match_dup 2)) 0))]
- "ix86_match_ccmode (insn, CCNOmode)
- /* FIXME: without this LRA can't reload this pattern, see PR82524. */
- && rtx_equal_p (operands[0], operands[1])"
- "xor{b}\t{%2, %h0|%h0, %2}"
- [(set_attr "isa" "*,nox64")
- (set_attr "type" "alu")
- (set_attr "mode" "QI")])
-
;; Peephole2 rega = 0; rega op= regb into rega = regb.
(define_peephole2
[(parallel [(set (match_operand:SWI 0 "general_reg_operand")
"@
neg{<imodesuffix>}\t%0
#"
- "&& reload_completed"
+ "&& reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
[(set (strict_low_part (match_dup 0)) (match_dup 1))
(parallel
[(set (strict_low_part (match_dup 0))
(set (match_operand:SWI48 0 "register_operand")
(neg:SWI48 (match_dup 1)))])])
-(define_insn "*negqi_ext<mode>_2"
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Negate the high byte: %h0 = -%h1.
+(define_insn_and_split "*negqi_ext<mode>_1"
[(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q")
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
(const_int 8)
(const_int 8))
(subreg:SWI248
(neg:QI
(subreg:QI
(match_operator:SWI248 2 "extract_operator"
- [(match_operand 1 "int248_register_operand" "0")
+ [(match_operand 1 "int248_register_operand" "0,!Q")
(const_int 8)
(const_int 8)]) 0)) 0))
(clobber (reg:CC FLAGS_REG))]
- "/* FIXME: without this LRA can't reload this pattern, see PR82524. */
- rtx_equal_p (operands[0], operands[1])"
- "neg{b}\t%h0"
+ ""
+ "@
+ neg{b}\t%h0
+ #"
+ "reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (zero_extract:SWI248
+ (match_dup 1) (const_int 8) (const_int 8)))
+ (parallel
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (subreg:SWI248
+ (neg:QI
+ (subreg:QI
+ (match_op_dup 2
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)) 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
[(set_attr "type" "negnot")
(set_attr "mode" "QI")])
"@
not{<imodesuffix>}\t%0
#"
- [(set (attr "isa")
- (cond [(eq_attr "alternative" "1")
- (if_then_else (eq_attr "mode" "SI,DI")
- (const_string "avx512bw")
- (const_string "avx512f"))
- ]
- (const_string "*")))
+ [(set_attr "isa" "*,<kmov_isa>")
(set_attr "type" "negnot,msklog")
(set_attr "mode" "<MODE>")])
"@
not{l}\t%k0
#"
- [(set_attr "isa" "x64,avx512bw")
+ [(set_attr "isa" "x64,avx512bw_512")
(set_attr "type" "negnot,msklog")
(set_attr "mode" "SI,SI")])
"@
not{<imodesuffix>}\t%0
#"
- "&& reload_completed"
+ "&& reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
[(set (strict_low_part (match_dup 0)) (match_dup 1))
(set (strict_low_part (match_dup 0))
(not:SWI12 (match_dup 0)))]
(const_int 0)]))
(set (match_dup 1)
(zero_extend:DI (xor:SI (match_dup 3) (const_int -1))))])])
+
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Complement the high byte: %h0 = ~%h1.  No FLAGS_REG clobber since
+;; NOT does not modify the flags.
+(define_insn_and_split "*one_cmplqi_ext<mode>_1"
+ [(set (zero_extract:SWI248
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
+ (const_int 8)
+ (const_int 8))
+ (subreg:SWI248
+ (not:QI
+ (subreg:QI
+ (match_operator:SWI248 2 "extract_operator"
+ [(match_operand 1 "int248_register_operand" "0,!Q")
+ (const_int 8)
+ (const_int 8)]) 0)) 0))]
+ ""
+ "@
+ not{b}\t%h0
+ #"
+ "reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (zero_extract:SWI248
+ (match_dup 1) (const_int 8) (const_int 8)))
+ (set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (subreg:SWI248
+ (not:QI
+ (subreg:QI
+ (match_op_dup 2
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)) 0))]
+ ""
+ [(set_attr "type" "negnot")
+ (set_attr "mode" "QI")])
\f
;; Shift instructions
return "sal{<imodesuffix>}\t{%2, %0|%0, %2}";
}
}
- [(set_attr "isa" "*,*,bmi2,avx512bw")
+ [(set_attr "isa" "*,*,bmi2,<kmov_isa>")
(set (attr "type")
(cond [(eq_attr "alternative" "1")
(const_string "lea")
return "sal{<imodesuffix>}\t{%2, %0|%0, %2}";
}
}
- "&& reload_completed"
+ "&& reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
[(set (strict_low_part (match_dup 0)) (match_dup 1))
(parallel
[(set (strict_low_part (match_dup 0))
(const_string "*")))
(set_attr "mode" "<MODE>")])
-(define_insn "*ashlqi_ext<mode>_2"
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+;; Shift the high byte left: %h0 = %h1 << operand 2.
+(define_insn_and_split "*ashlqi_ext<mode>_1"
[(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q")
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
(const_int 8)
(const_int 8))
(subreg:SWI248
(ashift:QI
(subreg:QI
(match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "0")
+ [(match_operand 1 "int248_register_operand" "0,!Q")
(const_int 8)
(const_int 8)]) 0)
- (match_operand:QI 2 "nonmemory_operand" "cI")) 0))
- (clobber (reg:CC FLAGS_REG))]
- "/* FIXME: without this LRA can't reload this pattern, see PR82524. */
- rtx_equal_p (operands[0], operands[1])"
+ (match_operand:QI 2 "nonmemory_operand" "cI,cI")) 0))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
{
+ if (which_alternative)
+ return "#";
+
switch (get_attr_type (insn))
{
case TYPE_ALU:
return "sal{b}\t{%2, %h0|%h0, %2}";
}
}
+ "reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
+;; Copy %h1 into %h0 (as a high-byte insv, like the sibling neg/not/logic
+;; splits), then shift %h0 in place.
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (zero_extract:SWI248
+ (match_dup 1) (const_int 8) (const_int 8)))
+ (parallel
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (subreg:SWI248
+ (ashift:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)
+ (match_dup 2)) 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
[(set (attr "type")
(cond [(and (match_test "TARGET_DOUBLE_WITH_ADD")
(match_operand 2 "const1_operand"))
[(const_int 0)]
"ix86_split_<insn> (operands, operands[3], <DWI>mode); DONE;")
+;; Split truncations of double word right shifts into x86_shrd_1.
+;; Only the low half of the double-word result survives, so lshiftrt is
+;; correct for both logical and arithmetic shifts: with a count below
+;; the word size (see condition) the sign-extended bits never reach the
+;; low word.
+(define_insn_and_split "<insn><dwi>3_doubleword_lowpart"
+ [(set (match_operand:DWIH 0 "register_operand" "=&r")
+ (subreg:DWIH
+ (any_shiftrt:<DWI> (match_operand:<DWI> 1 "register_operand" "r")
+ (match_operand:QI 2 "const_int_operand")) 0))
+ (clobber (reg:CC FLAGS_REG))]
+ "UINTVAL (operands[2]) < <MODE_SIZE> * BITS_PER_UNIT"
+ "#"
+ "&& reload_completed"
+ [(parallel
+ [(set (match_dup 0)
+ (ior:DWIH (lshiftrt:DWIH (match_dup 0) (match_dup 2))
+ (subreg:DWIH
+ (ashift:<DWI> (zero_extend:<DWI> (match_dup 3))
+ (match_dup 4)) 0)))
+ (clobber (reg:CC FLAGS_REG))])]
+{
+ /* Operand 1 becomes the low word, operand 3 the high word.  */
+ split_double_mode (<DWI>mode, &operands[1], 1, &operands[1], &operands[3]);
+ operands[4] = GEN_INT ((<MODE_SIZE> * BITS_PER_UNIT) - INTVAL (operands[2]));
+ if (!rtx_equal_p (operands[0], operands[1]))
+ emit_move_insn (operands[0], operands[1]);
+})
+
(define_insn "x86_64_shrd"
[(set (match_operand:DI 0 "nonimmediate_operand" "+r*m")
(ior:DI (lshiftrt:DI (match_dup 0)
return "shr{<imodesuffix>}\t{%2, %0|%0, %2}";
}
}
- [(set_attr "isa" "*,bmi2,avx512bw")
+ [(set_attr "isa" "*,bmi2,<kmov_isa>")
(set_attr "type" "ishift,ishiftx,msklog")
(set (attr "length_immediate")
(if_then_else
else
return "<shift>{<imodesuffix>}\t{%2, %0|%0, %2}";
}
- "&& reload_completed"
+ "&& reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
[(set (strict_low_part (match_dup 0)) (match_dup 1))
(parallel
[(set (strict_low_part (match_dup 0))
(const_string "*")))
(set_attr "mode" "<MODE>")])
-(define_insn "*<insn>qi_ext<mode>_2"
+;; Alternative 1 is needed to work around LRA limitation, see PR82524.
+(define_insn_and_split "*<insn>qi_ext<mode>_1"
[(set (zero_extract:SWI248
- (match_operand 0 "int248_register_operand" "+Q")
+ (match_operand 0 "int248_register_operand" "+Q,&Q")
(const_int 8)
(const_int 8))
(subreg:SWI248
(any_shiftrt:QI
(subreg:QI
(match_operator:SWI248 3 "extract_operator"
- [(match_operand 1 "int248_register_operand" "0")
+ [(match_operand 1 "int248_register_operand" "0,!Q")
(const_int 8)
(const_int 8)]) 0)
- (match_operand:QI 2 "nonmemory_operand" "cI")) 0))
- (clobber (reg:CC FLAGS_REG))]
- "/* FIXME: without this LRA can't reload this pattern, see PR82524. */
- rtx_equal_p (operands[0], operands[1])"
+ (match_operand:QI 2 "nonmemory_operand" "cI,cI")) 0))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
{
+ if (which_alternative)
+ return "#";
+
if (operands[2] == const1_rtx
&& (TARGET_SHIFT1 || optimize_function_for_size_p (cfun)))
return "<shift>{b}\t%h0";
else
return "<shift>{b}\t{%2, %h0|%h0, %2}";
}
+ "reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (match_dup 1))
+ (parallel
+ [(set (zero_extract:SWI248
+ (match_dup 0) (const_int 8) (const_int 8))
+ (subreg:SWI248
+ (any_shiftrt:QI
+ (subreg:QI
+ (match_op_dup 3
+ [(match_dup 0) (const_int 8) (const_int 8)]) 0)
+ (match_dup 2)) 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ ""
[(set_attr "type" "ishift")
(set (attr "length_immediate")
(if_then_else
else
return "<rotate>{<imodesuffix>}\t{%2, %0|%0, %2}";
}
- "&& reload_completed"
+ "&& reload_completed
+ && !(rtx_equal_p (operands[0], operands[1]))"
[(set (strict_low_part (match_dup 0)) (match_dup 1))
(parallel
[(set (strict_low_part (match_dup 0))
[(parallel [(set (strict_low_part (match_dup 0))
(bswap:HI (match_dup 0)))
(clobber (reg:CC FLAGS_REG))])])
+
+;; Rotations through carry flag
+;; rcr{l,q} shifts the operand right by one and shifts the incoming
+;; carry (modelled as LTU on CCC flags) into the most significant bit.
+(define_insn "rcrsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 1))
+ (ashift:SI (ltu:SI (reg:CCC FLAGS_REG) (const_int 0))
+ (const_int 31))))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+ "rcr{l}\t%0"
+ [(set_attr "type" "ishift1")
+ (set_attr "memory" "none")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "SI")])
+
+;; NOTE(review): unlike rcrsi2 above, rcrdi2 does not set the "memory"
+;; attribute explicitly -- confirm whether the default derivation makes
+;; the attribute redundant here or the asymmetry is an oversight.
+(define_insn "rcrdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 1))
+ (ashift:DI (ltu:DI (reg:CCC FLAGS_REG) (const_int 0))
+ (const_int 63))))
+ (clobber (reg:CC FLAGS_REG))]
+ "TARGET_64BIT"
+ "rcr{q}\t%0"
+ [(set_attr "type" "ishift1")
+ (set_attr "length_immediate" "0")
+ (set_attr "mode" "DI")])
+
+;; Versions of sar and shr that set the carry flag.
+;; CF is modelled as an UNSPEC_CC_NE over (input & 1), i.e. the bit
+;; shifted out by the one-bit right shift.
+(define_insn "<insn><mode>3_carry"
+ [(set (reg:CCC FLAGS_REG)
+ (unspec:CCC [(and:SWI48 (match_operand:SWI48 1 "register_operand" "0")
+ (const_int 1))
+ (const_int 0)] UNSPEC_CC_NE))
+ (set (match_operand:SWI48 0 "register_operand" "=r")
+ (any_shiftrt:SWI48 (match_dup 1) (const_int 1)))]
+ ""
+{
+ /* Prefer the short one-operand form when shift-by-1 is cheap or we
+ optimize for size; both encode a shift count of exactly 1.  */
+ if (TARGET_SHIFT1 || optimize_function_for_size_p (cfun))
+ return "<shift>{<imodesuffix>}\t%0";
+ return "<shift>{<imodesuffix>}\t{1, %0|%0, 1}";
+}
+ [(set_attr "type" "ishift1")
+ (set (attr "length_immediate")
+ (if_then_else
+ (ior (match_test "TARGET_SHIFT1")
+ (match_test "optimize_function_for_size_p (cfun)"))
+ (const_string "0")
+ (const_string "*")))
+ (set_attr "mode" "<MODE>")])
\f
;; Bit set / bit test instructions
[(set (match_operand:MODEF 0 "register_operand" "=x,x")
(match_operator:MODEF 3 "sse_comparison_operator"
[(match_operand:MODEF 1 "register_operand" "0,x")
- (match_operand:MODEF 2 "nonimmediate_operand" "xm,xm")]))]
+ (match_operand:MODEF 2 "nonimmediate_operand" "xm,xjm")]))]
"SSE_FLOAT_MODE_P (<MODE>mode)"
"@
cmp%D3<ssemodesuffix>\t{%2, %0|%0, %2}
vcmp%D3<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
+ (set_attr "addr" "*,gpr16")
(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "HF")])
;; The memory input is split into a noavx alternative (plain "m") and an
;; avx alternative ("ja" with addr gpr16).  NOTE(review): "ja" appears to
;; restrict addresses to the low 16 GPRs because the VEX encoding cannot
;; reference APX extended registers r16-r31 -- confirm against the
;; constraint definitions.
(define_insn "*rcpsf2_sse"
- [(set (match_operand:SF 0 "register_operand" "=x,x,x")
- (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "0,x,m")]
+ [(set (match_operand:SF 0 "register_operand" "=x,x,x,x")
+ (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "0,x,m,ja")]
UNSPEC_RCP))]
"TARGET_SSE && TARGET_SSE_MATH"
"@
%vrcpss\t{%d1, %0|%0, %d1}
%vrcpss\t{%d1, %0|%0, %d1}
- %vrcpss\t{%1, %d0|%d0, %1}"
- [(set_attr "type" "sse")
+ rcpss\t{%1, %d0|%d0, %1}
+ vrcpss\t{%1, %d0|%d0, %1}"
+ [(set_attr "isa" "*,*,noavx,avx")
+ (set_attr "addr" "*,*,*,gpr16")
+ (set_attr "type" "sse")
(set_attr "atom_sse_attr" "rcp")
(set_attr "btver2_sse_attr" "rcp")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "SF")
- (set_attr "avx_partial_xmm_update" "false,false,true")
+ (set_attr "avx_partial_xmm_update" "false,false,true,true")
(set (attr "preferred_for_speed")
(cond [(match_test "TARGET_AVX")
(symbol_ref "true")
- (eq_attr "alternative" "1,2")
+ (eq_attr "alternative" "1,2,3")
(symbol_ref "!TARGET_SSE_PARTIAL_REG_DEPENDENCY")
]
(symbol_ref "true")))])
(set_attr "bdver1_decode" "direct")])
;; Same alternative split as *rcpsf2_sse above: the memory input gets a
;; noavx ("m") and an avx ("ja", addr gpr16) alternative so the VEX form
;; is kept away from APX extended-GPR addresses.
(define_insn "*rsqrtsf2_sse"
- [(set (match_operand:SF 0 "register_operand" "=x,x,x")
- (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "0,x,m")]
+ [(set (match_operand:SF 0 "register_operand" "=x,x,x,x")
+ (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "0,x,m,ja")]
UNSPEC_RSQRT))]
"TARGET_SSE && TARGET_SSE_MATH"
"@
%vrsqrtss\t{%d1, %0|%0, %d1}
%vrsqrtss\t{%d1, %0|%0, %d1}
- %vrsqrtss\t{%1, %d0|%d0, %1}"
- [(set_attr "type" "sse")
+ rsqrtss\t{%1, %d0|%d0, %1}
+ vrsqrtss\t{%1, %d0|%d0, %1}"
+ [(set_attr "isa" "*,*,noavx,avx")
+ (set_attr "addr" "*,*,*,gpr16")
+ (set_attr "type" "sse")
(set_attr "atom_sse_attr" "rcp")
(set_attr "btver2_sse_attr" "rcp")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "SF")
- (set_attr "avx_partial_xmm_update" "false,false,true")
+ (set_attr "avx_partial_xmm_update" "false,false,true,true")
(set (attr "preferred_for_speed")
(cond [(match_test "TARGET_AVX")
(symbol_ref "true")
- (eq_attr "alternative" "1,2")
+ (eq_attr "alternative" "1,2,3")
(symbol_ref "!TARGET_SSE_PARTIAL_REG_DEPENDENCY")
]
(symbol_ref "true")))])
(define_insn "sse4_1_round<mode>2"
[(set (match_operand:MODEFH 0 "register_operand" "=x,x,x,v,v")
(unspec:MODEFH
- [(match_operand:MODEFH 1 "nonimmediate_operand" "0,x,m,v,m")
+ [(match_operand:MODEFH 1 "nonimmediate_operand" "0,x,jm,v,m")
(match_operand:SI 2 "const_0_to_15_operand")]
UNSPEC_ROUND))]
"TARGET_SSE4_1"
[(set_attr "type" "ssecvt")
(set_attr "prefix_extra" "1,1,1,*,*")
(set_attr "length_immediate" "1")
+ (set_attr "addr" "*,*,gpr16,*,*")
(set_attr "prefix" "maybe_vex,maybe_vex,maybe_vex,evex,evex")
(set_attr "isa" "noavx512f,noavx512f,noavx512f,avx512f,avx512f")
(set_attr "avx_partial_xmm_update" "false,false,true,false,true")
DONE;
})
+;; round() for HFmode, expanded through the SSE4.1-style sequence.
+;; Only valid when FP traps and dynamic rounding modes may be ignored,
+;; hence the !flag_trapping_math && !flag_rounding_math condition.
+(define_expand "roundhf2"
+ [(match_operand:HF 0 "register_operand")
+ (match_operand:HF 1 "register_operand")]
+ "TARGET_AVX512FP16 && !flag_trapping_math && !flag_rounding_math"
+{
+ ix86_expand_round_sse4 (operands[0], operands[1]);
+ DONE;
+})
+
(define_expand "round<mode>2"
[(match_operand:X87MODEF 0 "register_operand")
(match_operand:X87MODEF 1 "nonimmediate_operand")]
[(set_attr "type" "fpspc")
(set_attr "mode" "<MODE>")])
+;; lround() for HFmode: convert-with-rounding to an integer mode via
+;; the generic expander; disabled under trapping/rounding-math since
+;; the expansion does not preserve those semantics.
+(define_expand "lroundhf<mode>2"
+ [(set (match_operand:SWI248 0 "register_operand")
+ (unspec:SWI248 [(match_operand:HF 1 "nonimmediate_operand")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_AVX512FP16 && !flag_trapping_math && !flag_rounding_math"
+{
+ ix86_expand_lround (operands[0], operands[1]);
+ DONE;
+})
+
+;; lrint() for HFmode maps directly onto the non-truncating
+;; HF->integer conversion (uses the current rounding mode).
+(define_expand "lrinthf<mode>2"
+ [(set (match_operand:SWI48 0 "register_operand")
+ (unspec:SWI48 [(match_operand:HF 1 "nonimmediate_operand")]
+ UNSPEC_FIX_NOTRUNC))]
+ "TARGET_AVX512FP16")
+
(define_expand "lrint<MODEF:mode><SWI48:mode>2"
[(set (match_operand:SWI48 0 "register_operand")
(unspec:SWI48 [(match_operand:MODEF 1 "nonimmediate_operand")]
&& (!TARGET_SSE_MATH || TARGET_MIX_SSE_I387)
&& flag_unsafe_math_optimizations")
+;; l{floor,ceil,...}() for HFmode: round to an HFmode temporary with the
+;; requested static rounding mode (exceptions suppressed via
+;; ROUND_NO_EXC), then truncate-convert the result to the integer mode.
+(define_expand "l<rounding_insn>hf<mode>2"
+ [(set (match_operand:SWI48 0 "nonimmediate_operand")
+ (unspec:SWI48 [(match_operand:HF 1 "register_operand")]
+ FIST_ROUNDING))]
+ "TARGET_AVX512FP16"
+{
+ rtx tmp = gen_reg_rtx (HFmode);
+ emit_insn (gen_sse4_1_roundhf2 (tmp, operands[1],
+ GEN_INT (ROUND_<ROUNDING> | ROUND_NO_EXC)));
+ emit_insn (gen_fix_trunchf<mode>2 (operands[0], tmp));
+ DONE;
+})
+
(define_expand "l<rounding_insn><MODEF:mode><SWI48:mode>2"
[(parallel [(set (match_operand:SWI48 0 "nonimmediate_operand")
(unspec:SWI48 [(match_operand:MODEF 1 "register_operand")]
})
;; Destination alternatives split by ISA: plain "r" for noavx, "jr" for
;; avx.  NOTE(review): "jr" presumably excludes APX extended GPRs for the
;; VEX-encoded form -- confirm against the constraint definitions.
(define_insn "movmsk_df"
- [(set (match_operand:SI 0 "register_operand" "=r")
+ [(set (match_operand:SI 0 "register_operand" "=r,jr")
(unspec:SI
- [(match_operand:DF 1 "register_operand" "x")]
+ [(match_operand:DF 1 "register_operand" "x,x")]
UNSPEC_MOVMSK))]
"SSE_FLOAT_MODE_P (DFmode) && TARGET_SSE_MATH"
"%vmovmskpd\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "prefix" "maybe_vex")
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssemov")
+ (set_attr "prefix" "maybe_evex")
(set_attr "mode" "DF")])
;; Use movmskpd in SSE mode to avoid store forwarding stall
DONE;
})
+;; Copy the stack-protector guard (operand 1) into the frame's canary
+;; slot (operand 0) through a fresh word_mode scratch register, which is
+;; cleared afterwards so the guard value does not linger in a register.
+(define_expand "stack_protect_set"
+ [(match_operand 0 "memory_operand")
+ (match_operand 1 "memory_operand")]
+ ""
+{
+ rtx scratch = gen_reg_rtx (word_mode);
+
+ emit_insn (gen_stack_protect_set_1
+ (ptr_mode, word_mode, operands[0], operands[1], scratch));
+ DONE;
+})
+
+;; mem-to-mem canary copy via earlyclobbered scratch operand 2; the
+;; scratch is zeroed at the end (xor clobbers FLAGS, hence the clobber).
+(define_insn "@stack_protect_set_1_<PTR:mode>_<W:mode>"
+ [(set (match_operand:PTR 0 "memory_operand" "=m")
+ (unspec:PTR [(match_operand:PTR 1 "memory_operand" "m")]
+ UNSPEC_SP_SET))
+ (set (match_operand:W 2 "register_operand" "=&r") (const_int 0))
+ (clobber (reg:CC FLAGS_REG))]
+ ""
+{
+ output_asm_insn ("mov{<PTR:imodesuffix>}\t{%1, %<PTR:k>2|%<PTR:k>2, %1}",
+ operands);
+ output_asm_insn ("mov{<PTR:imodesuffix>}\t{%<PTR:k>2, %0|%0, %<PTR:k>2}",
+ operands);
+ /* Zero the scratch: xor is smaller, mov $0 avoids the flags
+ dependency on targets that prefer it.  */
+ if (!TARGET_USE_MOV0 || optimize_insn_for_size_p ())
+ return "xor{l}\t%k2, %k2";
+ else
+ return "mov{l}\t{$0, %k2|%k2, 0}";
+}
+ [(set_attr "type" "multi")])
+
+;; Patterns and peephole2s to optimize stack_protect_set_1_<mode>
+;; immediately followed by *mov{s,d}i_internal, where we can avoid
+;; the xor{l} above. We don't split this, so that scheduling or
+;; anything else doesn't separate the *stack_protect_set* pattern from
+;; the set of the register that overwrites the register with a new value.
+
+;; Fold a following "reg = 0" move into the canary store: the zeroing of
+;; the scratch doubles as the zeroing of that register.
+(define_peephole2
+ [(parallel [(set (match_operand:PTR 0 "memory_operand")
+ (unspec:PTR [(match_operand:PTR 1 "memory_operand")]
+ UNSPEC_SP_SET))
+ (set (match_operand 2 "general_reg_operand") (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_operand 3 "general_reg_operand")
+ (match_operand 4 "const0_operand"))]
+ "GET_MODE (operands[2]) == word_mode
+ && GET_MODE_SIZE (GET_MODE (operands[3])) <= UNITS_PER_WORD
+ && peep2_reg_dead_p (0, operands[3])
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel [(set (match_dup 0)
+ (unspec:PTR [(match_dup 1)] UNSPEC_SP_SET))
+ (set (match_dup 3) (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ "operands[3] = gen_lowpart (word_mode, operands[3]);")
+
+;; Canary store combined with a following SImode register load: instead
+;; of zeroing the scratch, overwrite it with the real value (mov or lea;
+;; neither touches FLAGS, so no clobber).  The earlyclobber "=&r" keeps
+;; the scratch distinct from the inputs.
+(define_insn "*stack_protect_set_2_<mode>_si"
+ [(set (match_operand:PTR 0 "memory_operand" "=m")
+ (unspec:PTR [(match_operand:PTR 3 "memory_operand" "m")]
+ UNSPEC_SP_SET))
+ (set (match_operand:SI 1 "register_operand" "=&r")
+ (match_operand:SI 2 "general_operand" "g"))]
+ "reload_completed"
+{
+ output_asm_insn ("mov{<imodesuffix>}\t{%3, %<k>1|%<k>1, %3}", operands);
+ output_asm_insn ("mov{<imodesuffix>}\t{%<k>1, %0|%0, %<k>1}", operands);
+ if (pic_32bit_operand (operands[2], SImode)
+ || ix86_use_lea_for_mov (insn, operands + 1))
+ return "lea{l}\t{%E2, %1|%1, %E2}";
+ else
+ return "mov{l}\t{%2, %1|%1, %2}";
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "24")])
+
+;; DImode variant; alternatives pick the shortest encoding of the final
+;; move (zero-extendable constant, reg/mem, or 64-bit movabs immediate).
+(define_insn "*stack_protect_set_2_<mode>_di"
+ [(set (match_operand:PTR 0 "memory_operand" "=m,m,m")
+ (unspec:PTR [(match_operand:PTR 3 "memory_operand" "m,m,m")]
+ UNSPEC_SP_SET))
+ (set (match_operand:DI 1 "register_operand" "=&r,&r,&r")
+ (match_operand:DI 2 "general_operand" "Z,rem,i"))]
+ "TARGET_64BIT && reload_completed"
+{
+ output_asm_insn ("mov{<imodesuffix>}\t{%3, %<k>1|%<k>1, %3}", operands);
+ output_asm_insn ("mov{<imodesuffix>}\t{%<k>1, %0|%0, %<k>1}", operands);
+ if (pic_32bit_operand (operands[2], DImode))
+ return "lea{q}\t{%E2, %1|%1, %E2}";
+ else if (which_alternative == 0)
+ return "mov{l}\t{%k2, %k1|%k1, %k2}";
+ else if (which_alternative == 2)
+ return "movabs{q}\t{%2, %1|%1, %2}";
+ else if (ix86_use_lea_for_mov (insn, operands + 1))
+ return "lea{q}\t{%E2, %1|%1, %E2}";
+ else
+ return "mov{q}\t{%2, %1|%1, %2}";
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "24")])
+
+;; Merge a register-to-register move that FOLLOWS the canary store into
+;; the *stack_protect_set_2* pattern, dropping the scratch zeroing.
+(define_peephole2
+ [(parallel [(set (match_operand:PTR 0 "memory_operand")
+ (unspec:PTR [(match_operand:PTR 1 "memory_operand")]
+ UNSPEC_SP_SET))
+ (set (match_operand 2 "general_reg_operand") (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_operand:SWI48 3 "general_reg_operand")
+ (match_operand:SWI48 4 "general_gr_operand"))]
+ "GET_MODE (operands[2]) == word_mode
+ && peep2_reg_dead_p (0, operands[3])
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel [(set (match_dup 0)
+ (unspec:PTR [(match_dup 1)] UNSPEC_SP_SET))
+ (set (match_dup 3) (match_dup 4))])])
+
+;; Same merge for a move that PRECEDES the canary store; requires that
+;; the moved-to register is not used by the store's address operands.
+(define_peephole2
+ [(set (match_operand:SWI48 3 "general_reg_operand")
+ (match_operand:SWI48 4 "general_gr_operand"))
+ (parallel [(set (match_operand:PTR 0 "memory_operand")
+ (unspec:PTR [(match_operand:PTR 1 "memory_operand")]
+ UNSPEC_SP_SET))
+ (set (match_operand 2 "general_reg_operand") (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])]
+ "GET_MODE (operands[2]) == word_mode
+ && peep2_reg_dead_p (0, operands[3])
+ && peep2_reg_dead_p (2, operands[2])
+ && !reg_mentioned_p (operands[3], operands[0])
+ && !reg_mentioned_p (operands[3], operands[1])"
+ [(parallel [(set (match_dup 0)
+ (unspec:PTR [(match_dup 1)] UNSPEC_SP_SET))
+ (set (match_dup 3) (match_dup 4))])])
+
+;; Canary store combined with a following address computation (lea);
+;; lea does not clobber FLAGS so no clobber is listed.
+(define_insn "*stack_protect_set_3_<PTR:mode>_<SWI48:mode>"
+ [(set (match_operand:PTR 0 "memory_operand" "=m")
+ (unspec:PTR [(match_operand:PTR 3 "memory_operand" "m")]
+ UNSPEC_SP_SET))
+ (set (match_operand:SWI48 1 "register_operand" "=&r")
+ (match_operand:SWI48 2 "address_no_seg_operand" "Ts"))]
+ ""
+{
+ output_asm_insn ("mov{<PTR:imodesuffix>}\t{%3, %<PTR:k>1|%<PTR:k>1, %3}",
+ operands);
+ output_asm_insn ("mov{<PTR:imodesuffix>}\t{%<PTR:k>1, %0|%0, %<PTR:k>1}",
+ operands);
+ if (SImode_address_operand (operands[2], VOIDmode))
+ {
+ gcc_assert (TARGET_64BIT);
+ return "lea{l}\t{%E2, %k1|%k1, %E2}";
+ }
+ else
+ return "lea{<SWI48:imodesuffix>}\t{%E2, %1|%1, %E2}";
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "24")])
+
+;; Fold a following lea-style address move into the canary store.
+(define_peephole2
+ [(parallel [(set (match_operand:PTR 0 "memory_operand")
+ (unspec:PTR [(match_operand:PTR 1 "memory_operand")]
+ UNSPEC_SP_SET))
+ (set (match_operand 2 "general_reg_operand") (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_operand:SWI48 3 "general_reg_operand")
+ (match_operand:SWI48 4 "address_no_seg_operand"))]
+ "GET_MODE (operands[2]) == word_mode
+ && peep2_reg_dead_p (0, operands[3])
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel [(set (match_dup 0)
+ (unspec:PTR [(match_dup 1)] UNSPEC_SP_SET))
+ (set (match_dup 3) (match_dup 4))])])
+
+;; Canary store combined with a following SI->DI zero-extending load.
+(define_insn "*stack_protect_set_4z_<mode>_di"
+ [(set (match_operand:PTR 0 "memory_operand" "=m")
+ (unspec:PTR [(match_operand:PTR 3 "memory_operand" "m")]
+ UNSPEC_SP_SET))
+ (set (match_operand:DI 1 "register_operand" "=&r")
+ (zero_extend:DI (match_operand:SI 2 "nonimmediate_operand" "rm")))]
+ "TARGET_64BIT && reload_completed"
+{
+ output_asm_insn ("mov{<imodesuffix>}\t{%3, %<k>1|%<k>1, %3}", operands);
+ output_asm_insn ("mov{<imodesuffix>}\t{%<k>1, %0|%0, %<k>1}", operands);
+ /* A 32-bit mov/lea implicitly zero-extends to 64 bits.  */
+ if (ix86_use_lea_for_mov (insn, operands + 1))
+ return "lea{l}\t{%E2, %k1|%k1, %E2}";
+ else
+ return "mov{l}\t{%2, %k1|%k1, %2}";
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "24")])
+
+;; Canary store combined with a following SI->DI sign-extending load.
+(define_insn "*stack_protect_set_4s_<mode>_di"
+ [(set (match_operand:PTR 0 "memory_operand" "=m")
+ (unspec:PTR [(match_operand:PTR 3 "memory_operand" "m")]
+ UNSPEC_SP_SET))
+ (set (match_operand:DI 1 "register_operand" "=&r")
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "rm")))]
+ "TARGET_64BIT && reload_completed"
+{
+ output_asm_insn ("mov{<imodesuffix>}\t{%3, %<k>1|%<k>1, %3}", operands);
+ output_asm_insn ("mov{<imodesuffix>}\t{%<k>1, %0|%0, %<k>1}", operands);
+ return "movs{lq|x}\t{%2, %1|%1, %2}";
+}
+ [(set_attr "type" "multi")
+ (set_attr "length" "24")])
+
+;; Fold a following any_extend SI->DI load into the canary store.
+(define_peephole2
+ [(parallel [(set (match_operand:PTR 0 "memory_operand")
+ (unspec:PTR [(match_operand:PTR 1 "memory_operand")]
+ UNSPEC_SP_SET))
+ (set (match_operand 2 "general_reg_operand") (const_int 0))
+ (clobber (reg:CC FLAGS_REG))])
+ (set (match_operand:DI 3 "general_reg_operand")
+ (any_extend:DI
+ (match_operand:SI 4 "nonimmediate_gr_operand")))]
+ "TARGET_64BIT
+ && GET_MODE (operands[2]) == word_mode
+ && peep2_reg_dead_p (0, operands[3])
+ && peep2_reg_dead_p (1, operands[2])"
+ [(parallel [(set (match_dup 0)
+ (unspec:PTR [(match_dup 1)] UNSPEC_SP_SET))
+ (set (match_dup 3)
+ (any_extend:DI (match_dup 4)))])])
+
+;; Compare the frame's canary slot (operand 0) with the guard value
+;; (operand 1) and branch to operand 2 when they are equal.
+(define_expand "stack_protect_test"
+ [(match_operand 0 "memory_operand")
+ (match_operand 1 "memory_operand")
+ (match_operand 2)]
+ ""
+{
+ rtx flags = gen_rtx_REG (CCZmode, FLAGS_REG);
+
+ emit_insn (gen_stack_protect_test_1
+ (ptr_mode, flags, operands[0], operands[1]));
+
+ emit_jump_insn (gen_cbranchcc4 (gen_rtx_EQ (VOIDmode, flags, const0_rtx),
+ flags, const0_rtx, operands[2]));
+ DONE;
+})
+
+;; mem-to-mem compare via scratch; sub (rather than cmp) is used so the
+;; scratch also ends up zero on the expected-match path, leaving no copy
+;; of the guard value in a register.
+(define_insn "@stack_protect_test_1_<mode>"
+ [(set (match_operand:CCZ 0 "flags_reg_operand")
+ (unspec:CCZ [(match_operand:PTR 1 "memory_operand" "m")
+ (match_operand:PTR 2 "memory_operand" "m")]
+ UNSPEC_SP_TEST))
+ (clobber (match_scratch:PTR 3 "=&r"))]
+ ""
+{
+ output_asm_insn ("mov{<imodesuffix>}\t{%1, %3|%3, %1}", operands);
+ return "sub{<imodesuffix>}\t{%2, %3|%3, %2}";
+}
+ [(set_attr "type" "multi")])
;; Avoid redundant prefixes by splitting HImode arithmetic to SImode.
;; Do not split instructions with mask registers.
(symbol_ref "memory_address_length (operands[0], false)"))
(set_attr "memory" "none")])
-(define_expand "stack_protect_set"
- [(match_operand 0 "memory_operand")
- (match_operand 1 "memory_operand")]
- ""
-{
- emit_insn (gen_stack_protect_set_1
- (ptr_mode, operands[0], operands[1]));
- DONE;
-})
-
-(define_insn "@stack_protect_set_1_<mode>"
- [(set (match_operand:PTR 0 "memory_operand" "=m")
- (unspec:PTR [(match_operand:PTR 1 "memory_operand" "m")]
- UNSPEC_SP_SET))
- (set (match_scratch:PTR 2 "=&r") (const_int 0))
- (clobber (reg:CC FLAGS_REG))]
- ""
-{
- output_asm_insn ("mov{<imodesuffix>}\t{%1, %2|%2, %1}", operands);
- output_asm_insn ("mov{<imodesuffix>}\t{%2, %0|%0, %2}", operands);
- return "xor{l}\t%k2, %k2";
-}
- [(set_attr "type" "multi")])
-
-;; Patterns and peephole2s to optimize stack_protect_set_1_<mode>
-;; immediately followed by *mov{s,d}i_internal to the same register,
-;; where we can avoid the xor{l} above. We don't split this, so that
-;; scheduling or anything else doesn't separate the *stack_protect_set*
-;; pattern from the set of the register that overwrites the register
-;; with a new value.
-(define_insn "*stack_protect_set_2_<mode>"
- [(set (match_operand:PTR 0 "memory_operand" "=m")
- (unspec:PTR [(match_operand:PTR 3 "memory_operand" "m")]
- UNSPEC_SP_SET))
- (set (match_operand:SI 1 "register_operand" "=&r")
- (match_operand:SI 2 "general_operand" "g"))
- (clobber (reg:CC FLAGS_REG))]
- "reload_completed
- && !reg_overlap_mentioned_p (operands[1], operands[2])"
-{
- output_asm_insn ("mov{<imodesuffix>}\t{%3, %<k>1|%<k>1, %3}", operands);
- output_asm_insn ("mov{<imodesuffix>}\t{%<k>1, %0|%0, %<k>1}", operands);
- if (pic_32bit_operand (operands[2], SImode)
- || ix86_use_lea_for_mov (insn, operands + 1))
- return "lea{l}\t{%E2, %1|%1, %E2}";
- else
- return "mov{l}\t{%2, %1|%1, %2}";
-}
- [(set_attr "type" "multi")
- (set_attr "length" "24")])
-
-(define_peephole2
- [(parallel [(set (match_operand:PTR 0 "memory_operand")
- (unspec:PTR [(match_operand:PTR 1 "memory_operand")]
- UNSPEC_SP_SET))
- (set (match_operand:PTR 2 "general_reg_operand") (const_int 0))
- (clobber (reg:CC FLAGS_REG))])
- (set (match_operand:SI 3 "general_reg_operand")
- (match_operand:SI 4))]
- "REGNO (operands[2]) == REGNO (operands[3])
- && general_operand (operands[4], SImode)
- && (general_reg_operand (operands[4], SImode)
- || memory_operand (operands[4], SImode)
- || immediate_operand (operands[4], SImode))
- && !reg_overlap_mentioned_p (operands[3], operands[4])"
- [(parallel [(set (match_dup 0)
- (unspec:PTR [(match_dup 1)] UNSPEC_SP_SET))
- (set (match_dup 3) (match_dup 4))
- (clobber (reg:CC FLAGS_REG))])])
-
-(define_insn "*stack_protect_set_3"
- [(set (match_operand:DI 0 "memory_operand" "=m,m,m")
- (unspec:DI [(match_operand:DI 3 "memory_operand" "m,m,m")]
- UNSPEC_SP_SET))
- (set (match_operand:DI 1 "register_operand" "=&r,r,r")
- (match_operand:DI 2 "general_operand" "Z,rem,i"))
- (clobber (reg:CC FLAGS_REG))]
- "TARGET_64BIT
- && reload_completed
- && !reg_overlap_mentioned_p (operands[1], operands[2])"
-{
- output_asm_insn ("mov{q}\t{%3, %1|%1, %3}", operands);
- output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", operands);
- if (pic_32bit_operand (operands[2], DImode))
- return "lea{q}\t{%E2, %1|%1, %E2}";
- else if (which_alternative == 0)
- return "mov{l}\t{%k2, %k1|%k1, %k2}";
- else if (which_alternative == 2)
- return "movabs{q}\t{%2, %1|%1, %2}";
- else if (ix86_use_lea_for_mov (insn, operands + 1))
- return "lea{q}\t{%E2, %1|%1, %E2}";
- else
- return "mov{q}\t{%2, %1|%1, %2}";
-}
- [(set_attr "type" "multi")
- (set_attr "length" "24")])
-
-(define_peephole2
- [(parallel [(set (match_operand:DI 0 "memory_operand")
- (unspec:DI [(match_operand:DI 1 "memory_operand")]
- UNSPEC_SP_SET))
- (set (match_operand:DI 2 "general_reg_operand") (const_int 0))
- (clobber (reg:CC FLAGS_REG))])
- (set (match_dup 2) (match_operand:DI 3))]
- "TARGET_64BIT
- && general_operand (operands[3], DImode)
- && (general_reg_operand (operands[3], DImode)
- || memory_operand (operands[3], DImode)
- || x86_64_zext_immediate_operand (operands[3], DImode)
- || x86_64_immediate_operand (operands[3], DImode)
- || (CONSTANT_P (operands[3])
- && (!flag_pic || LEGITIMATE_PIC_OPERAND_P (operands[3]))))
- && !reg_overlap_mentioned_p (operands[2], operands[3])"
- [(parallel [(set (match_dup 0)
- (unspec:PTR [(match_dup 1)] UNSPEC_SP_SET))
- (set (match_dup 2) (match_dup 3))
- (clobber (reg:CC FLAGS_REG))])])
-
-(define_expand "stack_protect_test"
- [(match_operand 0 "memory_operand")
- (match_operand 1 "memory_operand")
- (match_operand 2)]
- ""
-{
- rtx flags = gen_rtx_REG (CCZmode, FLAGS_REG);
-
- emit_insn (gen_stack_protect_test_1
- (ptr_mode, flags, operands[0], operands[1]));
-
- emit_jump_insn (gen_cbranchcc4 (gen_rtx_EQ (VOIDmode, flags, const0_rtx),
- flags, const0_rtx, operands[2]));
- DONE;
-})
-
-(define_insn "@stack_protect_test_1_<mode>"
- [(set (match_operand:CCZ 0 "flags_reg_operand")
- (unspec:CCZ [(match_operand:PTR 1 "memory_operand" "m")
- (match_operand:PTR 2 "memory_operand" "m")]
- UNSPEC_SP_TEST))
- (clobber (match_scratch:PTR 3 "=&r"))]
- ""
-{
- output_asm_insn ("mov{<imodesuffix>}\t{%1, %3|%3, %1}", operands);
- return "sub{<imodesuffix>}\t{%2, %3|%3, %2}";
-}
- [(set_attr "type" "multi")])
-
(define_insn "sse4_2_crc32<mode>"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI
(symbol_ref "ix86_attr_length_address_default (insn) + 3"))])
;; The "jm"/addr gpr16 pairing restricts the memory operand's address to
;; the low 16 GPRs.  NOTE(review): presumably because the legacy-encoded
;; fxsave64/fxrstor64 cannot reference APX r16-r31 -- confirm.
(define_insn "fxsave64"
- [(set (match_operand:BLK 0 "memory_operand" "=m")
+ [(set (match_operand:BLK 0 "memory_operand" "=jm")
(unspec_volatile:BLK [(const_int 0)] UNSPECV_FXSAVE64))]
"TARGET_64BIT && TARGET_FXSR"
"fxsave64\t%0"
[(set_attr "type" "other")
+ (set_attr "addr" "gpr16")
(set_attr "memory" "store")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 4"))])
(symbol_ref "ix86_attr_length_address_default (insn) + 3"))])
(define_insn "fxrstor64"
- [(unspec_volatile [(match_operand:BLK 0 "memory_operand" "m")]
+ [(unspec_volatile [(match_operand:BLK 0 "memory_operand" "jm")]
UNSPECV_FXRSTOR64)]
"TARGET_64BIT && TARGET_FXSR"
"fxrstor64\t%0"
[(set_attr "type" "other")
+ (set_attr "addr" "gpr16")
(set_attr "memory" "load")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 4"))])
(symbol_ref "ix86_attr_length_address_default (insn) + 3"))])
;; Same APX address restriction for the xsave/xrstor family: memory
;; operands become "jm" with the addr attribute set to gpr16.
(define_insn "<xsave>_rex64"
- [(set (match_operand:BLK 0 "memory_operand" "=m")
+ [(set (match_operand:BLK 0 "memory_operand" "=jm")
(unspec_volatile:BLK
[(match_operand:SI 1 "register_operand" "a")
(match_operand:SI 2 "register_operand" "d")]
"<xsave>\t%0"
[(set_attr "type" "other")
(set_attr "memory" "store")
+ (set_attr "addr" "gpr16")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 3"))])
(define_insn "<xsave>"
- [(set (match_operand:BLK 0 "memory_operand" "=m")
+ [(set (match_operand:BLK 0 "memory_operand" "=jm")
(unspec_volatile:BLK
[(match_operand:SI 1 "register_operand" "a")
(match_operand:SI 2 "register_operand" "d")]
"<xsave>\t%0"
[(set_attr "type" "other")
(set_attr "memory" "store")
+ (set_attr "addr" "gpr16")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 4"))])
(define_insn "<xrstor>_rex64"
[(unspec_volatile:BLK
- [(match_operand:BLK 0 "memory_operand" "m")
+ [(match_operand:BLK 0 "memory_operand" "jm")
(match_operand:SI 1 "register_operand" "a")
(match_operand:SI 2 "register_operand" "d")]
ANY_XRSTOR)]
"<xrstor>\t%0"
[(set_attr "type" "other")
(set_attr "memory" "load")
+ (set_attr "addr" "gpr16")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 3"))])
(define_insn "<xrstor>64"
[(unspec_volatile:BLK
- [(match_operand:BLK 0 "memory_operand" "m")
+ [(match_operand:BLK 0 "memory_operand" "jm")
(match_operand:SI 1 "register_operand" "a")
(match_operand:SI 2 "register_operand" "d")]
ANY_XRSTOR64)]
"<xrstor>64\t%0"
[(set_attr "type" "other")
(set_attr "memory" "load")
+ (set_attr "addr" "gpr16")
(set (attr "length")
(symbol_ref "ix86_attr_length_address_default (insn) + 4"))])
DONE;
})
+;; USER_MSR: user-mode MSR read.  Operand 1 is the MSR index (register
+;; or sign/zero-extendable immediate per "reZ"); unspec_volatile since
+;; MSR contents can change outside the compiler's view.
+(define_insn "urdmsr"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec_volatile:DI
+ [(match_operand:DI 1 "x86_64_szext_nonmemory_operand" "reZ")]
+ UNSPECV_URDMSR))]
+ "TARGET_USER_MSR && TARGET_64BIT"
+ "urdmsr\t{%1, %0|%0, %1}"
+ [(set_attr "prefix" "vex")
+ (set_attr "type" "other")])
+
+;; USER_MSR: user-mode MSR write.  Operand 0 is the MSR index, operand 1
+;; the value to store; modelled as a bare unspec_volatile side effect.
+(define_insn "uwrmsr"
+ [(unspec_volatile
+ [(match_operand:DI 0 "x86_64_szext_nonmemory_operand" "reZ")
+ (match_operand:DI 1 "register_operand" "r")]
+ UNSPECV_UWRMSR)]
+ "TARGET_USER_MSR && TARGET_64BIT"
+ "uwrmsr\t{%1, %0|%0, %1}"
+ [(set_attr "prefix" "vex")
+ (set_attr "type" "other")])
+
(include "mmx.md")
(include "sse.md")
(include "sync.md")