# (fma, fms, fnma, and fnms) for both float and double.
proc check_effective_target_scalar_all_fma { } {
- return [istarget aarch64*-*-*]
+ if { [istarget aarch64*-*-*]
+ || [istarget loongarch*-*-*] } {
+ return 1
+ }
+ return 0
}
# Return 1 if the target supports compiling fixed-point,
&& [check_effective_target_s390_vx])
|| ([istarget riscv*-*-*]
&& [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx])
}}]
}
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vxe2])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports signed double->int conversion
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports signed int->double conversion
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if we're supporting __int128 for target, 0 otherwise.
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vxe2])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vxe2])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports unsigned float->int conversion
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vxe2])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports vector integer char -> long long extend optab
proc check_effective_target_vect_ext_char_longlong { } {
return [check_cached_effective_target_indexed vect_ext_char_longlong {
expr { ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if peeling for alignment might be profitable on the target
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports hardware vector shift by register operation.
|| [istarget aarch64*-*-*]
|| ([istarget riscv*-*-*]
&& [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx])
}}]
}
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) } {
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) } {
set answer 1
} else {
set answer 0
expr { ([istarget aarch64*-*-*]
|| [is-effective-target arm_neon]
|| [istarget amdgcn-*-*])
+ || [istarget loongarch*-*-*]
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx]) }}]
}
|| [istarget aarch64*-*-*]
|| [is-effective-target arm_neon]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports addition of char vectors for at least
&& [check_effective_target_s390_vx])
|| ([istarget riscv*-*-*]
&& [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx])
}}]
}
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports hardware vectors of long, 0 otherwise.
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) } {
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) } {
set answer 1
} else {
set answer 0
&& [check_effective_target_s390_vxe])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports hardware vectors of float without
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v])} }]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports conditional addition, subtraction,
&& [check_effective_target_has_arch_pwr8])
|| [istarget aarch64*-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v])}}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx])}}]
}
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if, for some VF:
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports SLP permutation of 3 vectors when each
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports SLP permutation of 3 vectors when each
expr { [check_effective_target_vect_unpack]
|| [istarget powerpc*-*-*]
|| [istarget ia64-*-*]
+ || [istarget loongarch*-*-*]
|| [istarget riscv*-*-*] }}]
}
expr { [check_effective_target_vect_unpack]
|| [is-effective-target arm_neon]
|| [istarget ia64-*-*]
- || [istarget riscv*-*-*] }}]
+ || [istarget riscv*-*-*]
+ || [istarget loongarch*-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
proc check_effective_target_vect_widen_sum_qi_to_si { } {
return [check_cached_effective_target_indexed vect_widen_sum_qi_to_si {
expr { [istarget powerpc*-*-*]
+ || [istarget loongarch*-*-*]
|| [istarget riscv*-*-*] }}]
}
|| ([istarget aarch64*-*-*]
&& ![check_effective_target_aarch64_sve])
|| [is-effective-target arm_neon]
+ || [istarget loongarch*-*-*]
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx]))
|| [istarget amdgcn-*-*] }}]
&& ![check_effective_target_aarch64_sve])
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [is-effective-target arm_neon]
+ || [istarget loongarch*-*-*]
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx]))
|| [istarget amdgcn-*-*] }}]
&& [check_effective_target_arm_little_endian])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
+ || [istarget loongarch*-*-*]
|| [istarget amdgcn-*-*] }}]
}
return [check_cached_effective_target_indexed vect_widen_mult_hi_to_si_pattern {
expr { [istarget powerpc*-*-*]
|| [istarget ia64-*-*]
+ || [istarget loongarch*-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([is-effective-target arm_neon]
&& [check_effective_target_arm_little_endian])
return [check_cached_effective_target_indexed vect_widen_mult_si_to_di_pattern {
expr { [istarget ia64-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
+ || [istarget loongarch*-*-*]
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx]) }}]
}
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target plus current options supports a vector
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target plus current options supports a vector
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target plus current options supports a vector
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target plus current options supports a vector
|| ([istarget powerpc*-*-*]
&& [check_p9vector_hw_available])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target plus current options supports both signed
return [expr { ([istarget aarch64*-*-*]
&& ![check_effective_target_aarch64_sve1_only])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }]
}
# Return 1 if the target plus current options supports both signed
&& [check_effective_target_s390_vx])
|| [istarget amdgcn*-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target plus current options supports a vector
&& [check_effective_target_s390_vx])
|| [istarget amdgcn*-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target plus current options does not guarantee
|| ([istarget mips*-*-*] && [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
- || ([istarget riscv*-*-*]) } {
+ || ([istarget riscv*-*-*])
+ || ([istarget loongarch*-*-*]) } {
return 1
}
if { [istarget arm*-*-*]
proc check_effective_target_vect_scatter_store { } {
return [expr { [check_effective_target_aarch64_sve]
|| [istarget amdgcn*-*-*]
- || [check_effective_target_riscv_v] }]
+ || [check_effective_target_riscv_v]
+ || [check_effective_target_loongarch_sx] }]
}
# Return 1 if the target supports vector conditional operations, 0 otherwise.
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports vector conditional operations where
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports vector char multiplication, 0 otherwise.
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports vector short multiplication, 0 otherwise.
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports vector int multiplication, 0 otherwise.
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports 64 bit hardware vector
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) } {
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) } {
set answer 1
} else {
set answer 0
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports vector interleaving, 0 otherwise.
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
foreach N {2 3 4 5 6 7 8} {
|| [istarget aarch64*-*-*]
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports hardware square root instructions.
&& [check_effective_target_s390_vx])
|| [istarget amdgcn-*-*]
|| ([istarget riscv*-*-*]
- && [check_effective_target_riscv_v]) }}]
+ && [check_effective_target_riscv_v])
+ || ([istarget loongarch*-*-*]
+ && [check_effective_target_loongarch_sx]) }}]
}
# Return 1 if the target supports vector lrint calls.
set et_vect_call_lrint 0
if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
&& [check_effective_target_ilp32])
- || [istarget amdgcn-*-*] } {
+ || [istarget amdgcn-*-*]
+ || [istarget loongarch*-*-*] } {
set et_vect_call_lrint 1
}
proc check_effective_target_vect_call_btrunc { } {
return [check_cached_effective_target_indexed vect_call_btrunc {
expr { [istarget aarch64*-*-*]
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || [istarget loongarch*-*-*] }}]
}
# Return 1 if the target supports vector btruncf calls.
proc check_effective_target_vect_call_btruncf { } {
return [check_cached_effective_target_indexed vect_call_btruncf {
expr { [istarget aarch64*-*-*]
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || [istarget loongarch*-*-*] }}]
}
# Return 1 if the target supports vector ceil calls.
proc check_effective_target_vect_call_ceil { } {
return [check_cached_effective_target_indexed vect_call_ceil {
expr { [istarget aarch64*-*-*]
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || [istarget loongarch*-*-*] }}]
}
# Return 1 if the target supports vector ceilf calls.
proc check_effective_target_vect_call_ceilf { } {
return [check_cached_effective_target_indexed vect_call_ceilf {
expr { [istarget aarch64*-*-*]
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || [istarget loongarch*-*-*] }}]
}
# Return 1 if the target supports vector floor calls.
proc check_effective_target_vect_call_floor { } {
return [check_cached_effective_target_indexed vect_call_floor {
expr { [istarget aarch64*-*-*]
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || [istarget loongarch*-*-*] }}]
}
# Return 1 if the target supports vector floorf calls.
proc check_effective_target_vect_call_floorf { } {
return [check_cached_effective_target_indexed vect_call_floorf {
expr { [istarget aarch64*-*-*]
- || [istarget amdgcn-*-*] }}]
+ || [istarget amdgcn-*-*]
+ || [istarget loongarch*-*-*] }}]
}
# Return 1 if the target supports vector lceil calls.
proc check_effective_target_vect_call_lceil { } {
return [check_cached_effective_target_indexed vect_call_lceil {
- expr { [istarget aarch64*-*-*] }}]
+ expr { [istarget aarch64*-*-*]
+ || [istarget loongarch*-*-*] }}]
}
# Return 1 if the target supports vector lfloor calls.
proc check_effective_target_vect_call_lfloor { } {
return [check_cached_effective_target_indexed vect_call_lfloor {
- expr { [istarget aarch64*-*-*] }}]
+ expr { [istarget aarch64*-*-*]
+ || [istarget loongarch*-*-*] }}]
}
# Return 1 if the target supports vector nearbyint calls.
return [expr { [check_effective_target_aarch64_sve]
|| [istarget amdgcn-*-*]
|| [check_effective_target_riscv_v]
+ || [check_effective_target_loongarch_sx]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]}]
}
return [check_cached_effective_target section_anchors {
expr { [istarget powerpc*-*-*]
|| [istarget arm*-*-*]
- || [istarget aarch64*-*-*] }}]
+ || [istarget aarch64*-*-*]
+ || [istarget loongarch*-*-*] }}]
}
# Return 1 if the target supports atomic operations on "int_128" values.
set dg-do-what-default compile
}
} elseif [istarget loongarch*-*-*] {
- lappend DEFAULT_VECTCFLAGS "-mdouble-float" "-mlasx"
- if [check_effective_target_loongarch_asx_hw] {
+ # Default the vectorization option to "-mlsx": 256-bit ("-mlasx")
+ # vectorization still has problems with unaligned memory accesses.
+ lappend DEFAULT_VECTCFLAGS "-mdouble-float" "-mlsx"
+ if [check_effective_target_loongarch_sx_hw] {
set dg-do-what-default run
} else {
set dg-do-what-default compile
proc check_effective_target_vect_max_reduc { } {
if { [istarget aarch64*-*-*] || [is-effective-target arm_neon]
- || [check_effective_target_riscv_v] } {
+ || [check_effective_target_riscv_v]
+ || [check_effective_target_loongarch_sx] } {
return 1
}
return 0
#if !defined(__loongarch_sx)
#error "LSX not defined"
#endif
- }]
+ } "-mlsx"]
}
proc check_effective_target_loongarch_sx_hw { } {
#if !defined(__loongarch_asx)
#error "LASX not defined"
#endif
- }]
+ } "-mlasx"]
}
proc check_effective_target_loongarch_asx_hw { } {