/* IR-agnostic target query functions relating to optabs
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "insn-codes.h"
#include "optabs-query.h"
#include "optabs-libfuncs.h"
#include "insn-config.h"
#include "rtl.h"
#include "recog.h"
#include "vec-perm-indices.h"

struct target_optabs default_target_optabs;
struct target_optabs *this_fn_optabs = &default_target_optabs;
#if SWITCHABLE_TARGET
struct target_optabs *this_target_optabs = &default_target_optabs;
#endif

/* Return the insn used to perform conversion OP from mode FROM_MODE
   to mode TO_MODE; return CODE_FOR_nothing if the target does not have
   such an insn, or if it is unsuitable for optimization type OPT_TYPE.  */

insn_code
convert_optab_handler (convert_optab optab, machine_mode to_mode,
                       machine_mode from_mode, optimization_type opt_type)
{
  insn_code icode = convert_optab_handler (optab, to_mode, from_mode);
  if (icode == CODE_FOR_nothing
      || !targetm.optab_supported_p (optab, to_mode, from_mode, opt_type))
    return CODE_FOR_nothing;
  return icode;
}
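
/* Usage sketch (illustrative, not part of the original file): a caller
   that only wants a sign-extending SImode -> DImode conversion when it
   is suitable for speed optimization could ask:

     insn_code icode = convert_optab_handler (sext_optab, DImode, SImode,
                                              OPTIMIZE_FOR_SPEED);
     if (icode != CODE_FOR_nothing)
       ...emit the conversion...

   The particular optab, modes and optimization type are just an example.  */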

/* Return the insn used to implement mode MODE of OP; return
   CODE_FOR_nothing if the target does not have such an insn,
   or if it is unsuitable for optimization type OPT_TYPE.  */

insn_code
direct_optab_handler (convert_optab optab, machine_mode mode,
                      optimization_type opt_type)
{
  insn_code icode = direct_optab_handler (optab, mode);
  if (icode == CODE_FOR_nothing
      || !targetm.optab_supported_p (optab, mode, mode, opt_type))
    return CODE_FOR_nothing;
  return icode;
}

/* Enumerates the possible types of structure operand to an
   extraction_insn.  */
enum extraction_type { ET_unaligned_mem, ET_reg };

/* Check whether insv, extv or extzv pattern ICODE can be used for an
   insertion or extraction of type TYPE on a structure of mode MODE.
   Return true if so and fill in *INSN accordingly.  STRUCT_OP is the
   operand number of the structure (the first sign_extract or zero_extract
   operand) and FIELD_OP is the operand number of the field (the other
   side of the set from the sign_extract or zero_extract).  */

static bool
get_traditional_extraction_insn (extraction_insn *insn,
                                 enum extraction_type type,
                                 machine_mode mode,
                                 enum insn_code icode,
                                 int struct_op, int field_op)
{
  const struct insn_data_d *data = &insn_data[icode];

  machine_mode struct_mode = data->operand[struct_op].mode;
  if (struct_mode == VOIDmode)
    struct_mode = word_mode;
  if (mode != struct_mode)
    return false;

  machine_mode field_mode = data->operand[field_op].mode;
  if (field_mode == VOIDmode)
    field_mode = word_mode;

  machine_mode pos_mode = data->operand[struct_op + 2].mode;
  if (pos_mode == VOIDmode)
    pos_mode = word_mode;

  insn->icode = icode;
  insn->field_mode = as_a <scalar_int_mode> (field_mode);
  if (type == ET_unaligned_mem)
    insn->struct_mode = byte_mode;
  else if (struct_mode == BLKmode)
    insn->struct_mode = opt_scalar_int_mode ();
  else
    insn->struct_mode = as_a <scalar_int_mode> (struct_mode);
  insn->pos_mode = as_a <scalar_int_mode> (pos_mode);
  return true;
}

/* Return true if an optab exists to perform an insertion or extraction
   of type TYPE in mode MODE.  Describe the instruction in *INSN if so.

   REG_OPTAB is the optab to use for register structures and
   MISALIGN_OPTAB is the optab to use for misaligned memory structures.
   POS_OP is the operand number of the bit position.  */

static bool
get_optab_extraction_insn (struct extraction_insn *insn,
                           enum extraction_type type,
                           machine_mode mode, direct_optab reg_optab,
                           direct_optab misalign_optab, int pos_op)
{
  direct_optab optab = (type == ET_unaligned_mem ? misalign_optab : reg_optab);
  enum insn_code icode = direct_optab_handler (optab, mode);
  if (icode == CODE_FOR_nothing)
    return false;

  const struct insn_data_d *data = &insn_data[icode];

  machine_mode pos_mode = data->operand[pos_op].mode;
  if (pos_mode == VOIDmode)
    pos_mode = word_mode;

  insn->icode = icode;
  insn->field_mode = as_a <scalar_int_mode> (mode);
  if (type == ET_unaligned_mem)
    insn->struct_mode = opt_scalar_int_mode ();
  else
    insn->struct_mode = insn->field_mode;
  insn->pos_mode = as_a <scalar_int_mode> (pos_mode);
  return true;
}

/* Return true if an instruction exists to perform an insertion or
   extraction (PATTERN says which) of type TYPE in mode MODE.
   Describe the instruction in *INSN if so.  */

static bool
get_extraction_insn (extraction_insn *insn,
                     enum extraction_pattern pattern,
                     enum extraction_type type,
                     machine_mode mode)
{
  switch (pattern)
    {
    case EP_insv:
      if (targetm.have_insv ()
          && get_traditional_extraction_insn (insn, type, mode,
                                              targetm.code_for_insv, 0, 3))
        return true;
      return get_optab_extraction_insn (insn, type, mode, insv_optab,
                                        insvmisalign_optab, 2);

    case EP_extv:
      if (targetm.have_extv ()
          && get_traditional_extraction_insn (insn, type, mode,
                                              targetm.code_for_extv, 1, 0))
        return true;
      return get_optab_extraction_insn (insn, type, mode, extv_optab,
                                        extvmisalign_optab, 3);

    case EP_extzv:
      if (targetm.have_extzv ()
          && get_traditional_extraction_insn (insn, type, mode,
                                              targetm.code_for_extzv, 1, 0))
        return true;
      return get_optab_extraction_insn (insn, type, mode, extzv_optab,
                                        extzvmisalign_optab, 3);

    default:
      gcc_unreachable ();
    }
}

/* Return true if an instruction exists to access a field of mode
   FIELDMODE in a structure that has STRUCT_BITS significant bits.
   Describe the "best" such instruction in *INSN if so.  PATTERN and
   TYPE describe the type of insertion or extraction we want to perform.

   For an insertion, the number of significant structure bits includes
   all bits of the target.  For an extraction, it need only include the
   most significant bit of the field.  Larger widths are acceptable
   in both cases.  */

static bool
get_best_extraction_insn (extraction_insn *insn,
                          enum extraction_pattern pattern,
                          enum extraction_type type,
                          unsigned HOST_WIDE_INT struct_bits,
                          machine_mode field_mode)
{
  opt_scalar_int_mode mode_iter;
  FOR_EACH_MODE_FROM (mode_iter, smallest_int_mode_for_size (struct_bits))
    {
      scalar_int_mode mode = mode_iter.require ();
      if (get_extraction_insn (insn, pattern, type, mode))
        {
          FOR_EACH_MODE_FROM (mode_iter, mode)
            {
              mode = mode_iter.require ();
              if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (field_mode)
                  || TRULY_NOOP_TRUNCATION_MODES_P (insn->field_mode,
                                                    field_mode))
                break;
              get_extraction_insn (insn, pattern, type, mode);
            }
          return true;
        }
    }
  return false;
}

/* Return true if an instruction exists to access a field of mode
   FIELDMODE in a register structure that has STRUCT_BITS significant bits.
   Describe the "best" such instruction in *INSN if so.  PATTERN describes
   the type of insertion or extraction we want to perform.

   For an insertion, the number of significant structure bits includes
   all bits of the target.  For an extraction, it need only include the
   most significant bit of the field.  Larger widths are acceptable
   in both cases.  */

bool
get_best_reg_extraction_insn (extraction_insn *insn,
                              enum extraction_pattern pattern,
                              unsigned HOST_WIDE_INT struct_bits,
                              machine_mode field_mode)
{
  return get_best_extraction_insn (insn, pattern, ET_reg, struct_bits,
                                   field_mode);
}

/* Return true if an instruction exists to access a field of BITSIZE
   bits starting BITNUM bits into a memory structure.  Describe the
   "best" such instruction in *INSN if so.  PATTERN describes the type
   of insertion or extraction we want to perform and FIELDMODE is the
   natural mode of the extracted field.

   The instructions considered here only access bytes that overlap
   the bitfield; they do not touch any surrounding bytes.  */

bool
get_best_mem_extraction_insn (extraction_insn *insn,
                              enum extraction_pattern pattern,
                              HOST_WIDE_INT bitsize, HOST_WIDE_INT bitnum,
                              machine_mode field_mode)
{
  unsigned HOST_WIDE_INT struct_bits = (bitnum % BITS_PER_UNIT
                                        + bitsize
                                        + BITS_PER_UNIT - 1);
  struct_bits -= struct_bits % BITS_PER_UNIT;
  return get_best_extraction_insn (insn, pattern, ET_unaligned_mem,
                                   struct_bits, field_mode);
}
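
/* Worked example (illustrative, not in the original source): with
   BITS_PER_UNIT == 8, a 7-bit field starting at bit 13 gives
   bitnum % BITS_PER_UNIT == 5, so struct_bits starts as 5 + 7 + 7 == 19
   and is then rounded down to a whole number of bytes, 16 bits.
   The search therefore only considers instructions that touch the two
   bytes overlapping the field.  */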

/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (machine_mode to_mode, machine_mode from_mode,
              int unsignedp)
{
  if (unsignedp < 0 && targetm.have_ptr_extend ())
    return targetm.code_for_ptr_extend;

  convert_optab tab = unsignedp ? zext_optab : sext_optab;
  return convert_optab_handler (tab, to_mode, from_mode);
}

/* Return the insn code to convert fixed-point mode FIXMODE to floating-point
   mode FLTMODE, or CODE_FOR_nothing if no such instruction exists.
   UNSIGNEDP specifies whether FIXMODE is unsigned.  */

enum insn_code
can_float_p (machine_mode fltmode, machine_mode fixmode,
             int unsignedp)
{
  convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
  return convert_optab_handler (tab, fltmode, fixmode);
}

/* Return the insn code to convert floating-point mode FLTMODE to fixed-point
   mode FIXMODE, or CODE_FOR_nothing if no such instruction exists.
   UNSIGNEDP specifies whether FIXMODE is unsigned.

   On a successful return, set *TRUNCP_PTR to true if it is necessary to
   output an explicit FTRUNC before the instruction.  */

enum insn_code
can_fix_p (machine_mode fixmode, machine_mode fltmode,
           int unsignedp, bool *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode);
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = false;
      return icode;
    }

  /* FIXME: For this to work, a port must define both FIX and FTRUNC
     patterns.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = convert_optab_handler (tab, fixmode, fltmode);
  if (icode != CODE_FOR_nothing
      && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
    {
      *truncp_ptr = true;
      return icode;
    }

  return CODE_FOR_nothing;
}
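
/* Usage sketch (illustrative, not part of the original file): converting
   SFmode to signed SImode might need an explicit FTRUNC first:

     bool must_trunc = false;
     enum insn_code icode = can_fix_p (SImode, SFmode, 0, &must_trunc);
     if (icode != CODE_FOR_nothing && must_trunc)
       ...emit ftrunc on the SFmode operand before the fix insn...

   The modes here are only an example; any fixed/float pair can be queried.  */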

/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integer values based on fp
   comparisons, and vice versa.  How do we handle them?  */

bool
can_conditionally_move_p (machine_mode mode)
{
  return direct_optab_handler (movcc_optab, mode) != CODE_FOR_nothing;
}

/* If a target doesn't implement a permute on a vector with multibyte
   elements, we can try to do the same permute on byte elements.
   If this makes sense for vector mode MODE then return the appropriate
   byte vector mode.  */

opt_machine_mode
qimode_for_vec_perm (machine_mode mode)
{
  machine_mode qimode;
  if (GET_MODE_INNER (mode) != QImode
      && mode_for_vector (QImode, GET_MODE_SIZE (mode)).exists (&qimode)
      && VECTOR_MODE_P (qimode))
    return qimode;
  return opt_machine_mode ();
}
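
/* For example (illustrative, not in the original source): on a target
   with 128-bit vector modes, a V4SImode permute that has no direct
   pattern can be retried as a V16QImode permute, since both modes are
   16 bytes wide; qimode_for_vec_perm (V4SImode) then returns V16QImode.  */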

/* Return true if selector SEL can be represented in the integer
   equivalent of vector mode MODE.  */

bool
selector_fits_mode_p (machine_mode mode, const vec_perm_indices &sel)
{
  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (GET_MODE_INNER (mode));
  return (mask == HOST_WIDE_INT_M1U
          || sel.all_in_range_p (0, mask + 1));
}
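
/* For example (illustrative, not in the original source): for V16QImode
   the inner mode is QImode, so GET_MODE_MASK gives 0xff and every
   selector element must lie in [0, 256).  A two-input permute of
   16-byte vectors only needs indices 0..31, so such selectors fit;
   modes with wider elements relax the bound further.  */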

/* Return true if VEC_PERM_EXPRs with variable selector operands can be
   expanded using SIMD extensions of the CPU.  MODE is the mode of the
   vectors being permuted.  */

bool
can_vec_perm_var_p (machine_mode mode)
{
  /* If the target doesn't implement a vector mode for the vector type,
     then no operations are supported.  */
  if (!VECTOR_MODE_P (mode))
    return false;

  if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
    return true;

  /* We allow fallback to a QI vector mode, and adjust the mask.  */
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode)
      || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1))
    return false;

  if (direct_optab_handler (vec_perm_optab, qimode) == CODE_FOR_nothing)
    return false;

  /* In order to support the lowering of variable permutations,
     we need to support shifts and adds.  */
  if (GET_MODE_UNIT_SIZE (mode) > 2
      && optab_handler (ashl_optab, mode) == CODE_FOR_nothing
      && optab_handler (vashl_optab, mode) == CODE_FOR_nothing)
    return false;
  if (optab_handler (add_optab, qimode) == CODE_FOR_nothing)
    return false;

  return true;
}

/* Return true if the target directly supports VEC_PERM_EXPRs on vectors
   of mode MODE using the selector SEL.  ALLOW_VARIABLE_P is true if it
   is acceptable to force the selector into a register and use a variable
   permute (if the target supports that).

   Note that additional permutations representing whole-vector shifts may
   also be handled via the vec_shr optab, but only where the second input
   vector is entirely constant zeroes; this case is not dealt with here.  */

bool
can_vec_perm_const_p (machine_mode mode, const vec_perm_indices &sel,
                      bool allow_variable_p)
{
  /* If the target doesn't implement a vector mode for the vector type,
     then no operations are supported.  */
  if (!VECTOR_MODE_P (mode))
    return false;

  /* It's probably cheaper to test for the variable case first.  */
  if (allow_variable_p && selector_fits_mode_p (mode, sel))
    {
      if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
        return true;

      /* Unlike can_vec_perm_var_p, we don't need to test for optabs
         related to computing the QImode selector, since that happens at
         compile time.  */
      machine_mode qimode;
      if (qimode_for_vec_perm (mode).exists (&qimode))
        {
          vec_perm_indices qimode_indices;
          qimode_indices.new_expanded_vector (sel, GET_MODE_UNIT_SIZE (mode));
          if (selector_fits_mode_p (qimode, qimode_indices)
              && (direct_optab_handler (vec_perm_optab, qimode)
                  != CODE_FOR_nothing))
            return true;
        }
    }

  if (targetm.vectorize.vec_perm_const != NULL)
    {
      if (targetm.vectorize.vec_perm_const (mode, NULL_RTX, NULL_RTX,
                                            NULL_RTX, sel))
        return true;

      /* ??? For completeness, we ought to check the QImode version of
         vec_perm_const_optab.  But all users of this implicit lowering
         feature implement the variable vec_perm_optab, and the ia64
         port specifically doesn't want us to lower V2SF operations
         into integer operations.  */
    }

  return false;
}
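
/* Usage sketch (illustrative, not part of the original file): on a
   target that provides V4SImode, a caller could ask whether a constant
   element-reversal permute is supported by building the selector
   { 3, 2, 1, 0 }, much as can_mult_highpart_p does below:

     vec_perm_builder sel (4, 4, 1);
     for (unsigned int i = 0; i < 4; ++i)
       sel.quick_push (3 - i);
     vec_perm_indices indices (sel, 1, 4);
     bool ok = can_vec_perm_const_p (V4SImode, indices);

   The mode and selector are just an example.  */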

/* Find a widening optab even if it doesn't widen as much as we want.
   E.g. if from_mode is HImode, and to_mode is DImode, and there is no
   direct HI->DI insn, then return SI->DI, if that exists.  */

enum insn_code
find_widening_optab_handler_and_mode (optab op, machine_mode to_mode,
                                      machine_mode from_mode,
                                      machine_mode *found_mode)
{
  gcc_checking_assert (GET_MODE_CLASS (from_mode) == GET_MODE_CLASS (to_mode));
  gcc_checking_assert (from_mode < to_mode);
  FOR_EACH_MODE (from_mode, from_mode, to_mode)
    {
      enum insn_code handler = convert_optab_handler (op, to_mode, from_mode);

      if (handler != CODE_FOR_nothing)
        {
          if (found_mode)
            *found_mode = from_mode;
          return handler;
        }
    }

  return CODE_FOR_nothing;
}

/* Return non-zero if a highpart multiply is supported or can be synthesized.
   For the benefit of expand_mult_highpart, the return value is 1 for direct,
   2 for even/odd widening, and 3 for hi/lo widening.  */

int
can_mult_highpart_p (machine_mode mode, bool uns_p)
{
  optab op;

  op = uns_p ? umul_highpart_optab : smul_highpart_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    return 1;

  /* If the mode is an integral vector, synth from widening operations.  */
  if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
    return 0;

  poly_int64 nunits = GET_MODE_NUNITS (mode);

  op = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    {
      op = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      if (optab_handler (op, mode) != CODE_FOR_nothing)
        {
          /* The encoding has 2 interleaved stepped patterns.  */
          vec_perm_builder sel (nunits, 2, 3);
          for (unsigned int i = 0; i < 6; ++i)
            sel.quick_push (!BYTES_BIG_ENDIAN
                            + (i & ~1)
                            + ((i & 1) ? nunits : 0));
          vec_perm_indices indices (sel, 2, nunits);
          if (can_vec_perm_const_p (mode, indices))
            return 2;
        }
    }

  op = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    {
      op = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      if (optab_handler (op, mode) != CODE_FOR_nothing)
        {
          /* The encoding has a single stepped pattern.  */
          vec_perm_builder sel (nunits, 1, 3);
          for (unsigned int i = 0; i < 3; ++i)
            sel.quick_push (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
          vec_perm_indices indices (sel, 2, nunits);
          if (can_vec_perm_const_p (mode, indices))
            return 3;
        }
    }

  return 0;
}
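
/* Worked example (illustrative, not in the original source): on a
   little-endian target with a 4-element vector mode, the even/odd case
   above builds the selector { 1, 5, 3, 7 } and the hi/lo case builds
   { 1, 3, 5, 7 }; each index picks the high narrow half of one
   double-width product out of the two widened multiply results, which
   is exactly the permute expand_mult_highpart will need.  On big-endian
   targets the selectors start at 0 instead.  */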

/* Return true if the target supports vector masked load/store for MODE.  */

bool
can_vec_mask_load_store_p (machine_mode mode,
                           machine_mode mask_mode,
                           bool is_load)
{
  optab op = is_load ? maskload_optab : maskstore_optab;
  machine_mode vmode;

  /* If mode is vector mode, check it directly.  */
  if (VECTOR_MODE_P (mode))
    return convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing;

  /* Otherwise, return true if there is some vector mode with
     the mask load/store supported.  */

  /* See if there is any chance the mask load or store might be
     vectorized.  If not, punt.  */
  scalar_mode smode;
  if (!is_a <scalar_mode> (mode, &smode))
    return false;

  vmode = targetm.vectorize.preferred_simd_mode (smode);
  if (!VECTOR_MODE_P (vmode))
    return false;

  if ((targetm.vectorize.get_mask_mode
       (GET_MODE_NUNITS (vmode), GET_MODE_SIZE (vmode)).exists (&mask_mode))
      && convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
    return true;

  auto_vector_sizes vector_sizes;
  targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
  for (unsigned int i = 0; i < vector_sizes.length (); ++i)
    {
      poly_uint64 cur = vector_sizes[i];
      poly_uint64 nunits;
      if (!multiple_p (cur, GET_MODE_SIZE (smode), &nunits))
        continue;
      if (mode_for_vector (smode, nunits).exists (&vmode)
          && VECTOR_MODE_P (vmode)
          && targetm.vectorize.get_mask_mode (nunits, cur).exists (&mask_mode)
          && convert_optab_handler (op, vmode, mask_mode) != CODE_FOR_nothing)
        return true;
    }
  return false;
}

/* Return true if there is a compare_and_swap pattern.  */

bool
can_compare_and_swap_p (machine_mode mode, bool allow_libcall)
{
  enum insn_code icode;

  /* Check for __atomic_compare_and_swap.  */
  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;

  /* Check for __sync_compare_and_swap.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;
  if (allow_libcall && optab_libfunc (sync_compare_and_swap_optab, mode))
    return true;

  /* No inline compare and swap.  */
  return false;
}
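
/* Usage sketch (illustrative, not part of the original file): a caller
   deciding whether an atomic builtin can be expanded might ask

     if (can_compare_and_swap_p (SImode, true))
       ...expand inline or via the __sync libcall...

   where the second argument is allow_libcall; SImode is just an example,
   any integer mode can be queried.  */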

/* Return true if an atomic exchange can be performed.  */

bool
can_atomic_exchange_p (machine_mode mode, bool allow_libcall)
{
  enum insn_code icode;

  /* Check for __atomic_exchange.  */
  icode = direct_optab_handler (atomic_exchange_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;

  /* Don't check __sync_test_and_set, as on some platforms that
     has reduced functionality.  Targets that really do support
     a proper exchange should simply be updated to the __atomics.  */

  return can_compare_and_swap_p (mode, allow_libcall);
}

/* Return true if an atomic load can be performed without falling back to
   a compare-and-swap.  */

bool
can_atomic_load_p (machine_mode mode)
{
  enum insn_code icode;

  /* Does the target support the load directly?  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    return true;

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  Also see
     expand_atomic_load.  */
  return known_le (GET_MODE_PRECISION (mode), BITS_PER_WORD);
}

/* Determine whether "1 << x" is relatively cheap in word_mode.  */

bool
lshift_cheap_p (bool speed_p)
{
  /* FIXME: This should be made target dependent via this "this_target"
     mechanism, similar to e.g. can_copy_init_p in gcse.c.  */
  static bool init[2] = { false, false };
  static bool cheap[2] = { true, true };

  /* If the target has no lshift in word_mode, the operation will most
     probably not be cheap.  ??? Does GCC even work for such targets?  */
  if (optab_handler (ashl_optab, word_mode) == CODE_FOR_nothing)
    return false;

  if (!init[speed_p])
    {
      rtx reg = gen_raw_REG (word_mode, 10000);
      int cost = set_src_cost (gen_rtx_ASHIFT (word_mode, const1_rtx, reg),
                               word_mode, speed_p);
      cheap[speed_p] = cost < COSTS_N_INSNS (3);
      init[speed_p] = true;
    }

  return cheap[speed_p];
}
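
/* Usage sketch (illustrative, not part of the original file): a caller
   deciding whether to lower a chain of equality tests into a
   "1 << x" bitmask test might ask

     if (lshift_cheap_p (optimize_insn_for_speed_p ()))
       ...use the bitmask form...

   The COSTS_N_INSNS (3) threshold is what this query encapsulates; the
   caller only sees the boolean answer.  */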