#ifndef GCC_ADDRESSES_H
#define GCC_ADDRESSES_H
-static inline enum reg_class
+inline enum reg_class
base_reg_class (machine_mode mode ATTRIBUTE_UNUSED,
addr_space_t as ATTRIBUTE_UNUSED,
enum rtx_code outer_code ATTRIBUTE_UNUSED,
REGNO_OK_FOR_BASE_P.
Arguments as for the REGNO_MODE_CODE_OK_FOR_BASE_P macro. */
-static inline bool
+inline bool
ok_for_base_p_1 (unsigned regno ATTRIBUTE_UNUSED,
machine_mode mode ATTRIBUTE_UNUSED,
addr_space_t as ATTRIBUTE_UNUSED,
/* Wrapper around ok_for_base_p_1, for use after register allocation is
complete. Arguments as for the called function. */
-static inline bool
+inline bool
regno_ok_for_base_p (unsigned regno, machine_mode mode, addr_space_t as,
enum rtx_code outer_code, enum rtx_code index_code)
{
/* Return the size of padding needed to insert after a protected
decl of SIZE. */
-static inline unsigned int
+inline unsigned int
asan_red_zone_size (unsigned int size)
{
unsigned int c = size & (ASAN_RED_ZONE_SIZE - 1);
/* Return how much a stack variable occupies on a stack
including a space for red zone. */
-static inline unsigned HOST_WIDE_INT
+inline unsigned HOST_WIDE_INT
asan_var_and_redzone_size (unsigned HOST_WIDE_INT size)
{
if (size <= 4)
/* Return TRUE if builtin with given FCODE will be intercepted by
libasan. */
-static inline bool
+inline bool
asan_intercepted_p (enum built_in_function fcode)
{
if (hwasan_sanitize_p ())
/* Return TRUE if we should instrument for use-after-scope sanity checking. */
-static inline bool
+inline bool
asan_sanitize_use_after_scope (void)
{
return (flag_sanitize_address_use_after_scope
/* Return true if DECL should be guarded on the stack. */
-static inline bool
+inline bool
asan_protect_stack_decl (tree decl)
{
return DECL_P (decl)
/* Return true when flag_sanitize & FLAG is non-zero. If FN is non-null,
remove all flags mentioned in "no_sanitize" of DECL_ATTRIBUTES. */
-static inline bool
+inline bool
sanitize_flags_p (unsigned int flag, const_tree fn = current_function_decl)
{
unsigned int result_flags = flag_sanitize & flag;
/* Return true when coverage sanitization should happen for FN function. */
-static inline bool
+inline bool
sanitize_coverage_p (const_tree fn = current_function_decl)
{
return (flag_sanitize_coverage
/* For a given IDENTIFIER_NODE, strip leading and trailing '_' characters
so that we have a canonical form of attribute names. */
-static inline tree
+inline tree
canonicalize_attr_name (tree attr_name)
{
size_t l = IDENTIFIER_LENGTH (attr_name);
/* Compare attribute identifiers ATTR1 and ATTR2 with length ATTR1_LEN and
ATTR2_LEN. */
-static inline bool
+inline bool
cmp_attribs (const char *attr1, size_t attr1_len,
const char *attr2, size_t attr2_len)
{
/* Compare attribute identifiers ATTR1 and ATTR2. */
-static inline bool
+inline bool
cmp_attribs (const char *attr1, const char *attr2)
{
return cmp_attribs (attr1, strlen (attr1), attr2, strlen (attr2));
/* Given an identifier node IDENT and a string ATTR_NAME, return true
if the identifier node is a valid attribute name for the string. */
-static inline bool
+inline bool
is_attribute_p (const char *attr_name, const_tree ident)
{
return cmp_attribs (attr_name, strlen (attr_name),
for standard attribute (NULL get_attribute_namespace) or "gnu"
namespace. */
-static inline bool
+inline bool
is_attribute_namespace_p (const char *attr_ns, const_tree attr)
{
tree ident = get_attribute_namespace (attr);
occurrences are wanted. ATTR_NAME must be in the form 'text' (not
'__text__'). */
-static inline tree
+inline tree
lookup_attribute (const char *attr_name, tree list)
{
if (CHECKING_P && attr_name[0] != '_')
/* Similar to lookup_attribute, but also match the attribute namespace.
ATTR_NS "" stands for either standard attribute or "gnu" namespace. */
-static inline tree
+inline tree
lookup_attribute (const char *attr_ns, const char *attr_name, tree list)
{
if (CHECKING_P && attr_name[0] != '_')
starts with ATTR_NAME. ATTR_NAME must be in the form 'text' (not
'__text__'). */
-static inline tree
+inline tree
lookup_attribute_by_prefix (const char *attr_name, tree list)
{
gcc_checking_assert (attr_name[0] != '_');
/* Returns true if BB has precisely one successor. */
-static inline bool
+inline bool
single_succ_p (const_basic_block bb)
{
return EDGE_COUNT (bb->succs) == 1;
/* Returns true if BB has precisely one predecessor. */
-static inline bool
+inline bool
single_pred_p (const_basic_block bb)
{
return EDGE_COUNT (bb->preds) == 1;
/* Returns the single successor edge of basic block BB. Aborts if
BB does not have exactly one successor. */
-static inline edge
+inline edge
single_succ_edge (const_basic_block bb)
{
gcc_checking_assert (single_succ_p (bb));
/* Returns the single predecessor edge of basic block BB. Aborts
if BB does not have exactly one predecessor. */
-static inline edge
+inline edge
single_pred_edge (const_basic_block bb)
{
gcc_checking_assert (single_pred_p (bb));
/* Returns the single successor block of basic block BB. Aborts
if BB does not have exactly one successor. */
-static inline basic_block
+inline basic_block
single_succ (const_basic_block bb)
{
return single_succ_edge (bb)->dest;
/* Returns the single predecessor block of basic block BB. Aborts
if BB does not have exactly one predecessor. */
-static inline basic_block
+inline basic_block
single_pred (const_basic_block bb)
{
return single_pred_edge (bb)->src;
vec<edge, va_gc> **container;
};
-static inline vec<edge, va_gc> *
+inline vec<edge, va_gc> *
ei_container (edge_iterator i)
{
gcc_checking_assert (i.container);
#define ei_last(iter) ei_last_1 (&(iter))
/* Return an iterator pointing to the start of an edge vector. */
-static inline edge_iterator
+inline edge_iterator
ei_start_1 (vec<edge, va_gc> **ev)
{
edge_iterator i;
/* Return an iterator pointing to the last element of an edge
vector. */
-static inline edge_iterator
+inline edge_iterator
ei_last_1 (vec<edge, va_gc> **ev)
{
edge_iterator i;
}
/* Is the iterator `i' at the end of the sequence? */
-static inline bool
+inline bool
ei_end_p (edge_iterator i)
{
return (i.index == EDGE_COUNT (ei_container (i)));
/* Is the iterator `i' at one position before the end of the
sequence? */
-static inline bool
+inline bool
ei_one_before_end_p (edge_iterator i)
{
return (i.index + 1 == EDGE_COUNT (ei_container (i)));
}
/* Advance the iterator to the next element. */
-static inline void
+inline void
ei_next (edge_iterator *i)
{
gcc_checking_assert (i->index < EDGE_COUNT (ei_container (*i)));
}
/* Move the iterator to the previous element. */
-static inline void
+inline void
ei_prev (edge_iterator *i)
{
gcc_checking_assert (i->index > 0);
}
/* Return the edge pointed to by the iterator `i'. */
-static inline edge
+inline edge
ei_edge (edge_iterator i)
{
return EDGE_I (ei_container (i), i.index);
/* Return an edge pointed to by the iterator. Do it safely so that
NULL is returned when the iterator is pointing at the end of the
sequence. */
-static inline edge
+inline edge
ei_safe_edge (edge_iterator i)
{
return !ei_end_p (i) ? ei_edge (i) : NULL;
*Edge P is set to the next edge if we are to continue to iterate
and NULL otherwise. */
-static inline bool
+inline bool
ei_cond (edge_iterator ei, edge *p)
{
if (!ei_end_p (ei))
/* Return true if BB is in a transaction. */
-static inline bool
+inline bool
bb_in_transaction (basic_block bb)
{
return bb->flags & BB_IN_TRANSACTION;
}
/* Return true when one of the predecessor edges of BB is marked with EDGE_EH. */
-static inline bool
+inline bool
bb_has_eh_pred (basic_block bb)
{
edge e;
}
/* Return true when one of the predecessor edges of BB is marked with EDGE_ABNORMAL. */
-static inline bool
+inline bool
bb_has_abnormal_pred (basic_block bb)
{
edge e;
}
/* Return the fallthru edge in EDGES if it exists, NULL otherwise. */
-static inline edge
+inline edge
find_fallthru_edge (vec<edge, va_gc> *edges)
{
edge e;
/* Check that the probability is sane. */
-static inline void
+inline void
check_probability (int prob)
{
gcc_checking_assert (prob >= 0 && prob <= REG_BR_PROB_BASE);
/* Given PROB1 and PROB2, return PROB1*PROB2/REG_BR_PROB_BASE.
Used to combine BB probabilities. */
-static inline int
+inline int
combine_probabilities (int prob1, int prob2)
{
check_probability (prob1);
interface when potentially scaling up, so that SCALE is not
constrained to be < REG_BR_PROB_BASE. */
-static inline gcov_type
+inline gcov_type
apply_scale (gcov_type freq, gcov_type scale)
{
return RDIV (freq * scale, REG_BR_PROB_BASE);
/* Apply probability PROB on frequency or count FREQ. */
-static inline gcov_type
+inline gcov_type
apply_probability (gcov_type freq, int prob)
{
check_probability (prob);
/* Return inverse probability for PROB. */
-static inline int
+inline int
inverse_probability (int prob1)
{
check_probability (prob1);
/* Return true if BB has at least one abnormal outgoing edge. */
-static inline bool
+inline bool
has_abnormal_or_eh_outgoing_edge_p (basic_block bb)
{
edge e;
/* Return true when one of the predecessor edges of BB is marked with
EDGE_ABNORMAL_CALL or EDGE_EH. */
-static inline bool
+inline bool
has_abnormal_call_or_eh_pred_edge_p (basic_block bb)
{
edge e;
/* Initialize a bitmap header. OBSTACK indicates the bitmap obstack
to allocate from, NULL for GC'd bitmap. */
-static inline void
+inline void
bitmap_initialize (bitmap head, bitmap_obstack *obstack CXX_MEM_STAT_INFO)
{
head->first = head->current = NULL;
/* Release a bitmap (but not its head). This is suitable for pairing with
bitmap_initialize. */
-static inline void
+inline void
bitmap_release (bitmap head)
{
bitmap_clear (head);
/* Initialize a single bitmap iterator. START_BIT is the first bit to
iterate from. */
-static inline void
+inline void
bmp_iter_set_init (bitmap_iterator *bi, const_bitmap map,
unsigned start_bit, unsigned *bit_no)
{
/* Initialize an iterator to iterate over the intersection of two
bitmaps. START_BIT is the bit to commence from. */
-static inline void
+inline void
bmp_iter_and_init (bitmap_iterator *bi, const_bitmap map1, const_bitmap map2,
unsigned start_bit, unsigned *bit_no)
{
/* Initialize an iterator to iterate over the bits in MAP1 & ~MAP2. */
-static inline void
+inline void
bmp_iter_and_compl_init (bitmap_iterator *bi,
const_bitmap map1, const_bitmap map2,
unsigned start_bit, unsigned *bit_no)
/* Advance to the next bit in BI. We don't advance to the next
nonzero bit yet. */
-static inline void
+inline void
bmp_iter_next (bitmap_iterator *bi, unsigned *bit_no)
{
bi->bits >>= 1;
/* Advance to first set bit in BI. */
-static inline void
+inline void
bmp_iter_next_bit (bitmap_iterator * bi, unsigned *bit_no)
{
#if (GCC_VERSION >= 3004)
already advanced past the just iterated bit. Return true if there
is a bit to iterate. */
-static inline bool
+inline bool
bmp_iter_set (bitmap_iterator *bi, unsigned *bit_no)
{
/* If our current word is nonzero, it contains the bit we want. */
bitmaps. We will have already advanced past the just iterated bit.
Return true if there is a bit to iterate. */
-static inline bool
+inline bool
bmp_iter_and (bitmap_iterator *bi, unsigned *bit_no)
{
/* If our current word is nonzero, it contains the bit we want. */
complemented bitmaps. We will have already advanced past the just
iterated bit. */
-static inline bool
+inline bool
bmp_iter_and_compl (bitmap_iterator *bi, unsigned *bit_no)
{
/* If our current word is nonzero, it contains the bit we want. */
minimum or op1 is not -1, because e.g. (long long) INT_MIN / -1 is
well defined INT_MAX + 1LL if long long is wider than int, but INT_MIN / -1
is UB. */
-static inline bool
+inline bool
may_shorten_divmod (tree op0, tree op1)
{
tree type0 = TREE_TYPE (op0);
const char *);
/* Return next tree in the chain for chain_next walking of tree nodes. */
-static inline tree
+inline tree
c_tree_chain_next (tree t)
{
/* TREE_CHAIN of a type is TYPE_STUB_DECL, which is different
/* Return true if the next token from PARSER has the indicated
TYPE. */
-static inline bool
+inline bool
c_parser_next_token_is (c_parser *parser, enum cpp_ttype type)
{
return c_parser_peek_token (parser)->type == type;
/* Return true if the next token from PARSER does not have the
indicated TYPE. */
-static inline bool
+inline bool
c_parser_next_token_is_not (c_parser *parser, enum cpp_ttype type)
{
return !c_parser_next_token_is (parser, type);
/* Return true if the next token from PARSER is the indicated
KEYWORD. */
-static inline bool
+inline bool
c_parser_next_token_is_keyword (c_parser *parser, enum rid keyword)
{
return c_parser_peek_token (parser)->keyword == keyword;
/* Check control flow invariants, if internal consistency checks are
enabled. */
-static inline void
+inline void
checking_verify_flow_info (void)
{
/* TODO: Add a separate option for -fchecking=cfg. */
#define LOOP_C_FINITE (1 << 1)
/* Set C to the LOOP constraint. */
-static inline void
+inline void
loop_constraint_set (class loop *loop, unsigned c)
{
loop->constraints |= c;
}
/* Clear C from the LOOP constraint. */
-static inline void
+inline void
loop_constraint_clear (class loop *loop, unsigned c)
{
loop->constraints &= ~c;
}
/* Check if C is set in the LOOP constraint. */
-static inline bool
+inline bool
loop_constraint_set_p (class loop *loop, unsigned c)
{
return (loop->constraints & c) == c;
extern class niter_desc *get_simple_loop_desc (class loop *loop);
extern void free_simple_loop_desc (class loop *loop);
-static inline class niter_desc *
+inline class niter_desc *
simple_loop_desc (class loop *loop)
{
return loop->simple_loop_desc;
/* Returns the loop with index NUM from FNs loop tree. */
-static inline class loop *
+inline class loop *
get_loop (struct function *fn, unsigned num)
{
return (*loops_for_fn (fn)->larray)[num];
/* Returns the number of superloops of LOOP. */
-static inline unsigned
+inline unsigned
loop_depth (const class loop *loop)
{
return vec_safe_length (loop->superloops);
/* Returns the immediate superloop of LOOP, or NULL if LOOP is the outermost
loop. */
-static inline class loop *
+inline class loop *
loop_outer (const class loop *loop)
{
unsigned n = vec_safe_length (loop->superloops);
/* Returns true if LOOP has at least one exit edge. */
-static inline bool
+inline bool
loop_has_exit_edges (const class loop *loop)
{
return loop->exits->next->e != NULL;
/* Returns the number of loops in FN (including the removed
ones and the fake loop that forms the root of the loop tree). */
-static inline unsigned
+inline unsigned
number_of_loops (struct function *fn)
{
struct loops *loops = loops_for_fn (fn);
/* Returns true if state of the loops satisfies all properties
described by FLAGS. */
-static inline bool
+inline bool
loops_state_satisfies_p (function *fn, unsigned flags)
{
return (loops_for_fn (fn)->state & flags) == flags;
}
-static inline bool
+inline bool
loops_state_satisfies_p (unsigned flags)
{
return loops_state_satisfies_p (cfun, flags);
/* Sets FLAGS to the loops state. */
-static inline void
+inline void
loops_state_set (function *fn, unsigned flags)
{
loops_for_fn (fn)->state |= flags;
}
-static inline void
+inline void
loops_state_set (unsigned flags)
{
loops_state_set (cfun, flags);
/* Clears FLAGS from the loops state. */
-static inline void
+inline void
loops_state_clear (function *fn, unsigned flags)
{
loops_for_fn (fn)->state &= ~flags;
}
-static inline void
+inline void
loops_state_clear (unsigned flags)
{
if (!current_loops)
/* Check loop structure invariants, if internal consistency checks are
enabled. */
-static inline void
+inline void
checking_verify_loop_structure (void)
{
/* VERIFY_LOOP_STRUCTURE essentially asserts that no loops need fixups.
extern auto_vec<basic_block> get_loop_hot_path (const class loop *loop);
/* Returns the outermost loop of the loop nest that contains LOOP. */
-static inline class loop *
+inline class loop *
loop_outermost (class loop *loop)
{
unsigned n = vec_safe_length (loop->superloops);
/* Converts VAL to widest_int. */
-static inline widest_int
+inline widest_int
gcov_type_to_wide_int (gcov_type val)
{
HOST_WIDE_INT a[2];
/* Return true if DECL should have entry in symbol table if used.
Those are functions and static & external variables. */
-static inline bool
+inline bool
decl_in_symtab_p (const_tree decl)
{
return (TREE_CODE (decl) == FUNCTION_DECL
/* Return true if the TM_CLONE bit is set for a given FNDECL. */
-static inline bool
+inline bool
decl_is_tm_clone (const_tree fndecl)
{
cgraph_node *n = cgraph_node::get (fndecl);
the name documents the intent. We require that no GC can occur
within the fprintf call. */
-static inline const char *
+inline const char *
xstrdup_for_dump (const char *transient_str)
{
return ggc_strdup (transient_str);
/* We have an expression tree T that represents a call, either CALL_EXPR
or AGGR_INIT_EXPR. Return a reference to the Nth argument. */
-static inline tree&
+inline tree&
get_nth_callarg (tree t, int n)
{
switch (TREE_CODE (t))
/* Return the canonical value for VAL, following the equivalence chain
towards the earliest (== lowest uid) equivalent value. */
-static inline cselib_val *
+inline cselib_val *
canonical_cselib_val (cselib_val *val)
{
cselib_val *canon;
/* Return nonzero if we can prove that X and Y contain the same value, taking
our gathered information into account. */
-static inline int
+inline int
rtx_equal_for_cselib_p (rtx x, rtx y)
{
if (x == y)
widest_int streamer_read_widest_int (class lto_input_block *);
/* Returns a new bit-packing context for bit-packing into S. */
-static inline struct bitpack_d
+inline struct bitpack_d
bitpack_create (struct lto_output_stream *s)
{
struct bitpack_d bp;
}
/* Pack the NBITS bit sized value VAL into the bit-packing context BP. */
-static inline void
+inline void
bp_pack_value (struct bitpack_d *bp, bitpack_word_t val, unsigned nbits)
{
bitpack_word_t word = bp->word;
/* Pack VAL into the bit-packing context BP, using NBITS for each
coefficient. */
-static inline void
+inline void
bp_pack_poly_value (struct bitpack_d *bp,
const poly_int<NUM_POLY_INT_COEFFS, bitpack_word_t> &val,
unsigned nbits)
}
/* Finishes bit-packing of BP. */
-static inline void
+inline void
streamer_write_bitpack (struct bitpack_d *bp)
{
streamer_write_uhwi_stream ((struct lto_output_stream *) bp->stream,
}
/* Returns a new bit-packing context for bit-unpacking from IB. */
-static inline struct bitpack_d
+inline struct bitpack_d
streamer_read_bitpack (class lto_input_block *ib)
{
struct bitpack_d bp;
}
/* Unpacks NBITS bits from the bit-packing context BP and returns them. */
-static inline bitpack_word_t
+inline bitpack_word_t
bp_unpack_value (struct bitpack_d *bp, unsigned nbits)
{
bitpack_word_t mask, val;
/* Unpacks a polynomial value from the bit-packing context BP in which each
coefficient has NBITS bits. */
-static inline poly_int<NUM_POLY_INT_COEFFS, bitpack_word_t>
+inline poly_int<NUM_POLY_INT_COEFFS, bitpack_word_t>
bp_unpack_poly_value (struct bitpack_d *bp, unsigned nbits)
{
poly_int_pod<NUM_POLY_INT_COEFFS, bitpack_word_t> x;
/* Write a character to the output block. */
-static inline void
+inline void
streamer_write_char_stream (struct lto_output_stream *obs, char c)
{
/* No space left. */
/* Read byte from the input block. */
-static inline unsigned char
+inline unsigned char
streamer_read_uchar (class lto_input_block *ib)
{
if (ib->p >= ib->len)
to be compile time constant.
Be host independent, limit range to 31bits. */
-static inline void
+inline void
streamer_write_hwi_in_range (struct lto_output_stream *obs,
HOST_WIDE_INT min,
HOST_WIDE_INT max,
/* Input VAL into OBS and verify it is in range MIN...MAX that is supposed
to be compile time constant. PURPOSE is used for error reporting. */
-static inline HOST_WIDE_INT
+inline HOST_WIDE_INT
streamer_read_hwi_in_range (class lto_input_block *ib,
const char *purpose,
HOST_WIDE_INT min,
to be compile time constant.
Be host independent, limit range to 31bits. */
-static inline void
+inline void
bp_pack_int_in_range (struct bitpack_d *bp,
HOST_WIDE_INT min,
HOST_WIDE_INT max,
/* Input VAL into BP and verify it is in range MIN...MAX that is supposed
to be compile time constant. PURPOSE is used for error reporting. */
-static inline HOST_WIDE_INT
+inline HOST_WIDE_INT
bp_unpack_int_in_range (struct bitpack_d *bp,
const char *purpose,
HOST_WIDE_INT min,
/* Output the start of a record with TAG to output block OB. */
-static inline void
+inline void
streamer_write_record_start (struct output_block *ob, enum LTO_tags tag)
{
streamer_write_enum (ob->main_stream, LTO_tags, LTO_NUM_TAGS, tag);
/* Return the next tag in the input block IB. */
-static inline enum LTO_tags
+inline enum LTO_tags
streamer_read_record_start (class lto_input_block *ib)
{
return streamer_read_enum (ib, LTO_tags, LTO_NUM_TAGS);
/* Allocate decl_to_instance_map with COUNT slots to begin with, if it
* hasn't been allocated yet. */
-static inline decl_to_instance_map_t *
+inline decl_to_instance_map_t *
maybe_create_decl_to_instance_map (int count = 13)
{
if (!decl_to_instance_map)
Public functions access functions for the dataflow problems.
----------------------------------------------------------------------------*/
-static inline struct df_scan_bb_info *
+inline struct df_scan_bb_info *
df_scan_get_bb_info (unsigned int index)
{
if (index < df_scan->block_info_size)
return NULL;
}
-static inline class df_rd_bb_info *
+inline class df_rd_bb_info *
df_rd_get_bb_info (unsigned int index)
{
if (index < df_rd->block_info_size)
return NULL;
}
-static inline class df_lr_bb_info *
+inline class df_lr_bb_info *
df_lr_get_bb_info (unsigned int index)
{
if (index < df_lr->block_info_size)
return NULL;
}
-static inline class df_md_bb_info *
+inline class df_md_bb_info *
df_md_get_bb_info (unsigned int index)
{
if (index < df_md->block_info_size)
return NULL;
}
-static inline class df_live_bb_info *
+inline class df_live_bb_info *
df_live_get_bb_info (unsigned int index)
{
if (index < df_live->block_info_size)
return NULL;
}
-static inline class df_word_lr_bb_info *
+inline class df_word_lr_bb_info *
df_word_lr_get_bb_info (unsigned int index)
{
if (index < df_word_lr->block_info_size)
return NULL;
}
-static inline class df_mir_bb_info *
+inline class df_mir_bb_info *
df_mir_get_bb_info (unsigned int index)
{
if (index < df_mir->block_info_size)
choose different dataflow problems depending on the optimization
level. */
-static inline bitmap
+inline bitmap
df_get_live_out (basic_block bb)
{
gcc_checking_assert (df_lr);
choose different dataflow problems depending on the optimization
level. */
-static inline bitmap
+inline bitmap
df_get_live_in (basic_block bb)
{
gcc_checking_assert (df_lr);
/* Get basic block info. */
/* Get the artificial defs for a basic block. */
-static inline df_ref
+inline df_ref
df_get_artificial_defs (unsigned int bb_index)
{
return df_scan_get_bb_info (bb_index)->artificial_defs;
/* Get the artificial uses for a basic block. */
-static inline df_ref
+inline df_ref
df_get_artificial_uses (unsigned int bb_index)
{
return df_scan_get_bb_info (bb_index)->artificial_uses;
/* If INSN defines exactly one register, return the associated reference,
otherwise return null. */
-static inline df_ref
+inline df_ref
df_single_def (const df_insn_info *info)
{
df_ref defs = DF_INSN_INFO_DEFS (info);
/* If INSN uses exactly one register, return the associated reference,
otherwise return null. */
-static inline df_ref
+inline df_ref
df_single_use (const df_insn_info *info)
{
df_ref uses = DF_INSN_INFO_USES (info);
diagnostic_client_data_hooks *m_client_data_hooks;
};
-static inline void
+inline void
diagnostic_inhibit_notes (diagnostic_context * context)
{
context->inhibit_notes_p = true;
/* Override the option index to be used for reporting a
diagnostic. */
-static inline void
+inline void
diagnostic_override_option_index (diagnostic_info *info, int optidx)
{
info->option_index = optidx;
/* Return the location associated to this diagnostic. Parameter WHICH
specifies which location. By default, expand the first one. */
-static inline location_t
+inline location_t
diagnostic_location (const diagnostic_info * diagnostic, int which = 0)
{
return diagnostic->message.get_location (which);
/* Return the number of locations to be printed in DIAGNOSTIC. */
-static inline unsigned int
+inline unsigned int
diagnostic_num_locations (const diagnostic_info * diagnostic)
{
return diagnostic->message.m_richloc->get_num_locations ();
consistency. Parameter WHICH specifies which location. By default,
expand the first one. */
-static inline expanded_location
+inline expanded_location
diagnostic_expand_location (const diagnostic_info * diagnostic, int which = 0)
{
return diagnostic->richloc->get_expanded_location (which);
caret line. This is used to build a prefix and also to determine
whether to print one or two caret lines. */
-static inline bool
+inline bool
diagnostic_same_line (const diagnostic_context *context,
expanded_location s1, expanded_location s2)
{
/* Verify invariants of computed dominance information, if internal consistency
checks are enabled. */
-static inline void
+inline void
checking_verify_dominators (cdi_direction dir)
{
if (flag_checking)
typedef enum dump_flag dump_flags_t;
-static inline dump_flags_t
+inline dump_flags_t
operator| (dump_flags_t lhs, dump_flags_t rhs)
{
return (dump_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
| (std::underlying_type<dump_flags_t>::type)rhs);
}
-static inline dump_flags_t
+inline dump_flags_t
operator& (dump_flags_t lhs, dump_flags_t rhs)
{
return (dump_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
& (std::underlying_type<dump_flags_t>::type)rhs);
}
-static inline dump_flags_t
+inline dump_flags_t
operator~ (dump_flags_t flags)
{
return (dump_flags_t)~((std::underlying_type<dump_flags_t>::type)flags);
}
-static inline dump_flags_t &
+inline dump_flags_t &
operator|= (dump_flags_t &lhs, dump_flags_t rhs)
{
lhs = (dump_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
return lhs;
}
-static inline dump_flags_t &
+inline dump_flags_t &
operator&= (dump_flags_t &lhs, dump_flags_t rhs)
{
lhs = (dump_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
typedef enum optgroup_flag optgroup_flags_t;
-static inline optgroup_flags_t
+inline optgroup_flags_t
operator| (optgroup_flags_t lhs, optgroup_flags_t rhs)
{
return (optgroup_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
| (std::underlying_type<dump_flags_t>::type)rhs);
}
-static inline optgroup_flags_t &
+inline optgroup_flags_t &
operator|= (optgroup_flags_t &lhs, optgroup_flags_t rhs)
{
lhs = (optgroup_flags_t)((std::underlying_type<dump_flags_t>::type)lhs
extern void set_dump_file (FILE *new_dump_file);
/* Return true if any of the dumps is enabled, false otherwise. */
-static inline bool
+inline bool
dump_enabled_p (void)
{
return dumps_are_enabled;
/* Return the current sequence. */
-static inline struct sequence_stack *
+inline struct sequence_stack *
get_current_sequence (void)
{
return &crtl->emit.seq;
/* Return the outermost sequence. */
-static inline struct sequence_stack *
+inline struct sequence_stack *
get_topmost_sequence (void)
{
struct sequence_stack *seq, *top;
/* Return the first insn of the current sequence or current function. */
-static inline rtx_insn *
+inline rtx_insn *
get_insns (void)
{
return get_current_sequence ()->first;
/* Specify a new insn as the first in the chain. */
-static inline void
+inline void
set_first_insn (rtx_insn *insn)
{
gcc_checking_assert (!insn || !PREV_INSN (insn));
/* Return the last insn emitted in current sequence or current function. */
-static inline rtx_insn *
+inline rtx_insn *
get_last_insn (void)
{
return get_current_sequence ()->last;
/* Specify a new insn as the last in the chain. */
-static inline void
+inline void
set_last_insn (rtx_insn *insn)
{
gcc_checking_assert (!insn || !NEXT_INSN (insn));
/* Return a number larger than any instruction's uid in this function. */
-static inline int
+inline int
get_max_uid (void)
{
return crtl->emit.x_cur_insn_uid;
/* Pre-order iteration within the eh_region tree. */
-static inline eh_region
+inline eh_region
ehr_next (eh_region r, eh_region start)
{
if (r->inner)
/* Return a pointer to the alg_hash_entry at IDX. */
-static inline struct alg_hash_entry *
+inline struct alg_hash_entry *
alg_hash_entry_ptr (int idx)
{
return &this_target_expmed->x_alg_hash[idx];
/* Return true if the x_alg_hash field might have been used. */
-static inline bool
+inline bool
alg_hash_used_p (void)
{
return this_target_expmed->x_alg_hash_used_p;
/* Set whether the x_alg_hash field might have been used. */
-static inline void
+inline void
set_alg_hash_used_p (bool usedp)
{
this_target_expmed->x_alg_hash_used_p = usedp;
/* Compute an index into the cost arrays by mode class. */
-static inline int
+inline int
expmed_mode_index (machine_mode mode)
{
switch (GET_MODE_CLASS (mode))
a particular operation performed in MODE is cheap when optimizing
for SPEED. */
-static inline bool *
+inline bool *
expmed_op_cheap_ptr (struct expmed_op_cheap *eoc, bool speed,
machine_mode mode)
{
/* Return a pointer to a cost contained in COSTS when a particular
operation is performed in MODE when optimizing for SPEED. */
-static inline int *
+inline int *
expmed_op_cost_ptr (struct expmed_op_costs *costs, bool speed,
machine_mode mode)
{
/* Subroutine of {set_,}sdiv_pow2_cheap. Not to be used otherwise. */
-static inline bool *
+inline bool *
sdiv_pow2_cheap_ptr (bool speed, machine_mode mode)
{
return expmed_op_cheap_ptr (&this_target_expmed->x_sdiv_pow2_cheap,
/* Set whether a signed division by a power of 2 is cheap in MODE
when optimizing for SPEED. */
-static inline void
+inline void
set_sdiv_pow2_cheap (bool speed, machine_mode mode, bool cheap_p)
{
*sdiv_pow2_cheap_ptr (speed, mode) = cheap_p;
/* Return whether a signed division by a power of 2 is cheap in MODE
when optimizing for SPEED. */
-static inline bool
+inline bool
sdiv_pow2_cheap (bool speed, machine_mode mode)
{
return *sdiv_pow2_cheap_ptr (speed, mode);
/* Subroutine of {set_,}smod_pow2_cheap. Not to be used otherwise. */
-static inline bool *
+inline bool *
smod_pow2_cheap_ptr (bool speed, machine_mode mode)
{
return expmed_op_cheap_ptr (&this_target_expmed->x_smod_pow2_cheap,
/* Set whether a signed modulo by a power of 2 is CHEAP in MODE when
optimizing for SPEED. */
-static inline void
+inline void
set_smod_pow2_cheap (bool speed, machine_mode mode, bool cheap)
{
*smod_pow2_cheap_ptr (speed, mode) = cheap;
/* Return whether a signed modulo by a power of 2 is cheap in MODE
when optimizing for SPEED. */
-static inline bool
+inline bool
smod_pow2_cheap (bool speed, machine_mode mode)
{
return *smod_pow2_cheap_ptr (speed, mode);
/* Subroutine of {set_,}zero_cost. Not to be used otherwise. */
-static inline int *
+inline int *
zero_cost_ptr (bool speed)
{
return &this_target_expmed->x_zero_cost[speed];
/* Set the COST of loading zero when optimizing for SPEED. */
-static inline void
+inline void
set_zero_cost (bool speed, int cost)
{
*zero_cost_ptr (speed) = cost;
/* Return the COST of loading zero when optimizing for SPEED. */
-static inline int
+inline int
zero_cost (bool speed)
{
return *zero_cost_ptr (speed);
/* Subroutine of {set_,}add_cost. Not to be used otherwise. */
-static inline int *
+inline int *
add_cost_ptr (bool speed, machine_mode mode)
{
return expmed_op_cost_ptr (&this_target_expmed->x_add_cost, speed, mode);
/* Set the COST of computing an add in MODE when optimizing for SPEED. */
-static inline void
+inline void
set_add_cost (bool speed, machine_mode mode, int cost)
{
*add_cost_ptr (speed, mode) = cost;
/* Return the cost of computing an add in MODE when optimizing for SPEED. */
-static inline int
+inline int
add_cost (bool speed, machine_mode mode)
{
return *add_cost_ptr (speed, mode);
/* Subroutine of {set_,}neg_cost. Not to be used otherwise. */
-static inline int *
+inline int *
neg_cost_ptr (bool speed, machine_mode mode)
{
return expmed_op_cost_ptr (&this_target_expmed->x_neg_cost, speed, mode);
/* Set the COST of computing a negation in MODE when optimizing for SPEED. */
-static inline void
+inline void
set_neg_cost (bool speed, machine_mode mode, int cost)
{
*neg_cost_ptr (speed, mode) = cost;
/* Return the cost of computing a negation in MODE when optimizing for
SPEED. */
-static inline int
+inline int
neg_cost (bool speed, machine_mode mode)
{
return *neg_cost_ptr (speed, mode);
/* Subroutine of {set_,}shift_cost. Not to be used otherwise. */
-static inline int *
+inline int *
shift_cost_ptr (bool speed, machine_mode mode, int bits)
{
return expmed_op_cost_ptr (&this_target_expmed->x_shift_cost[bits],
/* Set the COST of doing a shift in MODE by BITS when optimizing for SPEED. */
-static inline void
+inline void
set_shift_cost (bool speed, machine_mode mode, int bits, int cost)
{
*shift_cost_ptr (speed, mode, bits) = cost;
/* Return the cost of doing a shift in MODE by BITS when optimizing for
SPEED. */
-static inline int
+inline int
shift_cost (bool speed, machine_mode mode, int bits)
{
return *shift_cost_ptr (speed, mode, bits);
/* Subroutine of {set_,}shiftadd_cost. Not to be used otherwise. */
-static inline int *
+inline int *
shiftadd_cost_ptr (bool speed, machine_mode mode, int bits)
{
return expmed_op_cost_ptr (&this_target_expmed->x_shiftadd_cost[bits],
/* Set the COST of doing a shift in MODE by BITS followed by an add when
optimizing for SPEED. */
-static inline void
+inline void
set_shiftadd_cost (bool speed, machine_mode mode, int bits, int cost)
{
*shiftadd_cost_ptr (speed, mode, bits) = cost;
/* Return the cost of doing a shift in MODE by BITS followed by an add
when optimizing for SPEED. */
-static inline int
+inline int
shiftadd_cost (bool speed, machine_mode mode, int bits)
{
return *shiftadd_cost_ptr (speed, mode, bits);
/* Subroutine of {set_,}shiftsub0_cost. Not to be used otherwise. */
-static inline int *
+inline int *
shiftsub0_cost_ptr (bool speed, machine_mode mode, int bits)
{
return expmed_op_cost_ptr (&this_target_expmed->x_shiftsub0_cost[bits],
/* Set the COST of doing a shift in MODE by BITS and then subtracting a
value when optimizing for SPEED. */
-static inline void
+inline void
set_shiftsub0_cost (bool speed, machine_mode mode, int bits, int cost)
{
*shiftsub0_cost_ptr (speed, mode, bits) = cost;
/* Return the cost of doing a shift in MODE by BITS and then subtracting
a value when optimizing for SPEED. */
-static inline int
+inline int
shiftsub0_cost (bool speed, machine_mode mode, int bits)
{
return *shiftsub0_cost_ptr (speed, mode, bits);
/* Subroutine of {set_,}shiftsub1_cost. Not to be used otherwise. */
-static inline int *
+inline int *
shiftsub1_cost_ptr (bool speed, machine_mode mode, int bits)
{
return expmed_op_cost_ptr (&this_target_expmed->x_shiftsub1_cost[bits],
/* Set the COST of subtracting a shift in MODE by BITS from a value when
optimizing for SPEED. */
-static inline void
+inline void
set_shiftsub1_cost (bool speed, machine_mode mode, int bits, int cost)
{
*shiftsub1_cost_ptr (speed, mode, bits) = cost;
/* Return the cost of subtracting a shift in MODE by BITS from a value
when optimizing for SPEED. */
-static inline int
+inline int
shiftsub1_cost (bool speed, machine_mode mode, int bits)
{
return *shiftsub1_cost_ptr (speed, mode, bits);
/* Subroutine of {set_,}mul_cost. Not to be used otherwise. */
-static inline int *
+inline int *
mul_cost_ptr (bool speed, machine_mode mode)
{
return expmed_op_cost_ptr (&this_target_expmed->x_mul_cost, speed, mode);
/* Set the COST of doing a multiplication in MODE when optimizing for
SPEED. */
-static inline void
+inline void
set_mul_cost (bool speed, machine_mode mode, int cost)
{
*mul_cost_ptr (speed, mode) = cost;
/* Return the cost of doing a multiplication in MODE when optimizing
for SPEED. */
-static inline int
+inline int
mul_cost (bool speed, machine_mode mode)
{
return *mul_cost_ptr (speed, mode);
/* Subroutine of {set_,}sdiv_cost. Not to be used otherwise. */
-static inline int *
+inline int *
sdiv_cost_ptr (bool speed, machine_mode mode)
{
return expmed_op_cost_ptr (&this_target_expmed->x_sdiv_cost, speed, mode);
/* Set the COST of doing a signed division in MODE when optimizing
for SPEED. */
-static inline void
+inline void
set_sdiv_cost (bool speed, machine_mode mode, int cost)
{
*sdiv_cost_ptr (speed, mode) = cost;
/* Return the cost of doing a signed division in MODE when optimizing
for SPEED. */
-static inline int
+inline int
sdiv_cost (bool speed, machine_mode mode)
{
return *sdiv_cost_ptr (speed, mode);
/* Subroutine of {set_,}udiv_cost. Not to be used otherwise. */
-static inline int *
+inline int *
udiv_cost_ptr (bool speed, machine_mode mode)
{
return expmed_op_cost_ptr (&this_target_expmed->x_udiv_cost, speed, mode);
/* Set the COST of doing an unsigned division in MODE when optimizing
for SPEED. */
-static inline void
+inline void
set_udiv_cost (bool speed, machine_mode mode, int cost)
{
*udiv_cost_ptr (speed, mode) = cost;
/* Return the cost of doing an unsigned division in MODE when
optimizing for SPEED. */
-static inline int
+inline int
udiv_cost (bool speed, machine_mode mode)
{
return *udiv_cost_ptr (speed, mode);
/* Subroutine of {set_,}mul_widen_cost. Not to be used otherwise. */
-static inline int *
+inline int *
mul_widen_cost_ptr (bool speed, machine_mode mode)
{
gcc_assert (GET_MODE_CLASS (mode) == MODE_INT);
/* Set the COST for computing a widening multiplication in MODE when
optimizing for SPEED. */
-static inline void
+inline void
set_mul_widen_cost (bool speed, machine_mode mode, int cost)
{
*mul_widen_cost_ptr (speed, mode) = cost;
/* Return the cost for computing a widening multiplication in MODE when
optimizing for SPEED. */
-static inline int
+inline int
mul_widen_cost (bool speed, machine_mode mode)
{
return *mul_widen_cost_ptr (speed, mode);
/* Subroutine of {set_,}mul_highpart_cost. Not to be used otherwise. */
-static inline int *
+inline int *
mul_highpart_cost_ptr (bool speed, machine_mode mode)
{
gcc_assert (GET_MODE_CLASS (mode) == MODE_INT);
/* Set the COST for computing the high part of a multiplication in MODE
when optimizing for SPEED. */
-static inline void
+inline void
set_mul_highpart_cost (bool speed, machine_mode mode, int cost)
{
*mul_highpart_cost_ptr (speed, mode) = cost;
/* Return the cost for computing the high part of a multiplication in MODE
when optimizing for SPEED. */
-static inline int
+inline int
mul_highpart_cost (bool speed, machine_mode mode)
{
return *mul_highpart_cost_ptr (speed, mode);
/* Subroutine of {set_,}convert_cost. Not to be used otherwise. */
-static inline int *
+inline int *
convert_cost_ptr (machine_mode to_mode, machine_mode from_mode,
bool speed)
{
/* Set the COST for converting from FROM_MODE to TO_MODE when optimizing
for SPEED. */
-static inline void
+inline void
set_convert_cost (machine_mode to_mode, machine_mode from_mode,
bool speed, int cost)
{
/* Return the cost for converting from FROM_MODE to TO_MODE when optimizing
for SPEED. */
-static inline int
+inline int
convert_cost (machine_mode to_mode, machine_mode from_mode,
bool speed)
{
extern rtx emit_block_op_via_libcall (enum built_in_function, rtx, rtx, rtx,
bool);
-static inline rtx
+inline rtx
emit_block_copy_via_libcall (rtx dst, rtx src, rtx size, bool tailcall = false)
{
return emit_block_op_via_libcall (BUILT_IN_MEMCPY, dst, src, size, tailcall);
}
-static inline rtx
+inline rtx
emit_block_move_via_libcall (rtx dst, rtx src, rtx size, bool tailcall = false)
{
return emit_block_op_via_libcall (BUILT_IN_MEMMOVE, dst, src, size, tailcall);
}
-static inline rtx
+inline rtx
emit_block_comp_via_libcall (rtx dst, rtx src, rtx size, bool tailcall = false)
{
return emit_block_op_via_libcall (BUILT_IN_MEMCMP, dst, src, size, tailcall);
extern rtx copy_blkmode_to_reg (machine_mode, tree);
/* Mark REG as holding a parameter for the next CALL_INSN. */
-static inline void
+inline void
use_reg (rtx *fusage, rtx reg)
{
use_reg_mode (fusage, reg, VOIDmode);
}
/* Mark REG as clobbered by the call with FUSAGE as CALL_INSN_FUNCTION_USAGE. */
-static inline void
+inline void
clobber_reg (rtx *fusage, rtx reg)
{
clobber_reg_mode (fusage, reg, VOIDmode);
/* Generate code for computing expression EXP.
An rtx for the computed value is returned. The value is never null.
In the case of a void EXP, const0_rtx is returned. */
-static inline rtx
+inline rtx
expand_expr (tree exp, rtx target, machine_mode mode,
enum expand_modifier modifier)
{
return expand_expr_real (exp, target, mode, modifier, NULL, false);
}
-static inline rtx
+inline rtx
expand_normal (tree exp)
{
return expand_expr_real (exp, NULL_RTX, VOIDmode, EXPAND_NORMAL, NULL, false);
/* Return a CONST_FIXED from a bit payload and machine mode MODE.
The bits in PAYLOAD are sign-extended/zero-extended according to MODE. */
-static inline rtx
+inline rtx
const_fixed_from_double_int (double_int payload,
scalar_mode mode)
{
int get_c_kind (const char *, CInteropKind_t *);
const char *gfc_closest_fuzzy_match (const char *, char **);
-static inline void
+inline void
vec_push (char **&optr, size_t &osz, const char *elt)
{
/* {auto,}vec.safe_push () replacement. Don't ask.. */
char gfc_type_letter (bt, bool logical_equals_int = false);
int gfc_type_abi_kind (bt, int);
-static inline int
+inline int
gfc_type_abi_kind (gfc_typespec *ts)
{
return gfc_type_abi_kind (ts->type, ts->kind);
const char *get_file_srcdir_relative_path (const input_file *inpf);
/* Get the name of an input file. */
-static inline const char*
+inline const char*
get_input_file_name (const input_file *inpf)
{
if (inpf)
some GC roots may be missed, which is a much harder-to-debug problem.
*/
-static inline lang_bitmap
+inline lang_bitmap
get_lang_bitmap (const input_file* inpf)
{
if (inpf == NULL)
/* Set the bitmap returned by get_lang_bitmap. The only legitimate
callers of this function are read_input_list & read_state_*. */
-static inline void
+inline void
set_lang_bitmap (input_file* inpf, lang_bitmap n)
{
gcc_assert (inpf);
/* Test if a type is a union or a structure, perhaps a language
specific one. */
-static inline bool
+inline bool
union_or_struct_p (enum typekind kind)
{
return (kind == TYPE_UNION
|| kind == TYPE_USER_STRUCT);
}
-static inline bool
+inline bool
union_or_struct_p (const_type_p x)
{
return union_or_struct_p (x->kind);
}
/* Give the file location of a type, if any. */
-static inline struct fileloc*
+inline struct fileloc*
type_fileloc (type_p t)
{
if (!t)
/* Return true if a conversion from either type of TYPE1 and TYPE2
to the other is not required. Otherwise return false. */
-static inline bool
+inline bool
types_compatible_p (tree type1, tree type2)
{
return (type1 == type2
/* Return true if TYPE is a suitable type for a scalar register variable. */
-static inline bool
+inline bool
is_gimple_reg_type (tree type)
{
return !AGGREGATE_TYPE_P (type);
/* Return true if T is a variable. */
-static inline bool
+inline bool
is_gimple_variable (tree t)
{
return (TREE_CODE (t) == VAR_DECL
/* Return true if T is a GIMPLE identifier (something with an address). */
-static inline bool
+inline bool
is_gimple_id (tree t)
{
return (is_gimple_variable (t)
/* Return true if OP, an SSA name or a DECL is a virtual operand. */
-static inline bool
+inline bool
virtual_operand_p (tree op)
{
if (TREE_CODE (op) == SSA_NAME)
/* Return true if T is something whose address can be taken. */
-static inline bool
+inline bool
is_gimple_addressable (tree t)
{
return (is_gimple_id (t) || handled_component_p (t)
/* Return true if T is a valid gimple constant. */
-static inline bool
+inline bool
is_gimple_constant (const_tree t)
{
switch (TREE_CODE (t))
/* A wrapper around extract_ops_from_tree with 3 ops, for callers which
expect to see only a maximum of two operands. */
-static inline void
+inline void
extract_ops_from_tree (tree expr, enum tree_code *code, tree *op0,
tree *op1)
{
/* Given a valid GIMPLE_CALL function address return the FUNCTION_DECL
associated with the callee if known. Otherwise return NULL_TREE. */
-static inline tree
+inline tree
gimple_call_addr_fndecl (const_tree fn)
{
if (fn && TREE_CODE (fn) == ADDR_EXPR)
/* Return a new iterator pointing to GIMPLE_SEQ's first statement. */
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_start (gimple_seq &seq)
{
gimple_stmt_iterator i;
return i;
}
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_none (void)
{
gimple_stmt_iterator i;
/* Return a new iterator pointing to the first statement in basic block BB. */
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_start_bb (basic_block bb)
{
gimple_stmt_iterator i;
/* Return a new iterator initially pointing to GIMPLE_SEQ's last statement. */
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_last (gimple_seq &seq)
{
gimple_stmt_iterator i;
/* Return a new iterator pointing to the last statement in basic block BB. */
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_last_bb (basic_block bb)
{
gimple_stmt_iterator i;
/* Return true if I is at the end of its sequence. */
-static inline bool
+inline bool
gsi_end_p (gimple_stmt_iterator i)
{
return i.ptr == NULL;
/* Return true if I is one statement before the end of its sequence. */
-static inline bool
+inline bool
gsi_one_before_end_p (gimple_stmt_iterator i)
{
return i.ptr != NULL && i.ptr->next == NULL;
/* Advance the iterator to the next gimple statement. */
-static inline void
+inline void
gsi_next (gimple_stmt_iterator *i)
{
i->ptr = i->ptr->next;
/* Advance the iterator to the previous gimple statement. */
-static inline void
+inline void
gsi_prev (gimple_stmt_iterator *i)
{
gimple *prev = i->ptr->prev;
/* Return the current stmt. */
-static inline gimple *
+inline gimple *
gsi_stmt (gimple_stmt_iterator i)
{
return i.ptr;
/* Return a block statement iterator that points to the first
non-label statement in block BB. */
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_after_labels (basic_block bb)
{
gimple_stmt_iterator gsi = gsi_start_bb (bb);
/* Return a statement iterator that points to the first
non-label statement in sequence SEQ. */
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_after_labels (gimple_seq &seq)
{
gimple_stmt_iterator gsi = gsi_start (seq);
/* Advance the iterator to the next non-debug gimple statement. */
-static inline void
+inline void
gsi_next_nondebug (gimple_stmt_iterator *i)
{
do
/* Advance the iterator to the previous non-debug gimple statement. */
-static inline void
+inline void
gsi_prev_nondebug (gimple_stmt_iterator *i)
{
do
/* Return a new iterator pointing to the first non-debug statement in
SEQ. */
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_start_nondebug (gimple_seq seq)
{
gimple_stmt_iterator gsi = gsi_start (seq);
/* Return a new iterator pointing to the first non-debug statement in
basic block BB. */
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_start_nondebug_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_start_bb (bb);
/* Return a new iterator pointing to the first non-debug non-label statement in
basic block BB. */
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_start_nondebug_after_labels_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_after_labels (bb);
/* Return a new iterator pointing to the last non-debug statement in
basic block BB. */
-static inline gimple_stmt_iterator
+inline gimple_stmt_iterator
gsi_last_nondebug_bb (basic_block bb)
{
gimple_stmt_iterator i = gsi_last_bb (bb);
/* Return true if I is followed only by debug statements in its
sequence. */
-static inline bool
+inline bool
gsi_one_nondebug_before_end_p (gimple_stmt_iterator i)
{
if (gsi_one_before_end_p (i))
/* Advance I statement iterator to the next non-virtual GIMPLE_PHI
statement. */
-static inline void
+inline void
gsi_next_nonvirtual_phi (gphi_iterator *i)
{
do
/* Return a new iterator pointing to the first non-virtual phi statement in
basic block BB. */
-static inline gphi_iterator
+inline gphi_iterator
gsi_start_nonvirtual_phis (basic_block bb)
{
gphi_iterator i = gsi_start_phis (bb);
/* Return the basic block associated with this iterator. */
-static inline basic_block
+inline basic_block
gsi_bb (gimple_stmt_iterator i)
{
return i.bb;
/* Return the sequence associated with this iterator. */
-static inline gimple_seq
+inline gimple_seq
gsi_seq (gimple_stmt_iterator i)
{
return *i.seq;
/* Determine whether SEQ is a nondebug singleton. */
-static inline bool
+inline bool
gimple_seq_nondebug_singleton_p (gimple_seq seq)
{
gimple_stmt_iterator gsi;
/* Return the predictor of GIMPLE_PREDICT statement GS. */
-static inline enum br_predictor
+inline enum br_predictor
gimple_predict_predictor (const gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT. */
-static inline void
+inline void
gimple_predict_set_predictor (gimple *gs, enum br_predictor predictor)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
/* Return the outcome of GIMPLE_PREDICT statement GS. */
-static inline enum prediction
+inline enum prediction
gimple_predict_outcome (const gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME. */
-static inline void
+inline void
gimple_predict_set_outcome (gimple *gs, enum prediction outcome)
{
GIMPLE_CHECK (gs, GIMPLE_PREDICT);
/* Return true if GS is a GIMPLE_PREDICT statement. */
-static inline bool
+inline bool
is_gimple_predict (const gimple *gs)
{
return gimple_code (gs) == GIMPLE_PREDICT;
// Return the type of range which statement S calculates. If the type is
// unsupported or no type can be determined, return NULL_TREE.
-static inline tree
+inline tree
gimple_range_type (const gimple *s)
{
tree lhs = gimple_get_lhs (s);
// Return EXP if it is an SSA_NAME with a type supported by gimple ranges.
-static inline tree
+inline tree
gimple_range_ssa_p (tree exp)
{
if (exp && TREE_CODE (exp) == SSA_NAME &&
// Return true if TYPE1 and TYPE2 are compatible range types.
-static inline bool
+inline bool
range_compatible_p (tree type1, tree type2)
{
// types_compatible_p requires conversion in both directions to be useless.
gimple_in_ssa_p is queried by gimplifier in various early stages before SSA
infrastructure is initialized. Check for presence of the datastructures
at first place. */
-static inline bool
+inline bool
gimple_in_ssa_p (const struct function *fun)
{
return fun && fun->gimple_df && fun->gimple_df->in_ssa_p;
}
/* Artificial variable used for the virtual operand FUD chain. */
-static inline tree
+inline tree
gimple_vop (const struct function *fun)
{
gcc_checking_assert (fun && fun->gimple_df);
/* Return the set of VUSE operand for statement G. */
-static inline use_operand_p
+inline use_operand_p
gimple_vuse_op (const gimple *g)
{
struct use_optype_d *ops;
/* Return the set of VDEF operand for statement G. */
-static inline def_operand_p
+inline def_operand_p
gimple_vdef_op (gimple *g)
{
gimple_statement_with_memory_ops *mem_ops_stmt =
/* Mark statement S as modified, and update it. */
-static inline void
+inline void
update_stmt (gimple *s)
{
if (gimple_has_ops (s))
/* Update statement S if it has been optimized. */
-static inline void
+inline void
update_stmt_if_modified (gimple *s)
{
if (gimple_modified_p (s))
/* Mark statement S as modified, and update it. */
-static inline void
+inline void
update_stmt_fn (struct function *fn, gimple *s)
{
if (gimple_has_ops (s))
(CODE), ERROR_MARK); \
} while (0)
template <typename T>
-static inline T
+inline T
GIMPLE_CHECK2(const gimple *gs,
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
const char *file = __builtin_FILE (),
return ret;
}
template <typename T>
-static inline T
+inline T
GIMPLE_CHECK2(gimple *gs,
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
const char *file = __builtin_FILE (),
#define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR)))
#define GIMPLE_CHECK(GS, CODE) (void)0
template <typename T>
-static inline T
+inline T
GIMPLE_CHECK2(gimple *gs)
{
return as_a <T> (gs);
}
template <typename T>
-static inline T
+inline T
GIMPLE_CHECK2(const gimple *gs)
{
return as_a <T> (gs);
} elt_t;
/* Get the number of the next statement uid to be allocated. */
-static inline unsigned int
+inline unsigned int
gimple_stmt_max_uid (struct function *fn)
{
return fn->last_stmt_uid;
}
/* Set the number of the next statement uid to be allocated. */
-static inline void
+inline void
set_gimple_stmt_max_uid (struct function *fn, unsigned int maxid)
{
fn->last_stmt_uid = maxid;
}
/* Set the number of the next statement uid to be allocated. */
-static inline unsigned int
+inline unsigned int
inc_gimple_stmt_max_uid (struct function *fn)
{
return fn->last_stmt_uid++;
/* Return the first node in GIMPLE sequence S. */
-static inline gimple_seq_node
+inline gimple_seq_node
gimple_seq_first (gimple_seq s)
{
return s;
/* Return the first statement in GIMPLE sequence S. */
-static inline gimple *
+inline gimple *
gimple_seq_first_stmt (gimple_seq s)
{
gimple_seq_node n = gimple_seq_first (s);
/* Return the first statement in GIMPLE sequence S as a gbind *,
verifying that it has code GIMPLE_BIND in a checked build. */
-static inline gbind *
+inline gbind *
gimple_seq_first_stmt_as_a_bind (gimple_seq s)
{
gimple_seq_node n = gimple_seq_first (s);
/* Return the last node in GIMPLE sequence S. */
-static inline gimple_seq_node
+inline gimple_seq_node
gimple_seq_last (gimple_seq s)
{
return s ? s->prev : NULL;
/* Return the last statement in GIMPLE sequence S. */
-static inline gimple *
+inline gimple *
gimple_seq_last_stmt (gimple_seq s)
{
gimple_seq_node n = gimple_seq_last (s);
/* Set the last node in GIMPLE sequence *PS to LAST. */
-static inline void
+inline void
gimple_seq_set_last (gimple_seq *ps, gimple_seq_node last)
{
(*ps)->prev = last;
/* Set the first node in GIMPLE sequence *PS to FIRST. */
-static inline void
+inline void
gimple_seq_set_first (gimple_seq *ps, gimple_seq_node first)
{
*ps = first;
/* Return true if GIMPLE sequence S is empty. */
-static inline bool
+inline bool
gimple_seq_empty_p (gimple_seq s)
{
return s == NULL;
/* Allocate a new sequence and initialize its first element with STMT. */
-static inline gimple_seq
+inline gimple_seq
gimple_seq_alloc_with_stmt (gimple *stmt)
{
gimple_seq seq = NULL;
/* Returns the sequence of statements in BB. */
-static inline gimple_seq
+inline gimple_seq
bb_seq (const_basic_block bb)
{
return (!(bb->flags & BB_RTL)) ? bb->il.gimple.seq : NULL;
}
-static inline gimple_seq *
+inline gimple_seq *
bb_seq_addr (basic_block bb)
{
return (!(bb->flags & BB_RTL)) ? &bb->il.gimple.seq : NULL;
/* Sets the sequence of statements in BB to SEQ. */
-static inline void
+inline void
set_bb_seq (basic_block bb, gimple_seq seq)
{
gcc_checking_assert (!(bb->flags & BB_RTL));
/* Return the code for GIMPLE statement G. */
-static inline enum gimple_code
+inline enum gimple_code
gimple_code (const gimple *g)
{
return g->code;
/* Return the GSS code used by a GIMPLE code. */
-static inline enum gimple_statement_structure_enum
+inline enum gimple_statement_structure_enum
gss_for_code (enum gimple_code code)
{
gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE);
/* Return which GSS code is used by GS. */
-static inline enum gimple_statement_structure_enum
+inline enum gimple_statement_structure_enum
gimple_statement_structure (gimple *gs)
{
return gss_for_code (gimple_code (gs));
/* Return true if statement G has sub-statements. This is only true for
High GIMPLE statements. */
-static inline bool
+inline bool
gimple_has_substatements (gimple *g)
{
switch (gimple_code (g))
/* Return the basic block holding statement G. */
-static inline basic_block
+inline basic_block
gimple_bb (const gimple *g)
{
return g->bb;
/* Return the lexical scope block holding statement G. */
-static inline tree
+inline tree
gimple_block (const gimple *g)
{
return LOCATION_BLOCK (g->location);
}
/* Forward declare. */
-static inline void gimple_set_location (gimple *, location_t);
+inline void gimple_set_location (gimple *, location_t);
/* Set BLOCK to be the lexical scope block holding statement G. */
-static inline void
+inline void
gimple_set_block (gimple *g, tree block)
{
gimple_set_location (g, set_block (g->location, block));
/* Return location information for statement G. */
-static inline location_t
+inline location_t
gimple_location (const gimple *g)
{
return g->location;
/* Return location information for statement G if g is not NULL.
Otherwise, UNKNOWN_LOCATION is returned. */
-static inline location_t
+inline location_t
gimple_location_safe (const gimple *g)
{
return g ? gimple_location (g) : UNKNOWN_LOCATION;
/* Set location information for statement G. */
-static inline void
+inline void
gimple_set_location (gimple *g, location_t location)
{
/* Copy the no-warning data to the statement location. */
/* Return address of the location information for statement G. */
-static inline location_t *
+inline location_t *
gimple_location_ptr (gimple *g)
{
return &g->location;
/* Return true if G contains location information. */
-static inline bool
+inline bool
gimple_has_location (const gimple *g)
{
return LOCATION_LOCUS (gimple_location (g)) != UNKNOWN_LOCATION;
/* Return non-artificial location information for statement G. */
-static inline location_t
+inline location_t
gimple_nonartificial_location (const gimple *g)
{
location_t *ploc = NULL;
/* Return the file name of the location of STMT. */
-static inline const char *
+inline const char *
gimple_filename (const gimple *stmt)
{
return LOCATION_FILE (gimple_location (stmt));
/* Return the line number of the location of STMT. */
-static inline int
+inline int
gimple_lineno (const gimple *stmt)
{
return LOCATION_LINE (gimple_location (stmt));
/* Determine whether SEQ is a singleton. */
-static inline bool
+inline bool
gimple_seq_singleton_p (gimple_seq seq)
{
return ((gimple_seq_first (seq) != NULL)
/* Return true if no warnings should be emitted for statement STMT. */
-static inline bool
+inline bool
gimple_no_warning_p (const gimple *stmt)
{
return stmt->no_warning;
/* Set the no_warning flag of STMT to NO_WARNING. */
-static inline void
+inline void
gimple_set_no_warning (gimple *stmt, bool no_warning)
{
stmt->no_warning = (unsigned) no_warning;
struct gimple.
*/
-static inline void
+inline void
gimple_set_visited (gimple *stmt, bool visited_p)
{
stmt->visited = (unsigned) visited_p;
statement by reading the comments of the 'visited' data member of
struct gimple. */
-static inline bool
+inline bool
gimple_visited_p (gimple *stmt)
{
return stmt->visited;
You can learn more about the PLF property by reading the comment of
the 'plf' data member of struct gimple_statement_structure. */
-static inline void
+inline void
gimple_set_plf (gimple *stmt, enum plf_mask plf, bool val_p)
{
if (val_p)
You can learn more about the plf property by reading the comment of
the 'plf' data member of struct gimple_statement_structure. */
-static inline unsigned int
+inline unsigned int
gimple_plf (gimple *stmt, enum plf_mask plf)
{
return stmt->plf & ((unsigned int) plf);
contains any useful value when the pass starts and thus can set it
to any value it sees fit. */
-static inline void
+inline void
gimple_set_uid (gimple *g, unsigned uid)
{
g->uid = uid;
contains any useful value when the pass starts and thus can set it
to any value it sees fit. */
-static inline unsigned
+inline unsigned
gimple_uid (const gimple *g)
{
return g->uid;
/* Make statement G a singleton sequence. */
-static inline void
+inline void
gimple_init_singleton (gimple *g)
{
g->next = NULL;
/* Return true if GIMPLE statement G has register or memory operands. */
-static inline bool
+inline bool
gimple_has_ops (const gimple *g)
{
return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
/* Return true if GIMPLE statement G has memory operands. */
-static inline bool
+inline bool
gimple_has_mem_ops (const gimple *g)
{
return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
/* Return the set of USE operands for statement G. */
-static inline struct use_optype_d *
+inline struct use_optype_d *
gimple_use_ops (const gimple *g)
{
const gimple_statement_with_ops *ops_stmt =
/* Set USE to be the set of USE operands for statement G. */
-static inline void
+inline void
gimple_set_use_ops (gimple *g, struct use_optype_d *use)
{
gimple_statement_with_ops *ops_stmt =
/* Return the single VUSE operand of the statement G. */
-static inline tree
+inline tree
gimple_vuse (const gimple *g)
{
const gimple_statement_with_memory_ops *mem_ops_stmt =
/* Return the single VDEF operand of the statement G. */
-static inline tree
+inline tree
gimple_vdef (const gimple *g)
{
const gimple_statement_with_memory_ops *mem_ops_stmt =
/* Return the single VUSE operand of the statement G. */
-static inline tree *
+inline tree *
gimple_vuse_ptr (gimple *g)
{
gimple_statement_with_memory_ops *mem_ops_stmt =
/* Return the single VDEF operand of the statement G. */
-static inline tree *
+inline tree *
gimple_vdef_ptr (gimple *g)
{
gimple_statement_with_memory_ops *mem_ops_stmt =
/* Set the single VUSE operand of the statement G. */
-static inline void
+inline void
gimple_set_vuse (gimple *g, tree vuse)
{
gimple_statement_with_memory_ops *mem_ops_stmt =
/* Set the single VDEF operand of the statement G. */
-static inline void
+inline void
gimple_set_vdef (gimple *g, tree vdef)
{
gimple_statement_with_memory_ops *mem_ops_stmt =
/* Return true if statement G has operands and the modified field has
been set. */
-static inline bool
+inline bool
gimple_modified_p (const gimple *g)
{
return (gimple_has_ops (g)) ? (bool) g->modified : false;
/* Set the MODIFIED flag to MODIFIEDP, iff the gimple statement G has
a MODIFIED field. */
-static inline void
+inline void
gimple_set_modified (gimple *s, bool modifiedp)
{
if (gimple_has_ops (s))
/* Return true if statement STMT contains volatile operands. */
-static inline bool
+inline bool
gimple_has_volatile_ops (const gimple *stmt)
{
if (gimple_has_mem_ops (stmt))
/* Set the HAS_VOLATILE_OPS flag to VOLATILEP. */
-static inline void
+inline void
gimple_set_has_volatile_ops (gimple *stmt, bool volatilep)
{
if (gimple_has_mem_ops (stmt))
/* Return true if STMT is in a transaction. */
-static inline bool
+inline bool
gimple_in_transaction (const gimple *stmt)
{
return bb_in_transaction (gimple_bb (stmt));
/* Return true if statement STMT may access memory. */
-static inline bool
+inline bool
gimple_references_memory_p (gimple *stmt)
{
return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
/* Return the subcode for OMP statement S. */
-static inline unsigned
+inline unsigned
gimple_omp_subcode (const gimple *s)
{
gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
/* Set the subcode for OMP statement S to SUBCODE. */
-static inline void
+inline void
gimple_omp_set_subcode (gimple *s, unsigned int subcode)
{
/* We only have 16 bits for the subcode. Assert that we are not
/* Set the nowait flag on OMP_RETURN statement S. */
-static inline void
+inline void
gimple_omp_return_set_nowait (gimple *s)
{
GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
flag set. */
-static inline bool
+inline bool
gimple_omp_return_nowait_p (const gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
/* Set the LHS of OMP return. */
-static inline void
+inline void
gimple_omp_return_set_lhs (gimple *g, tree lhs)
{
gimple_statement_omp_return *omp_return_stmt =
/* Get the LHS of OMP return. */
-static inline tree
+inline tree
gimple_omp_return_lhs (const gimple *g)
{
const gimple_statement_omp_return *omp_return_stmt =
/* Return a pointer to the LHS of OMP return. */
-static inline tree *
+inline tree *
gimple_omp_return_lhs_ptr (gimple *g)
{
gimple_statement_omp_return *omp_return_stmt =
/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
flag set. */
-static inline bool
+inline bool
gimple_omp_section_last_p (const gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
/* Set the GF_OMP_SECTION_LAST flag on G. */
-static inline void
+inline void
gimple_omp_section_set_last (gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
/* Return true if OMP ordered construct is stand-alone
(G has the GF_OMP_ORDERED_STANDALONE flag set). */
-static inline bool
+inline bool
gimple_omp_ordered_standalone_p (const gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ORDERED);
/* Set the GF_OMP_ORDERED_STANDALONE flag on G. */
-static inline void
+inline void
gimple_omp_ordered_standalone (gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_ORDERED);
/* Return true if OMP parallel statement G has the
GF_OMP_PARALLEL_COMBINED flag set. */
-static inline bool
+inline bool
gimple_omp_parallel_combined_p (const gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
value of COMBINED_P. */
-static inline void
+inline void
gimple_omp_parallel_set_combined_p (gimple *g, bool combined_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
/* Return true if OMP atomic load/store statement G has the
GF_OMP_ATOMIC_NEED_VALUE flag set. */
-static inline bool
+inline bool
gimple_omp_atomic_need_value_p (const gimple *g)
{
if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
/* Set the GF_OMP_ATOMIC_NEED_VALUE flag on G. */
-static inline void
+inline void
gimple_omp_atomic_set_need_value (gimple *g)
{
if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
/* Return true if OMP atomic load/store statement G has the
GF_OMP_ATOMIC_WEAK flag set. */
-static inline bool
+inline bool
gimple_omp_atomic_weak_p (const gimple *g)
{
if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
/* Set the GF_OMP_ATOMIC_WEAK flag on G. */
-static inline void
+inline void
gimple_omp_atomic_set_weak (gimple *g)
{
if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
/* Return the memory order of the OMP atomic load/store statement G. */
-static inline enum omp_memory_order
+inline enum omp_memory_order
gimple_omp_atomic_memory_order (const gimple *g)
{
if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
/* Set the memory order on G. */
-static inline void
+inline void
gimple_omp_atomic_set_memory_order (gimple *g, enum omp_memory_order mo)
{
if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
/* Return the number of operands for statement GS. */
-static inline unsigned
+inline unsigned
gimple_num_ops (const gimple *gs)
{
return gs->num_ops;
/* Set the number of operands for statement GS. */
-static inline void
+inline void
gimple_set_num_ops (gimple *gs, unsigned num_ops)
{
gs->num_ops = num_ops;
/* Return the array of operands for statement GS. */
-static inline tree *
+inline tree *
gimple_ops (gimple *gs)
{
size_t off;
/* Return operand I for statement GS. */
-static inline tree
+inline tree
gimple_op (const gimple *gs, unsigned i)
{
if (gimple_has_ops (gs))
/* Return a pointer to operand I for statement GS. */
-static inline tree *
+inline tree *
gimple_op_ptr (gimple *gs, unsigned i)
{
if (gimple_has_ops (gs))
/* Set operand I of statement GS to OP. */
-static inline void
+inline void
gimple_set_op (gimple *gs, unsigned i, tree op)
{
gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));
/* Return true if GS is a GIMPLE_ASSIGN. */
-static inline bool
+inline bool
is_gimple_assign (const gimple *gs)
{
return gimple_code (gs) == GIMPLE_ASSIGN;
/* Determine if expression CODE is one of the valid expressions that can
be used on the RHS of GIMPLE assignments. */
-static inline enum gimple_rhs_class
+inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
/* Return the LHS of assignment statement GS. */
-static inline tree
+inline tree
gimple_assign_lhs (const gassign *gs)
{
return gs->op[0];
}
-static inline tree
+inline tree
gimple_assign_lhs (const gimple *gs)
{
const gassign *ass = GIMPLE_CHECK2<const gassign *> (gs);
/* Return a pointer to the LHS of assignment statement GS. */
-static inline tree *
+inline tree *
gimple_assign_lhs_ptr (gassign *gs)
{
return &gs->op[0];
}
-static inline tree *
+inline tree *
gimple_assign_lhs_ptr (gimple *gs)
{
gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
/* Set LHS to be the LHS operand of assignment statement GS. */
-static inline void
+inline void
gimple_assign_set_lhs (gassign *gs, tree lhs)
{
gs->op[0] = lhs;
SSA_NAME_DEF_STMT (lhs) = gs;
}
-static inline void
+inline void
gimple_assign_set_lhs (gimple *gs, tree lhs)
{
gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
/* Return the first operand on the RHS of assignment statement GS. */
-static inline tree
+inline tree
gimple_assign_rhs1 (const gassign *gs)
{
return gs->op[1];
}
-static inline tree
+inline tree
gimple_assign_rhs1 (const gimple *gs)
{
const gassign *ass = GIMPLE_CHECK2<const gassign *> (gs);
/* Return a pointer to the first operand on the RHS of assignment
statement GS. */
-static inline tree *
+inline tree *
gimple_assign_rhs1_ptr (gassign *gs)
{
return &gs->op[1];
}
-static inline tree *
+inline tree *
gimple_assign_rhs1_ptr (gimple *gs)
{
gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
/* Set RHS to be the first operand on the RHS of assignment statement GS. */
-static inline void
+inline void
gimple_assign_set_rhs1 (gassign *gs, tree rhs)
{
gs->op[1] = rhs;
}
-static inline void
+inline void
gimple_assign_set_rhs1 (gimple *gs, tree rhs)
{
gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
/* Return the second operand on the RHS of assignment statement GS.
If GS does not have two operands, NULL is returned instead. */
-static inline tree
+inline tree
gimple_assign_rhs2 (const gassign *gs)
{
if (gimple_num_ops (gs) >= 3)
return NULL_TREE;
}
-static inline tree
+inline tree
gimple_assign_rhs2 (const gimple *gs)
{
const gassign *ass = GIMPLE_CHECK2<const gassign *> (gs);
/* Return a pointer to the second operand on the RHS of assignment
statement GS. */
-static inline tree *
+inline tree *
gimple_assign_rhs2_ptr (gassign *gs)
{
gcc_gimple_checking_assert (gimple_num_ops (gs) >= 3);
return &gs->op[2];
}
-static inline tree *
+inline tree *
gimple_assign_rhs2_ptr (gimple *gs)
{
gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
/* Set RHS to be the second operand on the RHS of assignment statement GS. */
-static inline void
+inline void
gimple_assign_set_rhs2 (gassign *gs, tree rhs)
{
gcc_gimple_checking_assert (gimple_num_ops (gs) >= 3);
gs->op[2] = rhs;
}
-static inline void
+inline void
gimple_assign_set_rhs2 (gimple *gs, tree rhs)
{
gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
/* Return the third operand on the RHS of assignment statement GS.
If GS does not have three operands, NULL is returned instead. */
-static inline tree
+inline tree
gimple_assign_rhs3 (const gassign *gs)
{
if (gimple_num_ops (gs) >= 4)
return NULL_TREE;
}
-static inline tree
+inline tree
gimple_assign_rhs3 (const gimple *gs)
{
const gassign *ass = GIMPLE_CHECK2<const gassign *> (gs);
/* Return a pointer to the third operand on the RHS of assignment
statement GS. */
-static inline tree *
+inline tree *
gimple_assign_rhs3_ptr (gimple *gs)
{
gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
/* Set RHS to be the third operand on the RHS of assignment statement GS. */
-static inline void
+inline void
gimple_assign_set_rhs3 (gassign *gs, tree rhs)
{
gcc_gimple_checking_assert (gimple_num_ops (gs) >= 4);
gs->op[3] = rhs;
}
-static inline void
+inline void
gimple_assign_set_rhs3 (gimple *gs, tree rhs)
{
gassign *ass = GIMPLE_CHECK2<gassign *> (gs);
/* A wrapper around 3 operand gimple_assign_set_rhs_with_ops, for callers
which expect to see only two operands. */
-static inline void
+inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
tree op1, tree op2)
{
/* A wrapper around 3 operand gimple_assign_set_rhs_with_ops, for callers
which expect to see only one operand. */
-static inline void
+inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
tree op1)
{
/* Returns true if GS is a nontemporal move. */
-static inline bool
+inline bool
gimple_assign_nontemporal_move_p (const gassign *gs)
{
return gs->nontemporal_move;
/* Sets nontemporal move flag of GS to NONTEMPORAL. */
-static inline void
+inline void
gimple_assign_set_nontemporal_move (gimple *gs, bool nontemporal)
{
GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
statement GS. In case that the RHS is a single object, returns the
tree code of the object. */
-static inline enum tree_code
+inline enum tree_code
gimple_assign_rhs_code (const gassign *gs)
{
enum tree_code code = (enum tree_code) gs->subcode;
return code;
}
-static inline enum tree_code
+inline enum tree_code
gimple_assign_rhs_code (const gimple *gs)
{
const gassign *ass = GIMPLE_CHECK2<const gassign *> (gs);
/* Set CODE to be the code for the expression computed on the RHS of
assignment S. */
-static inline void
+inline void
gimple_assign_set_rhs_code (gimple *s, enum tree_code code)
{
GIMPLE_CHECK (s, GIMPLE_ASSIGN);
the rhs of assignment statement GS.
This will never return GIMPLE_INVALID_RHS. */
-static inline enum gimple_rhs_class
+inline enum gimple_rhs_class
gimple_assign_rhs_class (const gimple *gs)
{
return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
any RHS operand, including those that perform an operation
and do not have the semantics of a copy, such as COND_EXPR. */
-static inline bool
+inline bool
gimple_assign_single_p (const gimple *gs)
{
return (is_gimple_assign (gs)
/* Return true if GS performs a store to its lhs. */
-static inline bool
+inline bool
gimple_store_p (const gimple *gs)
{
tree lhs = gimple_get_lhs (gs);
/* Return true if GS is an assignment that loads from its rhs1. */
-static inline bool
+inline bool
gimple_assign_load_p (const gimple *gs)
{
tree rhs;
/* Return true if S is a type-cast assignment. */
-static inline bool
+inline bool
gimple_assign_cast_p (const gimple *s)
{
if (is_gimple_assign (s))
/* Return true if S is a clobber statement. */
-static inline bool
+inline bool
gimple_clobber_p (const gimple *s)
{
return gimple_assign_single_p (s)
/* Return true if S is a clobber statement. */
-static inline bool
+inline bool
gimple_clobber_p (const gimple *s, enum clobber_kind kind)
{
return gimple_clobber_p (s)
/* Return true if GS is a GIMPLE_CALL. */
-static inline bool
+inline bool
is_gimple_call (const gimple *gs)
{
return gimple_code (gs) == GIMPLE_CALL;
/* Return the LHS of call statement GS. */
-static inline tree
+inline tree
gimple_call_lhs (const gcall *gs)
{
return gs->op[0];
}
-static inline tree
+inline tree
gimple_call_lhs (const gimple *gs)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
/* Return a pointer to the LHS of call statement GS. */
-static inline tree *
+inline tree *
gimple_call_lhs_ptr (gcall *gs)
{
return &gs->op[0];
}
-static inline tree *
+inline tree *
gimple_call_lhs_ptr (gimple *gs)
{
gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
/* Set LHS to be the LHS operand of call statement GS. */
-static inline void
+inline void
gimple_call_set_lhs (gcall *gs, tree lhs)
{
gs->op[0] = lhs;
SSA_NAME_DEF_STMT (lhs) = gs;
}
-static inline void
+inline void
gimple_call_set_lhs (gimple *gs, tree lhs)
{
gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
/* Return true if call GS calls an internal-only function, as enumerated
by internal_fn. */
-static inline bool
+inline bool
gimple_call_internal_p (const gcall *gs)
{
return (gs->subcode & GF_CALL_INTERNAL) != 0;
}
-static inline bool
+inline bool
gimple_call_internal_p (const gimple *gs)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
/* Return true if call GS is marked as nocf_check. */
-static inline bool
+inline bool
gimple_call_nocf_check_p (const gcall *gs)
{
return (gs->subcode & GF_CALL_NOCF_CHECK) != 0;
/* Mark statement GS as nocf_check call. */
-static inline void
+inline void
gimple_call_set_nocf_check (gcall *gs, bool nocf_check)
{
if (nocf_check)
/* Return the target of internal call GS. */
-static inline enum internal_fn
+inline enum internal_fn
gimple_call_internal_fn (const gcall *gs)
{
gcc_gimple_checking_assert (gimple_call_internal_p (gs));
return gs->u.internal_fn;
}
-static inline enum internal_fn
+inline enum internal_fn
gimple_call_internal_fn (const gimple *gs)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
/* Return true, if this internal gimple call is unique. */
-static inline bool
+inline bool
gimple_call_internal_unique_p (const gcall *gs)
{
return gimple_call_internal_fn (gs) == IFN_UNIQUE;
}
-static inline bool
+inline bool
gimple_call_internal_unique_p (const gimple *gs)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
/* Return true if GS is an internal function FN. */
-static inline bool
+inline bool
gimple_call_internal_p (const gimple *gs, internal_fn fn)
{
return (is_gimple_call (gs)
/* If CTRL_ALTERING_P is true, mark GIMPLE_CALL S to be a stmt
that could alter control flow. */
-static inline void
+inline void
gimple_call_set_ctrl_altering (gcall *s, bool ctrl_altering_p)
{
if (ctrl_altering_p)
s->subcode &= ~GF_CALL_CTRL_ALTERING;
}
-static inline void
+inline void
gimple_call_set_ctrl_altering (gimple *s, bool ctrl_altering_p)
{
gcall *gc = GIMPLE_CHECK2<gcall *> (s);
/* Return true if call GS calls an func whose GF_CALL_CTRL_ALTERING
flag is set. Such call could not be a stmt in the middle of a bb. */
-static inline bool
+inline bool
gimple_call_ctrl_altering_p (const gcall *gs)
{
return (gs->subcode & GF_CALL_CTRL_ALTERING) != 0;
}
-static inline bool
+inline bool
gimple_call_ctrl_altering_p (const gimple *gs)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
/* Return the function type of the function called by GS. */
-static inline tree
+inline tree
gimple_call_fntype (const gcall *gs)
{
if (gimple_call_internal_p (gs))
return gs->u.fntype;
}
-static inline tree
+inline tree
gimple_call_fntype (const gimple *gs)
{
const gcall *call_stmt = GIMPLE_CHECK2<const gcall *> (gs);
/* Set the type of the function called by CALL_STMT to FNTYPE. */
-static inline void
+inline void
gimple_call_set_fntype (gcall *call_stmt, tree fntype)
{
gcc_gimple_checking_assert (!gimple_call_internal_p (call_stmt));
/* Return the tree node representing the function called by call
statement GS. */
-static inline tree
+inline tree
gimple_call_fn (const gcall *gs)
{
return gs->op[1];
}
-static inline tree
+inline tree
gimple_call_fn (const gimple *gs)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
/* Return a pointer to the tree node representing the function called by call
statement GS. */
-static inline tree *
+inline tree *
gimple_call_fn_ptr (gcall *gs)
{
return &gs->op[1];
}
-static inline tree *
+inline tree *
gimple_call_fn_ptr (gimple *gs)
{
gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
/* Set FN to be the function called by call statement GS. */
-static inline void
+inline void
gimple_call_set_fn (gcall *gs, tree fn)
{
gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
/* Set FNDECL to be the function called by call statement GS. */
-static inline void
+inline void
gimple_call_set_fndecl (gcall *gs, tree decl)
{
gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
build_pointer_type (TREE_TYPE (decl)), decl);
}
-static inline void
+inline void
gimple_call_set_fndecl (gimple *gs, tree decl)
{
gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
/* Set internal function FN to be the function called by call statement CALL_STMT. */
-static inline void
+inline void
gimple_call_set_internal_fn (gcall *call_stmt, enum internal_fn fn)
{
gcc_gimple_checking_assert (gimple_call_internal_p (call_stmt));
Otherwise return NULL. This function is analogous to
get_callee_fndecl in tree land. */
-static inline tree
+inline tree
gimple_call_fndecl (const gcall *gs)
{
return gimple_call_addr_fndecl (gimple_call_fn (gs));
}
-static inline tree
+inline tree
gimple_call_fndecl (const gimple *gs)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
/* Return the type returned by call statement GS. */
-static inline tree
+inline tree
gimple_call_return_type (const gcall *gs)
{
tree type = gimple_call_fntype (gs);
/* Return the static chain for call statement GS. */
-static inline tree
+inline tree
gimple_call_chain (const gcall *gs)
{
return gs->op[2];
}
-static inline tree
+inline tree
gimple_call_chain (const gimple *gs)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
/* Return a pointer to the static chain for call statement CALL_STMT. */
-static inline tree *
+inline tree *
gimple_call_chain_ptr (gcall *call_stmt)
{
return &call_stmt->op[2];
/* Set CHAIN to be the static chain for call statement CALL_STMT. */
-static inline void
+inline void
gimple_call_set_chain (gcall *call_stmt, tree chain)
{
call_stmt->op[2] = chain;
/* Return the number of arguments used by call statement GS. */
-static inline unsigned
+inline unsigned
gimple_call_num_args (const gcall *gs)
{
return gimple_num_ops (gs) - 3;
}
-static inline unsigned
+inline unsigned
gimple_call_num_args (const gimple *gs)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
/* Return the argument at position INDEX for call statement GS. */
-static inline tree
+inline tree
gimple_call_arg (const gcall *gs, unsigned index)
{
gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 3);
return gs->op[index + 3];
}
-static inline tree
+inline tree
gimple_call_arg (const gimple *gs, unsigned index)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (gs);
/* Return a pointer to the argument at position INDEX for call
statement GS. */
-static inline tree *
+inline tree *
gimple_call_arg_ptr (gcall *gs, unsigned index)
{
gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 3);
return &gs->op[index + 3];
}
-static inline tree *
+inline tree *
gimple_call_arg_ptr (gimple *gs, unsigned index)
{
gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
/* Set ARG to be the argument at position INDEX for call statement GS. */
-static inline void
+inline void
gimple_call_set_arg (gcall *gs, unsigned index, tree arg)
{
gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 3);
gs->op[index + 3] = arg;
}
-static inline void
+inline void
gimple_call_set_arg (gimple *gs, unsigned index, tree arg)
{
gcall *gc = GIMPLE_CHECK2<gcall *> (gs);
(i.e., a call just before the exit of a function). These calls are
candidate for tail call optimization. */
-static inline void
+inline void
gimple_call_set_tail (gcall *s, bool tail_p)
{
if (tail_p)
/* Return true if GIMPLE_CALL S is marked as a tail call. */
-static inline bool
+inline bool
gimple_call_tail_p (const gcall *s)
{
return (s->subcode & GF_CALL_TAILCALL) != 0;
/* Mark (or clear) call statement S as requiring tail call optimization. */
-static inline void
+inline void
gimple_call_set_must_tail (gcall *s, bool must_tail_p)
{
if (must_tail_p)
/* Return true if call statement has been marked as requiring
tail call optimization. */
-static inline bool
+inline bool
gimple_call_must_tail_p (const gcall *s)
{
return (s->subcode & GF_CALL_MUST_TAIL_CALL) != 0;
slot optimization. This transformation uses the target of the call
expansion as the return slot for calls that return in memory. */
-static inline void
+inline void
gimple_call_set_return_slot_opt (gcall *s, bool return_slot_opt_p)
{
if (return_slot_opt_p)
/* Return true if S is marked for return slot optimization. */
-static inline bool
+inline bool
gimple_call_return_slot_opt_p (const gcall *s)
{
return (s->subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
thunk to the thunked-to function. */
-static inline void
+inline void
gimple_call_set_from_thunk (gcall *s, bool from_thunk_p)
{
if (from_thunk_p)
/* Return true if GIMPLE_CALL S is a jump from a thunk. */
-static inline bool
+inline bool
gimple_call_from_thunk_p (gcall *s)
{
return (s->subcode & GF_CALL_FROM_THUNK) != 0;
/* If FROM_NEW_OR_DELETE_P is true, mark GIMPLE_CALL S as being a call
to operator new or delete created from a new or delete expression. */
-static inline void
+inline void
gimple_call_set_from_new_or_delete (gcall *s, bool from_new_or_delete_p)
{
if (from_new_or_delete_p)
/* Return true if GIMPLE_CALL S is a call to operator new or delete
   from a new or delete expression. */
-static inline bool
+inline bool
gimple_call_from_new_or_delete (const gcall *s)
{
return (s->subcode & GF_CALL_FROM_NEW_OR_DELETE) != 0;
/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs the
argument pack in its argument list. */
-static inline void
+inline void
gimple_call_set_va_arg_pack (gcall *s, bool pass_arg_pack_p)
{
if (pass_arg_pack_p)
/* Return true if GIMPLE_CALL S is a stdarg call that needs the
argument pack in its argument list. */
-static inline bool
+inline bool
gimple_call_va_arg_pack_p (const gcall *s)
{
return (s->subcode & GF_CALL_VA_ARG_PACK) != 0;
/* Return true if S is a noreturn call. */
-static inline bool
+inline bool
gimple_call_noreturn_p (const gcall *s)
{
return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}
-static inline bool
+inline bool
gimple_call_noreturn_p (const gimple *s)
{
const gcall *gc = GIMPLE_CHECK2<const gcall *> (s);
/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
even if the called function can throw in other cases. */
-static inline void
+inline void
gimple_call_set_nothrow (gcall *s, bool nothrow_p)
{
if (nothrow_p)
/* Return true if S is a nothrow call. */
-static inline bool
+inline bool
gimple_call_nothrow_p (gcall *s)
{
return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
stack_save/stack_restore calls and hence can't lead to unbounded
stack growth even when they occur in loops. */
-static inline void
+inline void
gimple_call_set_alloca_for_var (gcall *s, bool for_var)
{
if (for_var)
/* Return true if S is a call to builtin_alloca emitted for VLA objects. */
-static inline bool
+inline bool
gimple_call_alloca_for_var_p (gcall *s)
{
return (s->subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
}
-static inline bool
+inline bool
gimple_call_alloca_for_var_p (gimple *s)
{
const gcall *gc = GIMPLE_CHECK2<gcall *> (s);
/* If BY_DESCRIPTOR_P is true, GIMPLE_CALL S is an indirect call for which
pointers to nested function are descriptors instead of trampolines. */
-static inline void
+inline void
gimple_call_set_by_descriptor (gcall *s, bool by_descriptor_p)
{
if (by_descriptor_p)
/* Return true if S is a by-descriptor call. */
-static inline bool
+inline bool
gimple_call_by_descriptor_p (gcall *s)
{
return (s->subcode & GF_CALL_BY_DESCRIPTOR) != 0;
/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL. */
-static inline void
+inline void
gimple_call_copy_flags (gcall *dest_call, gcall *orig_call)
{
dest_call->subcode = orig_call->subcode;
/* Return a pointer to the points-to solution for the set of call-used
variables of the call CALL_STMT. */
-static inline struct pt_solution *
+inline struct pt_solution *
gimple_call_use_set (gcall *call_stmt)
{
return &call_stmt->call_used;
/* As above, but const. */
-static inline const pt_solution *
+inline const pt_solution *
gimple_call_use_set (const gcall *call_stmt)
{
return &call_stmt->call_used;
/* Return a pointer to the points-to solution for the set of call-used
variables of the call CALL_STMT. */
-static inline struct pt_solution *
+inline struct pt_solution *
gimple_call_clobber_set (gcall *call_stmt)
{
return &call_stmt->call_clobbered;
/* As above, but const. */
-static inline const pt_solution *
+inline const pt_solution *
gimple_call_clobber_set (const gcall *call_stmt)
{
return &call_stmt->call_clobbered;
/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
non-NULL lhs. */
-static inline bool
+inline bool
gimple_has_lhs (const gimple *stmt)
{
if (is_gimple_assign (stmt))
/* Return the code of the predicate computed by conditional statement GS. */
-static inline enum tree_code
+inline enum tree_code
gimple_cond_code (const gcond *gs)
{
return (enum tree_code) gs->subcode;
}
-static inline enum tree_code
+inline enum tree_code
gimple_cond_code (const gimple *gs)
{
const gcond *gc = GIMPLE_CHECK2<const gcond *> (gs);
/* Set CODE to be the predicate code for the conditional statement GS. */
-static inline void
+inline void
gimple_cond_set_code (gcond *gs, enum tree_code code)
{
gs->subcode = code;
/* Return the LHS of the predicate computed by conditional statement GS. */
-static inline tree
+inline tree
gimple_cond_lhs (const gcond *gs)
{
return gs->op[0];
}
-static inline tree
+inline tree
gimple_cond_lhs (const gimple *gs)
{
const gcond *gc = GIMPLE_CHECK2<const gcond *> (gs);
/* Return the pointer to the LHS of the predicate computed by conditional
statement GS. */
-static inline tree *
+inline tree *
gimple_cond_lhs_ptr (gcond *gs)
{
return &gs->op[0];
/* Set LHS to be the LHS operand of the predicate computed by
conditional statement GS. */
-static inline void
+inline void
gimple_cond_set_lhs (gcond *gs, tree lhs)
{
gs->op[0] = lhs;
/* Return the RHS operand of the predicate computed by conditional GS. */
-static inline tree
+inline tree
gimple_cond_rhs (const gcond *gs)
{
return gs->op[1];
}
-static inline tree
+inline tree
gimple_cond_rhs (const gimple *gs)
{
const gcond *gc = GIMPLE_CHECK2<const gcond *> (gs);
/* Return the pointer to the RHS operand of the predicate computed by
conditional GS. */
-static inline tree *
+inline tree *
gimple_cond_rhs_ptr (gcond *gs)
{
return &gs->op[1];
/* Set RHS to be the RHS operand of the predicate computed by
conditional statement GS. */
-static inline void
+inline void
gimple_cond_set_rhs (gcond *gs, tree rhs)
{
gs->op[1] = rhs;
/* Return the label used by conditional statement GS when its
predicate evaluates to true. */
-static inline tree
+inline tree
gimple_cond_true_label (const gcond *gs)
{
return gs->op[2];
/* Set LABEL to be the label used by conditional statement GS when its
predicate evaluates to true. */
-static inline void
+inline void
gimple_cond_set_true_label (gcond *gs, tree label)
{
gs->op[2] = label;
/* Set LABEL to be the label used by conditional statement GS when its
predicate evaluates to false. */
-static inline void
+inline void
gimple_cond_set_false_label (gcond *gs, tree label)
{
gs->op[3] = label;
/* Return the label used by conditional statement GS when its
predicate evaluates to false. */
-static inline tree
+inline tree
gimple_cond_false_label (const gcond *gs)
{
return gs->op[3];
/* Set the conditional COND_STMT to be of the form 'if (1 == 0)'. */
-static inline void
+inline void
gimple_cond_make_false (gcond *gs)
{
gimple_cond_set_lhs (gs, boolean_false_node);
/* Set the conditional COND_STMT to be of the form 'if (1 == 1)'. */
-static inline void
+inline void
gimple_cond_make_true (gcond *gs)
{
gimple_cond_set_lhs (gs, boolean_true_node);
/* Check if conditional statement GS is of the form 'if (1 == 1)',
'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)' */
-static inline bool
+inline bool
gimple_cond_true_p (const gcond *gs)
{
tree lhs = gimple_cond_lhs (gs);
/* Check if conditional statement GS is of the form 'if (1 != 1)',
'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)' */
-static inline bool
+inline bool
gimple_cond_false_p (const gcond *gs)
{
tree lhs = gimple_cond_lhs (gs);
/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS. */
-static inline void
+inline void
gimple_cond_set_condition (gcond *stmt, enum tree_code code, tree lhs,
tree rhs)
{
consistency. This is useful when the caller needs to deal with the
three kinds of computation that GIMPLE supports. */
-static inline enum tree_code
+inline enum tree_code
gimple_expr_code (const gimple *stmt)
{
if (const gassign *ass = dyn_cast<const gassign *> (stmt))
/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS. */
-static inline tree
+inline tree
gimple_label_label (const glabel *gs)
{
return gs->op[0];
/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
GS. */
-static inline void
+inline void
gimple_label_set_label (glabel *gs, tree label)
{
gs->op[0] = label;
/* Return the destination of the unconditional jump GS. */
-static inline tree
+inline tree
gimple_goto_dest (const gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_GOTO);
/* Set DEST to be the destination of the unconditional jump GS. */
-static inline void
+inline void
gimple_goto_set_dest (ggoto *gs, tree dest)
{
gs->op[0] = dest;
/* Return the variables declared in the GIMPLE_BIND statement GS. */
-static inline tree
+inline tree
gimple_bind_vars (const gbind *bind_stmt)
{
return bind_stmt->vars;
/* Set VARS to be the set of variables declared in the GIMPLE_BIND
statement GS. */
-static inline void
+inline void
gimple_bind_set_vars (gbind *bind_stmt, tree vars)
{
bind_stmt->vars = vars;
/* Append VARS to the set of variables declared in the GIMPLE_BIND
statement GS. */
-static inline void
+inline void
gimple_bind_append_vars (gbind *bind_stmt, tree vars)
{
bind_stmt->vars = chainon (bind_stmt->vars, vars);
}
-static inline gimple_seq *
+inline gimple_seq *
gimple_bind_body_ptr (gbind *bind_stmt)
{
return &bind_stmt->body;
/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS. */
-static inline gimple_seq
+inline gimple_seq
gimple_bind_body (const gbind *gs)
{
return *gimple_bind_body_ptr (const_cast <gbind *> (gs));
/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
statement GS. */
-static inline void
+inline void
gimple_bind_set_body (gbind *bind_stmt, gimple_seq seq)
{
bind_stmt->body = seq;
/* Append a statement to the end of a GIMPLE_BIND's body. */
-static inline void
+inline void
gimple_bind_add_stmt (gbind *bind_stmt, gimple *stmt)
{
gimple_seq_add_stmt (&bind_stmt->body, stmt);
/* Append a sequence of statements to the end of a GIMPLE_BIND's body. */
-static inline void
+inline void
gimple_bind_add_seq (gbind *bind_stmt, gimple_seq seq)
{
gimple_seq_add_seq (&bind_stmt->body, seq);
/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
GS. This is analogous to the BIND_EXPR_BLOCK field in trees. */
-static inline tree
+inline tree
gimple_bind_block (const gbind *bind_stmt)
{
return bind_stmt->block;
/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
statement GS. */
-static inline void
+inline void
gimple_bind_set_block (gbind *bind_stmt, tree block)
{
gcc_gimple_checking_assert (block == NULL_TREE
/* Return the number of input operands for GIMPLE_ASM ASM_STMT. */
-static inline unsigned
+inline unsigned
gimple_asm_ninputs (const gasm *asm_stmt)
{
return asm_stmt->ni;
/* Return the number of output operands for GIMPLE_ASM ASM_STMT. */
-static inline unsigned
+inline unsigned
gimple_asm_noutputs (const gasm *asm_stmt)
{
return asm_stmt->no;
/* Return the number of clobber operands for GIMPLE_ASM ASM_STMT. */
-static inline unsigned
+inline unsigned
gimple_asm_nclobbers (const gasm *asm_stmt)
{
return asm_stmt->nc;
/* Return the number of label operands for GIMPLE_ASM ASM_STMT. */
-static inline unsigned
+inline unsigned
gimple_asm_nlabels (const gasm *asm_stmt)
{
return asm_stmt->nl;
/* Return input operand INDEX of GIMPLE_ASM ASM_STMT. */
-static inline tree
+inline tree
gimple_asm_input_op (const gasm *asm_stmt, unsigned index)
{
gcc_gimple_checking_assert (index < asm_stmt->ni);
/* Set IN_OP to be input operand INDEX in GIMPLE_ASM ASM_STMT. */
-static inline void
+inline void
gimple_asm_set_input_op (gasm *asm_stmt, unsigned index, tree in_op)
{
gcc_gimple_checking_assert (index < asm_stmt->ni
/* Return output operand INDEX of GIMPLE_ASM ASM_STMT. */
-static inline tree
+inline tree
gimple_asm_output_op (const gasm *asm_stmt, unsigned index)
{
gcc_gimple_checking_assert (index < asm_stmt->no);
/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM ASM_STMT. */
-static inline void
+inline void
gimple_asm_set_output_op (gasm *asm_stmt, unsigned index, tree out_op)
{
gcc_gimple_checking_assert (index < asm_stmt->no
/* Return clobber operand INDEX of GIMPLE_ASM ASM_STMT. */
-static inline tree
+inline tree
gimple_asm_clobber_op (const gasm *asm_stmt, unsigned index)
{
gcc_gimple_checking_assert (index < asm_stmt->nc);
/* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM ASM_STMT. */
-static inline void
+inline void
gimple_asm_set_clobber_op (gasm *asm_stmt, unsigned index, tree clobber_op)
{
gcc_gimple_checking_assert (index < asm_stmt->nc
/* Return label operand INDEX of GIMPLE_ASM ASM_STMT. */
-static inline tree
+inline tree
gimple_asm_label_op (const gasm *asm_stmt, unsigned index)
{
gcc_gimple_checking_assert (index < asm_stmt->nl);
/* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM ASM_STMT. */
-static inline void
+inline void
gimple_asm_set_label_op (gasm *asm_stmt, unsigned index, tree label_op)
{
gcc_gimple_checking_assert (index < asm_stmt->nl
/* Return the string representing the assembly instruction in
GIMPLE_ASM ASM_STMT. */
-static inline const char *
+inline const char *
gimple_asm_string (const gasm *asm_stmt)
{
return asm_stmt->string;
/* Return true if ASM_STMT is marked volatile. */
-static inline bool
+inline bool
gimple_asm_volatile_p (const gasm *asm_stmt)
{
return (asm_stmt->subcode & GF_ASM_VOLATILE) != 0;
/* If VOLATILE_P is true, mark asm statement ASM_STMT as volatile. */
-static inline void
+inline void
gimple_asm_set_volatile (gasm *asm_stmt, bool volatile_p)
{
if (volatile_p)
/* Return true if ASM_STMT is marked inline. */
-static inline bool
+inline bool
gimple_asm_inline_p (const gasm *asm_stmt)
{
return (asm_stmt->subcode & GF_ASM_INLINE) != 0;
/* If INLINE_P is true, mark asm statement ASM_STMT as inline. */
-static inline void
+inline void
gimple_asm_set_inline (gasm *asm_stmt, bool inline_p)
{
if (inline_p)
/* If INPUT_P is true, mark asm ASM_STMT as an ASM_INPUT. */
-static inline void
+inline void
gimple_asm_set_input (gasm *asm_stmt, bool input_p)
{
if (input_p)
/* Return true if asm ASM_STMT is an ASM_INPUT. */
-static inline bool
+inline bool
gimple_asm_input_p (const gasm *asm_stmt)
{
return (asm_stmt->subcode & GF_ASM_INPUT) != 0;
/* Return the types handled by GIMPLE_CATCH statement CATCH_STMT. */
-static inline tree
+inline tree
gimple_catch_types (const gcatch *catch_stmt)
{
return catch_stmt->types;
/* Return a pointer to the types handled by GIMPLE_CATCH statement CATCH_STMT. */
-static inline tree *
+inline tree *
gimple_catch_types_ptr (gcatch *catch_stmt)
{
return &catch_stmt->types;
/* Return a pointer to the GIMPLE sequence representing the body of
the handler of GIMPLE_CATCH statement CATCH_STMT. */
-static inline gimple_seq *
+inline gimple_seq *
gimple_catch_handler_ptr (gcatch *catch_stmt)
{
return &catch_stmt->handler;
/* Return the GIMPLE sequence representing the body of the handler of
GIMPLE_CATCH statement CATCH_STMT. */
-static inline gimple_seq
+inline gimple_seq
gimple_catch_handler (const gcatch *catch_stmt)
{
return *gimple_catch_handler_ptr (const_cast <gcatch *> (catch_stmt));
/* Set T to be the set of types handled by GIMPLE_CATCH CATCH_STMT. */
-static inline void
+inline void
gimple_catch_set_types (gcatch *catch_stmt, tree t)
{
catch_stmt->types = t;
/* Set HANDLER to be the body of GIMPLE_CATCH CATCH_STMT. */
-static inline void
+inline void
gimple_catch_set_handler (gcatch *catch_stmt, gimple_seq handler)
{
catch_stmt->handler = handler;
/* Return the types handled by GIMPLE_EH_FILTER statement GS. */
-static inline tree
+inline tree
gimple_eh_filter_types (const gimple *gs)
{
const geh_filter *eh_filter_stmt = as_a <const geh_filter *> (gs);
/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
GS. */
-static inline tree *
+inline tree *
gimple_eh_filter_types_ptr (gimple *gs)
{
geh_filter *eh_filter_stmt = as_a <geh_filter *> (gs);
/* Return a pointer to the sequence of statement to execute when
GIMPLE_EH_FILTER statement fails. */
-static inline gimple_seq *
+inline gimple_seq *
gimple_eh_filter_failure_ptr (gimple *gs)
{
geh_filter *eh_filter_stmt = as_a <geh_filter *> (gs);
/* Return the sequence of statement to execute when GIMPLE_EH_FILTER
statement fails. */
-static inline gimple_seq
+inline gimple_seq
gimple_eh_filter_failure (const gimple *gs)
{
return *gimple_eh_filter_failure_ptr (const_cast <gimple *> (gs));
/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER
EH_FILTER_STMT. */
-static inline void
+inline void
gimple_eh_filter_set_types (geh_filter *eh_filter_stmt, tree types)
{
eh_filter_stmt->types = types;
/* Set FAILURE to be the sequence of statements to execute on failure
for GIMPLE_EH_FILTER EH_FILTER_STMT. */
-static inline void
+inline void
gimple_eh_filter_set_failure (geh_filter *eh_filter_stmt,
gimple_seq failure)
{
/* Get the function decl to be called by the MUST_NOT_THROW region. */
-static inline tree
+inline tree
gimple_eh_must_not_throw_fndecl (const geh_mnt *eh_mnt_stmt)
{
return eh_mnt_stmt->fndecl;
/* Set the function decl to be called by GS to DECL. */
-static inline void
+inline void
gimple_eh_must_not_throw_set_fndecl (geh_mnt *eh_mnt_stmt,
tree decl)
{
/* GIMPLE_EH_ELSE accessors. */
/* Return a pointer to the N body (the sequence taken on the normal,
   non-EH path — TODO(review): confirm against geh_else docs) of
   GIMPLE_EH_ELSE statement EH_ELSE_STMT.  */
-static inline gimple_seq *
+inline gimple_seq *
gimple_eh_else_n_body_ptr (geh_else *eh_else_stmt)
{
return &eh_else_stmt->n_body;
}
/* Return the N body sequence of GIMPLE_EH_ELSE statement EH_ELSE_STMT.  */
-static inline gimple_seq
+inline gimple_seq
gimple_eh_else_n_body (const geh_else *eh_else_stmt)
{
return *gimple_eh_else_n_body_ptr (const_cast <geh_else *> (eh_else_stmt));
}
/* Return a pointer to the E body (the sequence taken on the EH path —
   TODO(review): confirm against geh_else docs) of GIMPLE_EH_ELSE
   statement EH_ELSE_STMT.  */
-static inline gimple_seq *
+inline gimple_seq *
gimple_eh_else_e_body_ptr (geh_else *eh_else_stmt)
{
return &eh_else_stmt->e_body;
}
/* Return the E body sequence of GIMPLE_EH_ELSE statement EH_ELSE_STMT.  */
-static inline gimple_seq
+inline gimple_seq
gimple_eh_else_e_body (const geh_else *eh_else_stmt)
{
return *gimple_eh_else_e_body_ptr (const_cast <geh_else *> (eh_else_stmt));
}
/* Set SEQ to be the N body of GIMPLE_EH_ELSE statement EH_ELSE_STMT.  */
-static inline void
+inline void
gimple_eh_else_set_n_body (geh_else *eh_else_stmt, gimple_seq seq)
{
eh_else_stmt->n_body = seq;
}
-static inline void
+inline void
gimple_eh_else_set_e_body (geh_else *eh_else_stmt, gimple_seq seq)
{
eh_else_stmt->e_body = seq;
/* Return the kind of try block represented by GIMPLE_TRY GS. This is
either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY. */
-static inline enum gimple_try_flags
+inline enum gimple_try_flags
gimple_try_kind (const gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_TRY);
/* Set the kind of try block represented by GIMPLE_TRY GS. */
-static inline void
+inline void
gimple_try_set_kind (gtry *gs, enum gimple_try_flags kind)
{
gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */
-static inline bool
+inline bool
gimple_try_catch_is_cleanup (const gimple *gs)
{
gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
/* Return a pointer to the sequence of statements used as the
body for GIMPLE_TRY GS. */
-static inline gimple_seq *
+inline gimple_seq *
gimple_try_eval_ptr (gimple *gs)
{
gtry *try_stmt = as_a <gtry *> (gs);
/* Return the sequence of statements used as the body for GIMPLE_TRY GS. */
-static inline gimple_seq
+inline gimple_seq
gimple_try_eval (const gimple *gs)
{
return *gimple_try_eval_ptr (const_cast <gimple *> (gs));
/* Return a pointer to the sequence of statements used as the cleanup body for
GIMPLE_TRY GS. */
-static inline gimple_seq *
+inline gimple_seq *
gimple_try_cleanup_ptr (gimple *gs)
{
gtry *try_stmt = as_a <gtry *> (gs);
/* Return the sequence of statements used as the cleanup body for
GIMPLE_TRY GS. */
-static inline gimple_seq
+inline gimple_seq
gimple_try_cleanup (const gimple *gs)
{
return *gimple_try_cleanup_ptr (const_cast <gimple *> (gs));
/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag. */
-static inline void
+inline void
gimple_try_set_catch_is_cleanup (gtry *g, bool catch_is_cleanup)
{
gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
/* Set EVAL to be the sequence of statements to use as the body for
GIMPLE_TRY TRY_STMT. */
-static inline void
+inline void
gimple_try_set_eval (gtry *try_stmt, gimple_seq eval)
{
try_stmt->eval = eval;
/* Set CLEANUP to be the sequence of statements to use as the cleanup
body for GIMPLE_TRY TRY_STMT. */
-static inline void
+inline void
gimple_try_set_cleanup (gtry *try_stmt, gimple_seq cleanup)
{
try_stmt->cleanup = cleanup;
/* Return a pointer to the cleanup sequence for cleanup statement GS. */
-static inline gimple_seq *
+inline gimple_seq *
gimple_wce_cleanup_ptr (gimple *gs)
{
gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce *> (gs);
/* Return the cleanup sequence for cleanup statement GS. */
-static inline gimple_seq
+inline gimple_seq
gimple_wce_cleanup (gimple *gs)
{
return *gimple_wce_cleanup_ptr (gs);
/* Set CLEANUP to be the cleanup sequence for GS. */
-static inline void
+inline void
gimple_wce_set_cleanup (gimple *gs, gimple_seq cleanup)
{
gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce *> (gs);
/* Return the CLEANUP_EH_ONLY flag for a WCE tuple. */
-static inline bool
+inline bool
gimple_wce_cleanup_eh_only (const gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
/* Set the CLEANUP_EH_ONLY flag for a WCE tuple. */
-static inline void
+inline void
gimple_wce_set_cleanup_eh_only (gimple *gs, bool eh_only_p)
{
GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
/* Return the maximum number of arguments supported by GIMPLE_PHI GS. */
-static inline unsigned
+inline unsigned
gimple_phi_capacity (const gimple *gs)
{
const gphi *phi_stmt = as_a <const gphi *> (gs);
be exactly the number of incoming edges for the basic block holding
GS. */
-static inline unsigned
+inline unsigned
gimple_phi_num_args (const gimple *gs)
{
const gphi *phi_stmt = as_a <const gphi *> (gs);
/* Return the SSA name created by GIMPLE_PHI GS. */
-static inline tree
+inline tree
gimple_phi_result (const gphi *gs)
{
return gs->result;
}
-static inline tree
+inline tree
gimple_phi_result (const gimple *gs)
{
const gphi *phi_stmt = as_a <const gphi *> (gs);
/* Return a pointer to the SSA name created by GIMPLE_PHI GS. */
-static inline tree *
+inline tree *
gimple_phi_result_ptr (gphi *gs)
{
return &gs->result;
}
-static inline tree *
+inline tree *
gimple_phi_result_ptr (gimple *gs)
{
gphi *phi_stmt = as_a <gphi *> (gs);
/* Set RESULT to be the SSA name created by GIMPLE_PHI PHI. */
-static inline void
+inline void
gimple_phi_set_result (gphi *phi, tree result)
{
phi->result = result;
/* Return the PHI argument corresponding to incoming edge INDEX for
GIMPLE_PHI GS. */
-static inline struct phi_arg_d *
+inline struct phi_arg_d *
gimple_phi_arg (gphi *gs, unsigned index)
{
gcc_gimple_checking_assert (index < gs->nargs);
return &(gs->args[index]);
}
/* As above, but const.  */
-static inline const phi_arg_d *
+inline const phi_arg_d *
gimple_phi_arg (const gphi *gs, unsigned index)
{
gcc_gimple_checking_assert (index < gs->nargs);
return &(gs->args[index]);
}
-static inline struct phi_arg_d *
+inline struct phi_arg_d *
gimple_phi_arg (gimple *gs, unsigned index)
{
gphi *phi_stmt = as_a <gphi *> (gs);
/* Set PHIARG to be the argument corresponding to incoming edge INDEX
for GIMPLE_PHI PHI. */
-static inline void
+inline void
gimple_phi_set_arg (gphi *phi, unsigned index, struct phi_arg_d * phiarg)
{
gcc_gimple_checking_assert (index < phi->nargs);
/* Return the PHI nodes for basic block BB, or NULL if there are no
PHI nodes. */
-static inline gimple_seq
+inline gimple_seq
phi_nodes (const_basic_block bb)
{
gcc_checking_assert (!(bb->flags & BB_RTL));
/* Return a pointer to the PHI nodes for basic block BB. */
-static inline gimple_seq *
+inline gimple_seq *
phi_nodes_ptr (basic_block bb)
{
gcc_checking_assert (!(bb->flags & BB_RTL));
/* Return the tree operand for argument I of PHI node GS. */
-static inline tree
+inline tree
gimple_phi_arg_def (const gphi *gs, size_t index)
{
return gimple_phi_arg (gs, index)->def;
}
-static inline tree
+inline tree
gimple_phi_arg_def (gimple *gs, size_t index)
{
return gimple_phi_arg (gs, index)->def;
/* Return a pointer to the tree operand for argument I of phi node PHI. */
-static inline tree *
+inline tree *
gimple_phi_arg_def_ptr (gphi *phi, size_t index)
{
return &gimple_phi_arg (phi, index)->def;
/* Return the edge associated with argument I of phi node PHI. */
-static inline edge
+inline edge
gimple_phi_arg_edge (const gphi *phi, size_t i)
{
return EDGE_PRED (gimple_bb (phi), i);
/* Return the source location of gimple argument I of phi node PHI. */
-static inline location_t
+inline location_t
gimple_phi_arg_location (const gphi *phi, size_t i)
{
return gimple_phi_arg (phi, i)->locus;
/* Return the source location of the argument on edge E of phi node PHI. */
-static inline location_t
+inline location_t
gimple_phi_arg_location_from_edge (gphi *phi, edge e)
{
return gimple_phi_arg (phi, e->dest_idx)->locus;
/* Set the source location of gimple argument I of phi node PHI to LOC. */
-static inline void
+inline void
gimple_phi_arg_set_location (gphi *phi, size_t i, location_t loc)
{
gimple_phi_arg (phi, i)->locus = loc;
/* Return address of source location of gimple argument I of phi node PHI. */
-static inline location_t *
+inline location_t *
gimple_phi_arg_location_ptr (gphi *phi, size_t i)
{
return &gimple_phi_arg (phi, i)->locus;
/* Return TRUE if argument I of phi node PHI has a location record. */
-static inline bool
+inline bool
gimple_phi_arg_has_location (const gphi *phi, size_t i)
{
return gimple_phi_arg_location (phi, i) != UNKNOWN_LOCATION;
/* Return the number of arguments that can be accessed by gimple_arg. */
-static inline unsigned
+inline unsigned
gimple_num_args (const gimple *gs)
{
if (auto phi = dyn_cast<const gphi *> (gs))
If it's a call, return function argument I.
If it's a PHI, return the value of PHI argument I. */
-static inline tree
+inline tree
gimple_arg (const gimple *gs, unsigned int i)
{
if (auto phi = dyn_cast<const gphi *> (gs))
/* Return a pointer to gimple_arg (GS, I). */
-static inline tree *
+inline tree *
gimple_arg_ptr (gimple *gs, unsigned int i)
{
if (auto phi = dyn_cast<gphi *> (gs))
/* Return the region number for GIMPLE_RESX RESX_STMT. */
-static inline int
+inline int
gimple_resx_region (const gresx *resx_stmt)
{
return resx_stmt->region;
/* Set REGION to be the region number for GIMPLE_RESX RESX_STMT. */
-static inline void
+inline void
gimple_resx_set_region (gresx *resx_stmt, int region)
{
resx_stmt->region = region;
/* Return the region number for GIMPLE_EH_DISPATCH EH_DISPATCH_STMT. */
-static inline int
+inline int
gimple_eh_dispatch_region (const geh_dispatch *eh_dispatch_stmt)
{
return eh_dispatch_stmt->region;
/* Set REGION to be the region number for GIMPLE_EH_DISPATCH
EH_DISPATCH_STMT. */
-static inline void
+inline void
gimple_eh_dispatch_set_region (geh_dispatch *eh_dispatch_stmt, int region)
{
eh_dispatch_stmt->region = region;
/* Return the number of labels associated with the switch statement GS. */
-static inline unsigned
+inline unsigned
gimple_switch_num_labels (const gswitch *gs)
{
unsigned num_ops;
/* Set NLABELS to be the number of labels for the switch statement GS. */
-static inline void
+inline void
gimple_switch_set_num_labels (gswitch *g, unsigned nlabels)
{
GIMPLE_CHECK (g, GIMPLE_SWITCH);
/* Return the index variable used by the switch statement GS. */
-static inline tree
+inline tree
gimple_switch_index (const gswitch *gs)
{
return gs->op[0];
/* Return a pointer to the index variable for the switch statement GS. */
-static inline tree *
+inline tree *
gimple_switch_index_ptr (gswitch *gs)
{
return &gs->op[0];
/* Set INDEX to be the index variable for switch statement GS. */
-static inline void
+inline void
gimple_switch_set_index (gswitch *gs, tree index)
{
gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
/* Return the label numbered INDEX. The default label is 0, followed by any
labels in a switch statement. */
-static inline tree
+inline tree
gimple_switch_label (const gswitch *gs, unsigned index)
{
gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
/* Set the label number INDEX to LABEL. 0 is always the default label. */
-static inline void
+inline void
gimple_switch_set_label (gswitch *gs, unsigned index, tree label)
{
gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
/* Return the default label for a switch statement. */
-static inline tree
+inline tree
gimple_switch_default_label (const gswitch *gs)
{
tree label = gimple_switch_label (gs, 0);
/* Set the default label for a switch statement. */
-static inline void
+inline void
gimple_switch_set_default_label (gswitch *gs, tree label)
{
gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
/* Return true if GS is a GIMPLE_DEBUG statement. */
-static inline bool
+inline bool
is_gimple_debug (const gimple *gs)
{
return gimple_code (gs) == GIMPLE_DEBUG;
/* Return the first nondebug statement in GIMPLE sequence S. */
-static inline gimple *
+inline gimple *
gimple_seq_first_nondebug_stmt (gimple_seq s)
{
gimple_seq_node n = gimple_seq_first (s);
/* Return the last nondebug statement in GIMPLE sequence S. */
-static inline gimple *
+inline gimple *
gimple_seq_last_nondebug_stmt (gimple_seq s)
{
gimple_seq_node n;
/* Return true if S is a GIMPLE_DEBUG BIND statement. */
-static inline bool
+inline bool
gimple_debug_bind_p (const gimple *s)
{
if (is_gimple_debug (s))
/* Return the variable bound in a GIMPLE_DEBUG bind statement. */
-static inline tree
+inline tree
gimple_debug_bind_get_var (const gimple *dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Return the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
-static inline tree
+inline tree
gimple_debug_bind_get_value (const gimple *dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG bind statement. */
-static inline tree *
+inline tree *
gimple_debug_bind_get_value_ptr (gimple *dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Set the variable bound in a GIMPLE_DEBUG bind statement. */
-static inline void
+inline void
gimple_debug_bind_set_var (gimple *dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Set the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
-static inline void
+inline void
gimple_debug_bind_set_value (gimple *dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
statement. */
-static inline void
+inline void
gimple_debug_bind_reset_value (gimple *dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Return true if the GIMPLE_DEBUG bind statement is bound to a
value. */
-static inline bool
+inline bool
gimple_debug_bind_has_value_p (gimple *dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement. */
-static inline bool
+inline bool
gimple_debug_source_bind_p (const gimple *s)
{
if (is_gimple_debug (s))
/* Return the variable bound in a GIMPLE_DEBUG source bind statement. */
-static inline tree
+inline tree
gimple_debug_source_bind_get_var (const gimple *dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Return the value bound to the variable in a GIMPLE_DEBUG source bind
statement. */
-static inline tree
+inline tree
gimple_debug_source_bind_get_value (const gimple *dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Return a pointer to the value bound to the variable in a
GIMPLE_DEBUG source bind statement. */
-static inline tree *
+inline tree *
gimple_debug_source_bind_get_value_ptr (gimple *dbg)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Set the variable bound in a GIMPLE_DEBUG source bind statement. */
-static inline void
+inline void
gimple_debug_source_bind_set_var (gimple *dbg, tree var)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Set the value bound to the variable in a GIMPLE_DEBUG source bind
statement. */
-static inline void
+inline void
gimple_debug_source_bind_set_value (gimple *dbg, tree value)
{
GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
/* Return true if S is a GIMPLE_DEBUG BEGIN_STMT statement. */
-static inline bool
+inline bool
gimple_debug_begin_stmt_p (const gimple *s)
{
if (is_gimple_debug (s))
/* Return true if S is a GIMPLE_DEBUG INLINE_ENTRY statement. */
-static inline bool
+inline bool
gimple_debug_inline_entry_p (const gimple *s)
{
if (is_gimple_debug (s))
/* Return true if S is a GIMPLE_DEBUG non-binding marker statement. */
-static inline bool
+inline bool
gimple_debug_nonbind_marker_p (const gimple *s)
{
if (is_gimple_debug (s))
/* Return the line number for EXPR, or return -1 if we have no line
number information for it. */
-static inline int
+inline int
get_lineno (const gimple *stmt)
{
location_t loc;
/* Return a pointer to the body for the OMP statement GS. */
-static inline gimple_seq *
+inline gimple_seq *
gimple_omp_body_ptr (gimple *gs)
{
return &static_cast <gimple_statement_omp *> (gs)->body;
/* Return the body for the OMP statement GS. */
-static inline gimple_seq
+inline gimple_seq
gimple_omp_body (const gimple *gs)
{
return *gimple_omp_body_ptr (const_cast <gimple *> (gs));
/* Set BODY to be the body for the OMP statement GS. */
-static inline void
+inline void
gimple_omp_set_body (gimple *gs, gimple_seq body)
{
static_cast <gimple_statement_omp *> (gs)->body = body;
/* Return the name associated with OMP_CRITICAL statement CRIT_STMT. */
-static inline tree
+inline tree
gimple_omp_critical_name (const gomp_critical *crit_stmt)
{
return crit_stmt->name;
/* Return a pointer to the name associated with OMP critical statement
CRIT_STMT. */
-static inline tree *
+inline tree *
gimple_omp_critical_name_ptr (gomp_critical *crit_stmt)
{
return &crit_stmt->name;
/* Set NAME to be the name associated with OMP critical statement
CRIT_STMT. */
-static inline void
+inline void
gimple_omp_critical_set_name (gomp_critical *crit_stmt, tree name)
{
crit_stmt->name = name;
/* Return the clauses associated with OMP_CRITICAL statement CRIT_STMT. */
-static inline tree
+inline tree
gimple_omp_critical_clauses (const gomp_critical *crit_stmt)
{
return crit_stmt->clauses;
/* Return a pointer to the clauses associated with OMP critical statement
CRIT_STMT. */
-static inline tree *
+inline tree *
gimple_omp_critical_clauses_ptr (gomp_critical *crit_stmt)
{
return &crit_stmt->clauses;
/* Set CLAUSES to be the clauses associated with OMP critical statement
CRIT_STMT. */
-static inline void
+inline void
gimple_omp_critical_set_clauses (gomp_critical *crit_stmt, tree clauses)
{
crit_stmt->clauses = clauses;
/* Return the clauses associated with OMP_ORDERED statement ORD_STMT. */
-static inline tree
+inline tree
gimple_omp_ordered_clauses (const gomp_ordered *ord_stmt)
{
return ord_stmt->clauses;
/* Return a pointer to the clauses associated with OMP ordered statement
ORD_STMT. */
-static inline tree *
+inline tree *
gimple_omp_ordered_clauses_ptr (gomp_ordered *ord_stmt)
{
return &ord_stmt->clauses;
/* Set CLAUSES to be the clauses associated with OMP ordered statement
ORD_STMT. */
-static inline void
+inline void
gimple_omp_ordered_set_clauses (gomp_ordered *ord_stmt, tree clauses)
{
ord_stmt->clauses = clauses;
/* Return the clauses associated with OMP_SCAN statement SCAN_STMT. */
-static inline tree
+inline tree
gimple_omp_scan_clauses (const gomp_scan *scan_stmt)
{
return scan_stmt->clauses;
/* Return a pointer to the clauses associated with OMP scan statement
SCAN_STMT. */
-static inline tree *
+inline tree *
gimple_omp_scan_clauses_ptr (gomp_scan *scan_stmt)
{
return &scan_stmt->clauses;
/* Set CLAUSES to be the clauses associated with OMP scan statement
SCAN_STMT. */
-static inline void
+inline void
gimple_omp_scan_set_clauses (gomp_scan *scan_stmt, tree clauses)
{
scan_stmt->clauses = clauses;
/* Return the clauses associated with OMP_TASKGROUP statement GS. */
-static inline tree
+inline tree
gimple_omp_taskgroup_clauses (const gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASKGROUP);
/* Return a pointer to the clauses associated with OMP taskgroup statement
GS. */
-static inline tree *
+inline tree *
gimple_omp_taskgroup_clauses_ptr (gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASKGROUP);
/* Set CLAUSES to be the clauses associated with OMP taskgroup statement
GS. */
-static inline void
+inline void
gimple_omp_taskgroup_set_clauses (gimple *gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_TASKGROUP);
/* Return the clauses associated with OMP_MASKED statement GS. */
-static inline tree
+inline tree
gimple_omp_masked_clauses (const gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_MASKED);
/* Return a pointer to the clauses associated with OMP masked statement
GS. */
-static inline tree *
+inline tree *
gimple_omp_masked_clauses_ptr (gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_MASKED);
/* Set CLAUSES to be the clauses associated with OMP masked statement
GS. */
-static inline void
+inline void
gimple_omp_masked_set_clauses (gimple *gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_MASKED);
/* Return the clauses associated with OMP_SCOPE statement GS. */
-static inline tree
+inline tree
gimple_omp_scope_clauses (const gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SCOPE);
/* Return a pointer to the clauses associated with OMP scope statement
GS. */
-static inline tree *
+inline tree *
gimple_omp_scope_clauses_ptr (gimple *gs)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SCOPE);
/* Set CLAUSES to be the clauses associated with OMP scope statement
GS. */
-static inline void
+inline void
gimple_omp_scope_set_clauses (gimple *gs, tree clauses)
{
GIMPLE_CHECK (gs, GIMPLE_OMP_SCOPE);
/* Return the kind of the OMP_FOR statement G. */
-static inline int
+inline int
gimple_omp_for_kind (const gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
/* Set the kind of the OMP_FOR statement G. */
-static inline void
+inline void
gimple_omp_for_set_kind (gomp_for *g, int kind)
{
g->subcode = (g->subcode & ~GF_OMP_FOR_KIND_MASK)
/* Return true if OMP_FOR statement G has the
GF_OMP_FOR_COMBINED flag set. */
-static inline bool
+inline bool
gimple_omp_for_combined_p (const gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
/* Set the GF_OMP_FOR_COMBINED field in the OMP_FOR statement G depending on
the boolean value of COMBINED_P. */
-static inline void
+inline void
gimple_omp_for_set_combined_p (gomp_for *g, bool combined_p)
{
if (combined_p)
/* Return true if the OMP_FOR statement G has the
GF_OMP_FOR_COMBINED_INTO flag set. */
-static inline bool
+inline bool
gimple_omp_for_combined_into_p (const gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
/* Set the GF_OMP_FOR_COMBINED_INTO field in the OMP_FOR statement G depending
on the boolean value of COMBINED_P. */
-static inline void
+inline void
gimple_omp_for_set_combined_into_p (gomp_for *g, bool combined_p)
{
if (combined_p)
/* Return the clauses associated with the OMP_FOR statement GS. */
-static inline tree
+inline tree
gimple_omp_for_clauses (const gimple *gs)
{
const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
/* Return a pointer to the clauses associated with the OMP_FOR statement
GS. */
-static inline tree *
+inline tree *
gimple_omp_for_clauses_ptr (gimple *gs)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Set CLAUSES to be the list of clauses associated with the OMP_FOR statement
GS. */
-static inline void
+inline void
gimple_omp_for_set_clauses (gimple *gs, tree clauses)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Get the collapse count of the OMP_FOR statement GS. */
-static inline size_t
+inline size_t
gimple_omp_for_collapse (const gimple *gs)
{
const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
/* Return the condition code associated with the OMP_FOR statement GS. */
-static inline enum tree_code
+inline enum tree_code
gimple_omp_for_cond (const gimple *gs, size_t i)
{
const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
/* Set COND to be the condition code for the OMP_FOR statement GS. */
-static inline void
+inline void
gimple_omp_for_set_cond (gimple *gs, size_t i, enum tree_code cond)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Return the index variable for the OMP_FOR statement GS. */
-static inline tree
+inline tree
gimple_omp_for_index (const gimple *gs, size_t i)
{
const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
/* Return a pointer to the index variable for the OMP_FOR statement GS. */
-static inline tree *
+inline tree *
gimple_omp_for_index_ptr (gimple *gs, size_t i)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Set INDEX to be the index variable for the OMP_FOR statement GS. */
-static inline void
+inline void
gimple_omp_for_set_index (gimple *gs, size_t i, tree index)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Return the initial value for the OMP_FOR statement GS. */
-static inline tree
+inline tree
gimple_omp_for_initial (const gimple *gs, size_t i)
{
const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
/* Return a pointer to the initial value for the OMP_FOR statement GS. */
-static inline tree *
+inline tree *
gimple_omp_for_initial_ptr (gimple *gs, size_t i)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Set INITIAL to be the initial value for the OMP_FOR statement GS. */
-static inline void
+inline void
gimple_omp_for_set_initial (gimple *gs, size_t i, tree initial)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Return the final value for the OMP_FOR statement GS. */
-static inline tree
+inline tree
gimple_omp_for_final (const gimple *gs, size_t i)
{
const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
/* Return a pointer to the final value for the OMP_FOR statement GS. */
-static inline tree *
+inline tree *
gimple_omp_for_final_ptr (gimple *gs, size_t i)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Set FINAL to be the final value for the OMP_FOR statement GS. */
-static inline void
+inline void
gimple_omp_for_set_final (gimple *gs, size_t i, tree final)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Return the increment value for the OMP_FOR statement GS. */
-static inline tree
+inline tree
gimple_omp_for_incr (const gimple *gs, size_t i)
{
const gomp_for *omp_for_stmt = as_a <const gomp_for *> (gs);
/* Return a pointer to the increment value for the OMP_FOR statement GS. */
-static inline tree *
+inline tree *
gimple_omp_for_incr_ptr (gimple *gs, size_t i)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Set INCR to be the increment value for the OMP_FOR statement GS. */
-static inline void
+inline void
gimple_omp_for_set_incr (gimple *gs, size_t i, tree incr)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Return a pointer to the sequence of statements to execute before the OMP_FOR
statement GS starts. */
-static inline gimple_seq *
+inline gimple_seq *
gimple_omp_for_pre_body_ptr (gimple *gs)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Return the sequence of statements to execute before the OMP_FOR
statement GS starts. */
-static inline gimple_seq
+inline gimple_seq
gimple_omp_for_pre_body (const gimple *gs)
{
return *gimple_omp_for_pre_body_ptr (const_cast <gimple *> (gs));
/* Set PRE_BODY to be the sequence of statements to execute before the
OMP_FOR statement GS starts. */
-static inline void
+inline void
gimple_omp_for_set_pre_body (gimple *gs, gimple_seq pre_body)
{
gomp_for *omp_for_stmt = as_a <gomp_for *> (gs);
/* Return the clauses associated with OMP_PARALLEL GS. */
-static inline tree
+inline tree
gimple_omp_parallel_clauses (const gimple *gs)
{
const gomp_parallel *omp_parallel_stmt = as_a <const gomp_parallel *> (gs);
/* Return a pointer to the clauses associated with OMP_PARALLEL_STMT. */
-static inline tree *
+inline tree *
gimple_omp_parallel_clauses_ptr (gomp_parallel *omp_parallel_stmt)
{
return &omp_parallel_stmt->clauses;
/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL_STMT. */
-static inline void
+inline void
gimple_omp_parallel_set_clauses (gomp_parallel *omp_parallel_stmt,
tree clauses)
{
/* Return the child function used to hold the body of OMP_PARALLEL_STMT. */
-static inline tree
+inline tree
gimple_omp_parallel_child_fn (const gomp_parallel *omp_parallel_stmt)
{
return omp_parallel_stmt->child_fn;
/* Return a pointer to the child function used to hold the body of
OMP_PARALLEL_STMT. */
-static inline tree *
+inline tree *
gimple_omp_parallel_child_fn_ptr (gomp_parallel *omp_parallel_stmt)
{
return &omp_parallel_stmt->child_fn;
/* Set CHILD_FN to be the child function for OMP_PARALLEL_STMT. */
-static inline void
+inline void
gimple_omp_parallel_set_child_fn (gomp_parallel *omp_parallel_stmt,
tree child_fn)
{
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_PARALLEL_STMT. */
-static inline tree
+inline tree
gimple_omp_parallel_data_arg (const gomp_parallel *omp_parallel_stmt)
{
return omp_parallel_stmt->data_arg;
/* Return a pointer to the data argument for OMP_PARALLEL_STMT. */
-static inline tree *
+inline tree *
gimple_omp_parallel_data_arg_ptr (gomp_parallel *omp_parallel_stmt)
{
return &omp_parallel_stmt->data_arg;
/* Set DATA_ARG to be the data argument for OMP_PARALLEL_STMT. */
-static inline void
+inline void
gimple_omp_parallel_set_data_arg (gomp_parallel *omp_parallel_stmt,
tree data_arg)
{
/* Return the clauses associated with OMP_TASK GS. */
-static inline tree
+inline tree
gimple_omp_task_clauses (const gimple *gs)
{
const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
/* Return a pointer to the clauses associated with OMP_TASK GS. */
-static inline tree *
+inline tree *
gimple_omp_task_clauses_ptr (gimple *gs)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS. */
-static inline void
+inline void
gimple_omp_task_set_clauses (gimple *gs, tree clauses)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Return true if OMP task statement G has the
GF_OMP_TASK_TASKLOOP flag set. */
-static inline bool
+inline bool
gimple_omp_task_taskloop_p (const gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_TASK);
/* Set the GF_OMP_TASK_TASKLOOP field in G depending on the boolean
value of TASKLOOP_P. */
-static inline void
+inline void
gimple_omp_task_set_taskloop_p (gimple *g, bool taskloop_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_TASK);
/* Return true if OMP task statement G has the
GF_OMP_TASK_TASKWAIT flag set. */
-static inline bool
+inline bool
gimple_omp_task_taskwait_p (const gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_TASK);
/* Set the GF_OMP_TASK_TASKWAIT field in G depending on the boolean
value of TASKWAIT_P. */
-static inline void
+inline void
gimple_omp_task_set_taskwait_p (gimple *g, bool taskwait_p)
{
GIMPLE_CHECK (g, GIMPLE_OMP_TASK);
/* Return the child function used to hold the body of OMP_TASK GS. */
-static inline tree
+inline tree
gimple_omp_task_child_fn (const gimple *gs)
{
const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS. */
-static inline tree *
+inline tree *
gimple_omp_task_child_fn_ptr (gimple *gs)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Set CHILD_FN to be the child function for OMP_TASK GS. */
-static inline void
+inline void
gimple_omp_task_set_child_fn (gimple *gs, tree child_fn)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS. */
-static inline tree
+inline tree
gimple_omp_task_data_arg (const gimple *gs)
{
const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
/* Return a pointer to the data argument for OMP_TASK GS. */
-static inline tree *
+inline tree *
gimple_omp_task_data_arg_ptr (gimple *gs)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Set DATA_ARG to be the data argument for OMP_TASK GS. */
-static inline void
+inline void
gimple_omp_task_set_data_arg (gimple *gs, tree data_arg)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Return the clauses associated with OMP_TASK GS. */
-static inline tree
+inline tree
gimple_omp_taskreg_clauses (const gimple *gs)
{
const gimple_statement_omp_taskreg *omp_taskreg_stmt
/* Return a pointer to the clauses associated with OMP_TASK GS. */
-static inline tree *
+inline tree *
gimple_omp_taskreg_clauses_ptr (gimple *gs)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt
/* Set CLAUSES to be the list of clauses associated with OMP_TASK
GS. */
-static inline void
+inline void
gimple_omp_taskreg_set_clauses (gimple *gs, tree clauses)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt
/* Return the child function used to hold the body of OMP_TASK GS. */
-static inline tree
+inline tree
gimple_omp_taskreg_child_fn (const gimple *gs)
{
const gimple_statement_omp_taskreg *omp_taskreg_stmt
/* Return a pointer to the child function used to hold the body of
OMP_TASK GS. */
-static inline tree *
+inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple *gs)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt
/* Set CHILD_FN to be the child function for OMP_TASK GS. */
-static inline void
+inline void
gimple_omp_taskreg_set_child_fn (gimple *gs, tree child_fn)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TASK GS. */
-static inline tree
+inline tree
gimple_omp_taskreg_data_arg (const gimple *gs)
{
const gimple_statement_omp_taskreg *omp_taskreg_stmt
/* Return a pointer to the data argument for OMP_TASK GS. */
-static inline tree *
+inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple *gs)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt
/* Set DATA_ARG to be the data argument for OMP_TASK GS. */
-static inline void
+inline void
gimple_omp_taskreg_set_data_arg (gimple *gs, tree data_arg)
{
gimple_statement_omp_taskreg *omp_taskreg_stmt
/* Return the copy function used to hold the body of OMP_TASK GS. */
-static inline tree
+inline tree
gimple_omp_task_copy_fn (const gimple *gs)
{
const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
/* Return a pointer to the copy function used to hold the body of
OMP_TASK GS. */
-static inline tree *
+inline tree *
gimple_omp_task_copy_fn_ptr (gimple *gs)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Set COPY_FN to be the copy function for OMP_TASK GS. */
-static inline void
+inline void
gimple_omp_task_set_copy_fn (gimple *gs, tree copy_fn)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Return size of the data block in bytes in OMP_TASK GS. */
-static inline tree
+inline tree
gimple_omp_task_arg_size (const gimple *gs)
{
const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
/* Return a pointer to the data block size for OMP_TASK GS. */
-static inline tree *
+inline tree *
gimple_omp_task_arg_size_ptr (gimple *gs)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Set ARG_SIZE to be the data block size for OMP_TASK GS. */
-static inline void
+inline void
gimple_omp_task_set_arg_size (gimple *gs, tree arg_size)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Return align of the data block in bytes in OMP_TASK GS. */
-static inline tree
+inline tree
gimple_omp_task_arg_align (const gimple *gs)
{
const gomp_task *omp_task_stmt = as_a <const gomp_task *> (gs);
/* Return a pointer to the data block align for OMP_TASK GS. */
-static inline tree *
+inline tree *
gimple_omp_task_arg_align_ptr (gimple *gs)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Set ARG_ALIGN to be the data block align for OMP_TASK GS. */
-static inline void
+inline void
gimple_omp_task_set_arg_align (gimple *gs, tree arg_align)
{
gomp_task *omp_task_stmt = as_a <gomp_task *> (gs);
/* Return the clauses associated with OMP_SINGLE GS. */
-static inline tree
+inline tree
gimple_omp_single_clauses (const gimple *gs)
{
const gomp_single *omp_single_stmt = as_a <const gomp_single *> (gs);
/* Return a pointer to the clauses associated with OMP_SINGLE GS. */
-static inline tree *
+inline tree *
gimple_omp_single_clauses_ptr (gimple *gs)
{
gomp_single *omp_single_stmt = as_a <gomp_single *> (gs);
/* Set CLAUSES to be the clauses associated with OMP_SINGLE_STMT. */
-static inline void
+inline void
gimple_omp_single_set_clauses (gomp_single *omp_single_stmt, tree clauses)
{
omp_single_stmt->clauses = clauses;
/* Return the clauses associated with OMP_TARGET GS. */
-static inline tree
+inline tree
gimple_omp_target_clauses (const gimple *gs)
{
const gomp_target *omp_target_stmt = as_a <const gomp_target *> (gs);
/* Return a pointer to the clauses associated with OMP_TARGET GS. */
-static inline tree *
+inline tree *
gimple_omp_target_clauses_ptr (gimple *gs)
{
gomp_target *omp_target_stmt = as_a <gomp_target *> (gs);
/* Set CLAUSES to be the clauses associated with OMP_TARGET_STMT. */
-static inline void
+inline void
gimple_omp_target_set_clauses (gomp_target *omp_target_stmt,
tree clauses)
{
/* Return the kind of the OMP_TARGET G. */
-static inline int
+inline int
gimple_omp_target_kind (const gimple *g)
{
GIMPLE_CHECK (g, GIMPLE_OMP_TARGET);
/* Set the kind of the OMP_TARGET G. */
-static inline void
+inline void
gimple_omp_target_set_kind (gomp_target *g, int kind)
{
g->subcode = (g->subcode & ~GF_OMP_TARGET_KIND_MASK)
/* Return the child function used to hold the body of OMP_TARGET_STMT. */
-static inline tree
+inline tree
gimple_omp_target_child_fn (const gomp_target *omp_target_stmt)
{
return omp_target_stmt->child_fn;
/* Return a pointer to the child function used to hold the body of
OMP_TARGET_STMT. */
-static inline tree *
+inline tree *
gimple_omp_target_child_fn_ptr (gomp_target *omp_target_stmt)
{
return &omp_target_stmt->child_fn;
/* Set CHILD_FN to be the child function for OMP_TARGET_STMT. */
-static inline void
+inline void
gimple_omp_target_set_child_fn (gomp_target *omp_target_stmt,
tree child_fn)
{
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TARGET_STMT. */
-static inline tree
+inline tree
gimple_omp_target_data_arg (const gomp_target *omp_target_stmt)
{
return omp_target_stmt->data_arg;
/* Return a pointer to the data argument for OMP_TARGET GS. */
-static inline tree *
+inline tree *
gimple_omp_target_data_arg_ptr (gomp_target *omp_target_stmt)
{
return &omp_target_stmt->data_arg;
/* Set DATA_ARG to be the data argument for OMP_TARGET_STMT. */
-static inline void
+inline void
gimple_omp_target_set_data_arg (gomp_target *omp_target_stmt,
tree data_arg)
{
/* Return the clauses associated with OMP_TEAMS GS. */
-static inline tree
+inline tree
gimple_omp_teams_clauses (const gimple *gs)
{
const gomp_teams *omp_teams_stmt = as_a <const gomp_teams *> (gs);
/* Return a pointer to the clauses associated with OMP_TEAMS GS. */
-static inline tree *
+inline tree *
gimple_omp_teams_clauses_ptr (gimple *gs)
{
gomp_teams *omp_teams_stmt = as_a <gomp_teams *> (gs);
/* Set CLAUSES to be the clauses associated with OMP_TEAMS_STMT. */
-static inline void
+inline void
gimple_omp_teams_set_clauses (gomp_teams *omp_teams_stmt, tree clauses)
{
omp_teams_stmt->clauses = clauses;
/* Return the child function used to hold the body of OMP_TEAMS_STMT. */
-static inline tree
+inline tree
gimple_omp_teams_child_fn (const gomp_teams *omp_teams_stmt)
{
return omp_teams_stmt->child_fn;
/* Return a pointer to the child function used to hold the body of
OMP_TEAMS_STMT. */
-static inline tree *
+inline tree *
gimple_omp_teams_child_fn_ptr (gomp_teams *omp_teams_stmt)
{
return &omp_teams_stmt->child_fn;
/* Set CHILD_FN to be the child function for OMP_TEAMS_STMT. */
-static inline void
+inline void
gimple_omp_teams_set_child_fn (gomp_teams *omp_teams_stmt, tree child_fn)
{
omp_teams_stmt->child_fn = child_fn;
/* Return the artificial argument used to send variables and values
from the parent to the children threads in OMP_TEAMS_STMT. */
-static inline tree
+inline tree
gimple_omp_teams_data_arg (const gomp_teams *omp_teams_stmt)
{
return omp_teams_stmt->data_arg;
/* Return a pointer to the data argument for OMP_TEAMS_STMT. */
-static inline tree *
+inline tree *
gimple_omp_teams_data_arg_ptr (gomp_teams *omp_teams_stmt)
{
return &omp_teams_stmt->data_arg;
/* Set DATA_ARG to be the data argument for OMP_TEAMS_STMT. */
-static inline void
+inline void
gimple_omp_teams_set_data_arg (gomp_teams *omp_teams_stmt, tree data_arg)
{
omp_teams_stmt->data_arg = data_arg;
/* Return the host flag of an OMP_TEAMS_STMT. */
-static inline bool
+inline bool
gimple_omp_teams_host (const gomp_teams *omp_teams_stmt)
{
return (gimple_omp_subcode (omp_teams_stmt) & GF_OMP_TEAMS_HOST) != 0;
/* Set host flag of an OMP_TEAMS_STMT to VALUE. */
-static inline void
+inline void
gimple_omp_teams_set_host (gomp_teams *omp_teams_stmt, bool value)
{
if (value)
/* Return the clauses associated with OMP_SECTIONS GS. */
-static inline tree
+inline tree
gimple_omp_sections_clauses (const gimple *gs)
{
const gomp_sections *omp_sections_stmt = as_a <const gomp_sections *> (gs);
/* Return a pointer to the clauses associated with OMP_SECTIONS GS. */
-static inline tree *
+inline tree *
gimple_omp_sections_clauses_ptr (gimple *gs)
{
gomp_sections *omp_sections_stmt = as_a <gomp_sections *> (gs);
/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS
GS. */
-static inline void
+inline void
gimple_omp_sections_set_clauses (gimple *gs, tree clauses)
{
gomp_sections *omp_sections_stmt = as_a <gomp_sections *> (gs);
/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
in GS. */
-static inline tree
+inline tree
gimple_omp_sections_control (const gimple *gs)
{
const gomp_sections *omp_sections_stmt = as_a <const gomp_sections *> (gs);
/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS. */
-static inline tree *
+inline tree *
gimple_omp_sections_control_ptr (gimple *gs)
{
gomp_sections *omp_sections_stmt = as_a <gomp_sections *> (gs);
/* Set CONTROL to be the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS. */
-static inline void
+inline void
gimple_omp_sections_set_control (gimple *gs, tree control)
{
gomp_sections *omp_sections_stmt = as_a <gomp_sections *> (gs);
/* Set the value being stored in an atomic store. */
-static inline void
+inline void
gimple_omp_atomic_store_set_val (gomp_atomic_store *store_stmt, tree val)
{
store_stmt->val = val;
/* Return the value being stored in an atomic store. */
-static inline tree
+inline tree
gimple_omp_atomic_store_val (const gomp_atomic_store *store_stmt)
{
return store_stmt->val;
/* Return a pointer to the value being stored in an atomic store. */
-static inline tree *
+inline tree *
gimple_omp_atomic_store_val_ptr (gomp_atomic_store *store_stmt)
{
return &store_stmt->val;
/* Set the LHS of an atomic load. */
-static inline void
+inline void
gimple_omp_atomic_load_set_lhs (gomp_atomic_load *load_stmt, tree lhs)
{
load_stmt->lhs = lhs;
/* Get the LHS of an atomic load. */
-static inline tree
+inline tree
gimple_omp_atomic_load_lhs (const gomp_atomic_load *load_stmt)
{
return load_stmt->lhs;
/* Return a pointer to the LHS of an atomic load. */
-static inline tree *
+inline tree *
gimple_omp_atomic_load_lhs_ptr (gomp_atomic_load *load_stmt)
{
return &load_stmt->lhs;
/* Set the RHS of an atomic load. */
-static inline void
+inline void
gimple_omp_atomic_load_set_rhs (gomp_atomic_load *load_stmt, tree rhs)
{
load_stmt->rhs = rhs;
/* Get the RHS of an atomic load. */
-static inline tree
+inline tree
gimple_omp_atomic_load_rhs (const gomp_atomic_load *load_stmt)
{
return load_stmt->rhs;
/* Return a pointer to the RHS of an atomic load. */
-static inline tree *
+inline tree *
gimple_omp_atomic_load_rhs_ptr (gomp_atomic_load *load_stmt)
{
return &load_stmt->rhs;
/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
-static inline tree
+inline tree
gimple_omp_continue_control_def (const gomp_continue *cont_stmt)
{
return cont_stmt->control_def;
/* The same as above, but return the address. */
-static inline tree *
+inline tree *
gimple_omp_continue_control_def_ptr (gomp_continue *cont_stmt)
{
return &cont_stmt->control_def;
/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE. */
-static inline void
+inline void
gimple_omp_continue_set_control_def (gomp_continue *cont_stmt, tree def)
{
cont_stmt->control_def = def;
/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE. */
-static inline tree
+inline tree
gimple_omp_continue_control_use (const gomp_continue *cont_stmt)
{
return cont_stmt->control_use;
/* The same as above, but return the address. */
-static inline tree *
+inline tree *
gimple_omp_continue_control_use_ptr (gomp_continue *cont_stmt)
{
return &cont_stmt->control_use;
/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE. */
-static inline void
+inline void
gimple_omp_continue_set_control_use (gomp_continue *cont_stmt, tree use)
{
cont_stmt->control_use = use;
/* Return the guard associated with the GIMPLE_ASSUME statement GS. */
-static inline tree
+inline tree
gimple_assume_guard (const gimple *gs)
{
const gimple_statement_assume *assume_stmt
/* Set the guard associated with the GIMPLE_ASSUME statement GS. */
-static inline void
+inline void
gimple_assume_set_guard (gimple *gs, tree guard)
{
gimple_statement_assume *assume_stmt = as_a <gimple_statement_assume *> (gs);
assume_stmt->guard = guard;
}
-static inline tree *
+inline tree *
gimple_assume_guard_ptr (gimple *gs)
{
gimple_statement_assume *assume_stmt = as_a <gimple_statement_assume *> (gs);
/* Return the address of the GIMPLE sequence contained in the GIMPLE_ASSUME
statement GS. */
-static inline gimple_seq *
+inline gimple_seq *
gimple_assume_body_ptr (gimple *gs)
{
gimple_statement_assume *assume_stmt = as_a <gimple_statement_assume *> (gs);
/* Return the GIMPLE sequence contained in the GIMPLE_ASSUME statement GS. */
-static inline gimple_seq
+inline gimple_seq
gimple_assume_body (const gimple *gs)
{
const gimple_statement_assume *assume_stmt
/* Return a pointer to the body for the GIMPLE_TRANSACTION statement
TRANSACTION_STMT. */
-static inline gimple_seq *
+inline gimple_seq *
gimple_transaction_body_ptr (gtransaction *transaction_stmt)
{
return &transaction_stmt->body;
/* Return the body for the GIMPLE_TRANSACTION statement TRANSACTION_STMT. */
-static inline gimple_seq
+inline gimple_seq
gimple_transaction_body (const gtransaction *transaction_stmt)
{
return transaction_stmt->body;
/* Return the label associated with a GIMPLE_TRANSACTION. */
-static inline tree
+inline tree
gimple_transaction_label_norm (const gtransaction *transaction_stmt)
{
return transaction_stmt->label_norm;
}
-static inline tree *
+inline tree *
gimple_transaction_label_norm_ptr (gtransaction *transaction_stmt)
{
return &transaction_stmt->label_norm;
}
-static inline tree
+inline tree
gimple_transaction_label_uninst (const gtransaction *transaction_stmt)
{
return transaction_stmt->label_uninst;
}
-static inline tree *
+inline tree *
gimple_transaction_label_uninst_ptr (gtransaction *transaction_stmt)
{
return &transaction_stmt->label_uninst;
}
-static inline tree
+inline tree
gimple_transaction_label_over (const gtransaction *transaction_stmt)
{
return transaction_stmt->label_over;
}
-static inline tree *
+inline tree *
gimple_transaction_label_over_ptr (gtransaction *transaction_stmt)
{
return &transaction_stmt->label_over;
/* Return the subcode associated with a GIMPLE_TRANSACTION. */
-static inline unsigned int
+inline unsigned int
gimple_transaction_subcode (const gtransaction *transaction_stmt)
{
return transaction_stmt->subcode;
/* Set BODY to be the body for the GIMPLE_TRANSACTION statement
TRANSACTION_STMT. */
-static inline void
+inline void
gimple_transaction_set_body (gtransaction *transaction_stmt,
gimple_seq body)
{
/* Set the label associated with a GIMPLE_TRANSACTION. */
-static inline void
+inline void
gimple_transaction_set_label_norm (gtransaction *transaction_stmt, tree label)
{
transaction_stmt->label_norm = label;
}
-static inline void
+inline void
gimple_transaction_set_label_uninst (gtransaction *transaction_stmt, tree label)
{
transaction_stmt->label_uninst = label;
}
-static inline void
+inline void
gimple_transaction_set_label_over (gtransaction *transaction_stmt, tree label)
{
transaction_stmt->label_over = label;
/* Set the subcode associated with a GIMPLE_TRANSACTION. */
-static inline void
+inline void
gimple_transaction_set_subcode (gtransaction *transaction_stmt,
unsigned int subcode)
{
/* Return a pointer to the return value for GIMPLE_RETURN GS. */
-static inline tree *
+inline tree *
gimple_return_retval_ptr (greturn *gs)
{
return &gs->op[0];
/* Return the return value for GIMPLE_RETURN GS. */
-static inline tree
+inline tree
gimple_return_retval (const greturn *gs)
{
return gs->op[0];
/* Set RETVAL to be the return value for GIMPLE_RETURN GS. */
-static inline void
+inline void
gimple_return_set_retval (greturn *gs, tree retval)
{
gs->op[0] = retval;
case GIMPLE_OMP_ATOMIC_STORE: \
case GIMPLE_OMP_CONTINUE
-static inline bool
+inline bool
is_gimple_omp (const gimple *stmt)
{
switch (gimple_code (stmt))
/* Return true if the OMP gimple statement STMT is any of the OpenACC types
specifically. */
-static inline bool
+inline bool
is_gimple_omp_oacc (const gimple *stmt)
{
gcc_assert (is_gimple_omp (stmt));
/* Return true if the OMP gimple statement STMT is offloaded. */
-static inline bool
+inline bool
is_gimple_omp_offloaded (const gimple *stmt)
{
gcc_assert (is_gimple_omp (stmt));
/* Returns TRUE if statement G is a GIMPLE_NOP. */
-static inline bool
+inline bool
gimple_nop_p (const gimple *g)
{
return gimple_code (g) == GIMPLE_NOP;
/* Return true if GS is a GIMPLE_RESX. */
-static inline bool
+inline bool
is_gimple_resx (const gimple *gs)
{
return gimple_code (gs) == GIMPLE_RESX;
extern uint64_t gimple_alloc_sizes[];
/* Return the allocation kind for a given stmt CODE. */
-static inline enum gimple_alloc_kind
+inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
switch (code)
/* Return true if a location should not be emitted for this statement
by annotate_all_with_location. */
-static inline bool
+inline bool
gimple_do_not_emit_location_p (gimple *g)
{
return gimple_plf (g, GF_PLF_1);
/* Mark statement G so a location will not be emitted by
annotate_one_with_location. */
-static inline void
+inline void
gimple_set_do_not_emit_location (gimple *g)
{
/* The PLF flags are initialized to 0 when a new tuple is created,
typedef unsigned graphite_dim_t;
-static inline graphite_dim_t scop_nb_params (scop_p);
+inline graphite_dim_t scop_nb_params (scop_p);
/* A data reference can write or read some memory or we
just know it may write some memory. */
void debug_pdr (poly_dr_p);
void print_pdr (FILE *, poly_dr_p);
-static inline bool
+inline bool
pdr_read_p (poly_dr_p pdr)
{
return PDR_TYPE (pdr) == PDR_READ;
/* Returns true when PDR is a "write". */
-static inline bool
+inline bool
pdr_write_p (poly_dr_p pdr)
{
return PDR_TYPE (pdr) == PDR_WRITE;
/* Returns true when PDR is a "may write". */
-static inline bool
+inline bool
pdr_may_write_p (poly_dr_p pdr)
{
return PDR_TYPE (pdr) == PDR_MAY_WRITE;
/* The basic block of the PBB. */
-static inline basic_block
+inline basic_block
pbb_bb (poly_bb_p pbb)
{
return GBB_BB (PBB_BLACK_BOX (pbb));
}
-static inline int
+inline int
pbb_index (poly_bb_p pbb)
{
return pbb_bb (pbb)->index;
/* The loop of the PBB. */
-static inline loop_p
+inline loop_p
pbb_loop (poly_bb_p pbb)
{
return gbb_loop (PBB_BLACK_BOX (pbb));
/* The scop that contains the PDR. */
-static inline scop_p
+inline scop_p
pdr_scop (poly_dr_p pdr)
{
return PBB_SCOP (PDR_PBB (pdr));
/* Set black box of PBB to BLACKBOX. */
-static inline void
+inline void
pbb_set_black_box (poly_bb_p pbb, gimple_poly_bb_p black_box)
{
pbb->black_box = black_box;
/* Set the region of SCOP to REGION. */
-static inline void
+inline void
scop_set_region (scop_p scop, sese_info_p region)
{
scop->scop_info = region;
/* Returns the number of parameters for SCOP. */
-static inline graphite_dim_t
+inline graphite_dim_t
scop_nb_params (scop_p scop)
{
return scop->nb_params;
/* Set the number of params of SCOP to NB_PARAMS. */
-static inline void
+inline void
scop_set_nb_params (scop_p scop, graphite_dim_t nb_params)
{
scop->nb_params = nb_params;
#define CLEAR_HARD_REG_SET(TO) ((TO) = HARD_CONST (0))
#define SET_HARD_REG_SET(TO) ((TO) = ~ HARD_CONST (0))
-static inline bool
+inline bool
hard_reg_set_subset_p (const_hard_reg_set x, const_hard_reg_set y)
{
return (x & ~y) == HARD_CONST (0);
}
-static inline bool
+inline bool
hard_reg_set_intersect_p (const_hard_reg_set x, const_hard_reg_set y)
{
return (x & y) != HARD_CONST (0);
}
-static inline bool
+inline bool
hard_reg_set_empty_p (const_hard_reg_set x)
{
return x == HARD_CONST (0);
set.elts[i] = -1;
}
-static inline bool
+inline bool
hard_reg_set_subset_p (const_hard_reg_set x, const_hard_reg_set y)
{
HARD_REG_ELT_TYPE bad = 0;
return bad == 0;
}
-static inline bool
+inline bool
hard_reg_set_intersect_p (const_hard_reg_set x, const_hard_reg_set y)
{
HARD_REG_ELT_TYPE good = 0;
return good != 0;
}
-static inline bool
+inline bool
hard_reg_set_empty_p (const_hard_reg_set x)
{
HARD_REG_ELT_TYPE bad = 0;
/* The implementation of the iterator functions is fully analogous to
the bitmap iterators. */
-static inline void
+inline void
hard_reg_set_iter_init (hard_reg_set_iterator *iter, const_hard_reg_set set,
unsigned min, unsigned *regno)
{
*regno = min;
}
-static inline bool
+inline bool
hard_reg_set_iter_set (hard_reg_set_iterator *iter, unsigned *regno)
{
while (1)
}
}
-static inline void
+inline void
hard_reg_set_iter_next (hard_reg_set_iterator *iter, unsigned *regno)
{
iter->bits >>= 1;
/* ggc marking routines. */
template<typename K, typename V, typename H>
-static inline void
+inline void
gt_ggc_mx (hash_map<K, V, H> *h)
{
gt_ggc_mx (&h->m_table);
}
template<typename K, typename V, typename H>
-static inline void
+inline void
gt_pch_nx (hash_map<K, V, H> *h)
{
gt_pch_nx (&h->m_table);
}
template<typename K, typename V, typename H>
-static inline void
+inline void
gt_cleare_cache (hash_map<K, V, H> *h)
{
if (h)
}
template<typename K, typename V, typename H>
-static inline void
+inline void
gt_pch_nx (hash_map<K, V, H> *h, gt_pointer_operator op, void *cookie)
{
op (&h->m_table.m_entries, NULL, cookie);
/* ggc marking routines. */
template<typename K, typename H>
-static inline void
+inline void
gt_ggc_mx (hash_set<K, false, H> *h)
{
gt_ggc_mx (&h->m_table);
}
template<typename K, typename H>
-static inline void
+inline void
gt_pch_nx (hash_set<K, false, H> *h)
{
gt_pch_nx (&h->m_table);
}
template<typename K, typename H>
-static inline void
+inline void
gt_pch_nx (hash_set<K, false, H> *h, gt_pointer_operator op, void *cookie)
{
op (&h->m_table.m_entries, NULL, cookie);
/* ggc walking routines. */
template<typename E>
-static inline void
+inline void
gt_ggc_mx (hash_table<E> *h)
{
typedef hash_table<E> table;
}
template<typename D>
-static inline void
+inline void
hashtab_entry_note_pointers (void *obj, void *h, gt_pointer_operator op,
void *cookie)
{
}
template<typename D>
-static inline void
+inline void
gt_pch_nx (hash_table<D> *h, gt_pointer_operator op, void *cookie)
{
op (&h->m_entries, NULL, cookie);
/* Return X with all but the lowest bit masked off. */
-static inline unsigned HOST_WIDE_INT
+inline unsigned HOST_WIDE_INT
least_bit_hwi (unsigned HOST_WIDE_INT x)
{
return (x & -x);
/* True if X is zero or a power of two. */
-static inline bool
+inline bool
pow2_or_zerop (unsigned HOST_WIDE_INT x)
{
return least_bit_hwi (x) == x;
/* True if X is a power of two. */
-static inline bool
+inline bool
pow2p_hwi (unsigned HOST_WIDE_INT x)
{
return x && pow2_or_zerop (x);
#else /* GCC_VERSION >= 3004 */
/* For convenience, define 0 -> word_size. */
-static inline int
+inline int
clz_hwi (unsigned HOST_WIDE_INT x)
{
if (x == 0)
# endif
}
-static inline int
+inline int
ctz_hwi (unsigned HOST_WIDE_INT x)
{
if (x == 0)
# endif
}
-static inline int
+inline int
ffs_hwi (unsigned HOST_WIDE_INT x)
{
# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
# endif
}
-static inline int
+inline int
popcount_hwi (unsigned HOST_WIDE_INT x)
{
# if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
# endif
}
/* Return the floor of log2 of X, i.e. the index of X's highest set
   bit.  With this fallback's "clz_hwi (0) == word size" convention,
   floor_log2 (0) yields -1.  */
-static inline int
+inline int
floor_log2 (unsigned HOST_WIDE_INT x)
{
return HOST_BITS_PER_WIDE_INT - 1 - clz_hwi (x);
}
/* Return the ceiling of log2 of X; ceil_log2 (0) is defined as 0.
   Implemented as floor_log2 (x - 1) + 1 so exact powers of two map to
   their bit index.  */
-static inline int
+inline int
ceil_log2 (unsigned HOST_WIDE_INT x)
{
return x == 0 ? 0 : floor_log2 (x - 1) + 1;
}
-static inline int
+inline int
exact_log2 (unsigned HOST_WIDE_INT x)
{
return pow2p_hwi (x) ? ctz_hwi (x) : -1;
/* Like ctz_hwi, except 0 when x == 0. */
-static inline int
+inline int
ctz_or_zero (unsigned HOST_WIDE_INT x)
{
return ffs_hwi (x) - 1;
/* Sign extend SRC starting from PREC. */
-static inline HOST_WIDE_INT
+inline HOST_WIDE_INT
sext_hwi (HOST_WIDE_INT src, unsigned int prec)
{
if (prec == HOST_BITS_PER_WIDE_INT)
}
/* Zero extend SRC starting from PREC. */
-static inline unsigned HOST_WIDE_INT
+inline unsigned HOST_WIDE_INT
zext_hwi (unsigned HOST_WIDE_INT src, unsigned int prec)
{
if (prec == HOST_BITS_PER_WIDE_INT)
that is part of a macro replacement-list defined in a system
header, but expanded in a non-system file. */
-static inline int
+inline int
in_system_header_at (location_t loc)
{
return linemap_location_in_system_header_p (line_table, loc);
/* Return true if LOCATION is the locus of a token that
comes from a macro expansion, false otherwise. */
-static inline bool
+inline bool
from_macro_expansion_at (location_t loc)
{
return linemap_location_from_macro_expansion_p (line_table, loc);
a macro definition, false otherwise. This differs from from_macro_expansion_at
in its treatment of macro arguments, for which this returns false. */
-static inline bool
+inline bool
from_macro_definition_at (location_t loc)
{
/* Delegate to libcpp's line-map query on the global line_table.  */
return linemap_location_from_macro_definition_p (line_table, loc);
}
-static inline location_t
+inline location_t
get_pure_location (location_t loc)
{
return get_pure_location (line_table, loc);
/* Get the start of any range encoded within location LOC. */
-static inline location_t
+inline location_t
get_start (location_t loc)
{
return get_range_from_loc (line_table, loc).m_start;
/* Get the endpoint of any range encoded within location LOC. */
-static inline location_t
+inline location_t
get_finish (location_t loc)
{
return get_range_from_loc (line_table, loc).m_finish;
#define INSN_ADDRESSES_SET_P() (insn_addresses_.exists ())
#define INSN_ADDRESSES_SIZE() (insn_addresses_.length ())
-static inline void
+inline void
insn_addresses_new (rtx_insn *insn, int insn_addr)
{
unsigned insn_uid = INSN_UID ((insn));
extern const char *const internal_fn_name_array[];
-static inline const char *
+inline const char *
internal_fn_name (enum internal_fn fn)
{
return internal_fn_name_array[(int) fn];
extern const int internal_fn_flags_array[];
-static inline int
+inline int
internal_fn_flags (enum internal_fn fn)
{
return internal_fn_flags_array[(int) fn];
extern GTY(()) const_tree internal_fn_fnspec_array[IFN_LAST + 1];
-static inline const_tree
+inline const_tree
internal_fn_fnspec (enum internal_fn fn)
{
return internal_fn_fnspec_array[(int) fn];
/* Return true if EDGE is a cross module call. */
-static inline bool
+inline bool
cross_module_call_p (struct cgraph_edge *edge)
{
/* Here we do not want to walk to alias target because ICF may create
/* Logs a MESSAGE to dump_file if exists and returns false. FUNC is name
of function and LINE is location in the source file. */
-static inline bool
+inline bool
return_false_with_message_1 (const char *message, const char *filename,
const char *func, unsigned int line)
{
/* Logs return value if RESULT is false. FUNC is name of function and LINE
is location in the source file. */
-static inline bool
+inline bool
return_with_result (bool result, const char *filename,
const char *func, unsigned int line)
{
/* Verbose logging function logging statements S1 and S2 of a CODE.
FUNC is name of function and LINE is location in the source file. */
-static inline bool
+inline bool
return_different_stmts_1 (gimple *s1, gimple *s2, const char *code,
const char *func, unsigned int line)
{
/* Return estimated size of the inline sequence of EDGE. */
-static inline int
+inline int
estimate_edge_size (struct cgraph_edge *edge)
{
edge_growth_cache_entry *entry;
/* Return lower bound on estimated callee growth after inlining EDGE. */
-static inline int
+inline int
estimate_min_edge_growth (struct cgraph_edge *edge)
{
ipa_call_summary *s = ipa_call_summaries->get (edge);
/* Return estimated callee growth after inlining EDGE. */
-static inline int
+inline int
estimate_edge_growth (struct cgraph_edge *edge)
{
ipa_call_summary *s = ipa_call_summaries->get (edge);
/* Return estimated callee runtime increase after inlining
EDGE. */
-static inline sreal
+inline sreal
estimate_edge_time (struct cgraph_edge *edge, sreal *nonspec_time = NULL)
{
edge_growth_cache_entry *entry;
/* Return estimated callee runtime increase after inlining
EDGE. */
-static inline ipa_hints
+inline ipa_hints
estimate_edge_hints (struct cgraph_edge *edge)
{
edge_growth_cache_entry *entry;
MODREF_FLAGS are flags determined by analysis of function body while
FLAGS are flags known otherwise (i.e. by fnspec, pure/const attributes
etc.) */
-static inline int
+inline int
interposable_eaf_flags (int modref_flags, int flags)
{
/* If parameter was previously unused, we know it is only read
/* Return the constant stored in a constant jump function JFUNC. */
-static inline tree
+inline tree
ipa_get_jf_constant (struct ipa_jump_func *jfunc)
{
/* Only valid for IPA_JF_CONST jump functions; checked builds assert.  */
gcc_checking_assert (jfunc->type == IPA_JF_CONST);
return jfunc->value.constant.value;
}
-static inline struct ipa_cst_ref_desc *
+inline struct ipa_cst_ref_desc *
ipa_get_jf_constant_rdesc (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_CONST);
/* Return the operand of a pass through jmp function JFUNC. */
-static inline tree
+inline tree
ipa_get_jf_pass_through_operand (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
/* Return the number of the caller's formal parameter that a pass through jump
function JFUNC refers to. */
-static inline int
+inline int
ipa_get_jf_pass_through_formal_id (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
/* Return operation of a pass through jump function JFUNC. */
-static inline enum tree_code
+inline enum tree_code
ipa_get_jf_pass_through_operation (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
/* Return the agg_preserved flag of a pass through jump function JFUNC. */
-static inline bool
+inline bool
ipa_get_jf_pass_through_agg_preserved (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
/* Return true if pass through jump function JFUNC preserves type
information. */
-static inline bool
+inline bool
ipa_get_jf_pass_through_type_preserved (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_PASS_THROUGH);
/* Return the offset of an ancestor jump function JFUNC. */
-static inline HOST_WIDE_INT
+inline HOST_WIDE_INT
ipa_get_jf_ancestor_offset (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_ANCESTOR);
/* Return the number of the caller's formal parameter that an ancestor jump
function JFUNC refers to. */
-static inline int
+inline int
ipa_get_jf_ancestor_formal_id (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_ANCESTOR);
/* Return the agg_preserved flag of an ancestor jump function JFUNC. */
-static inline bool
+inline bool
ipa_get_jf_ancestor_agg_preserved (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_ANCESTOR);
/* Return true if ancestor jump function JFUNC preserves type information. */
-static inline bool
+inline bool
ipa_get_jf_ancestor_type_preserved (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_ANCESTOR);
parameter for non-NULLness unless it does not matter because the offset is
zero anyway. */
-static inline bool
+inline bool
ipa_get_jf_ancestor_keep_null (struct ipa_jump_func *jfunc)
{
gcc_checking_assert (jfunc->type == IPA_JF_ANCESTOR);
/* Return the number of formal parameters. */
-static inline int
+inline int
ipa_get_param_count (class ipa_node_params *info)
{
return vec_safe_length (info->descriptors);
/* Return the parameter declaration in DESCRIPTORS at index I and assert it is
indeed a PARM_DECL. */
-static inline tree
+inline tree
ipa_get_param (const vec<ipa_param_descriptor, va_gc> &descriptors, int i)
{
tree t = descriptors[i].decl_or_type;
using ipa_initialize_node_params. This function should not be called in
WPA. */
-static inline tree
+inline tree
ipa_get_param (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
/* Return the type of Ith formal parameter of the function corresponding
to INFO if it is known or NULL if not. */
-static inline tree
+inline tree
ipa_get_type (class ipa_node_params *info, int i)
{
if (vec_safe_length (info->descriptors) <= (unsigned) i)
/* Return the move cost of Ith formal parameter of the function corresponding
to INFO. */
-static inline int
+inline int
ipa_get_param_move_cost (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
/* Set the used flag corresponding to the Ith formal parameter of the function
associated with INFO to VAL. */
-static inline void
+inline void
ipa_set_param_used (class ipa_node_params *info, int i, bool val)
{
gcc_checking_assert (info->descriptors);
/* Set the used_by_ipa_predicates flag corresponding to the Ith formal
parameter of the function associated with INFO to VAL. */
-static inline void
+inline void
ipa_set_param_used_by_ipa_predicates (class ipa_node_params *info, int i, bool val)
{
gcc_checking_assert (info->descriptors);
/* Set the used_by_indirect_call flag corresponding to the Ith formal
parameter of the function associated with INFO to VAL. */
-static inline void
+inline void
ipa_set_param_used_by_indirect_call (class ipa_node_params *info, int i, bool val)
{
gcc_checking_assert (info->descriptors);
/* Set the .used_by_polymorphic_call flag corresponding to the Ith formal
parameter of the function associated with INFO to VAL. */
-static inline void
+inline void
ipa_set_param_used_by_polymorphic_call (class ipa_node_params *info, int i, bool val)
{
gcc_checking_assert (info->descriptors);
/* Return how many uses described by ipa-prop a parameter has or
IPA_UNDESCRIBED_USE if there is a use that is not described by these
structures. */
-static inline int
+inline int
ipa_get_controlled_uses (class ipa_node_params *info, int i)
{
/* FIXME: introducing speculation causes out of bounds access here. */
/* Set the controlled counter of a given parameter. */
-static inline void
+inline void
ipa_set_controlled_uses (class ipa_node_params *info, int i, int val)
{
gcc_checking_assert (info->descriptors);
/* Assuming a parameter does not have IPA_UNDESCRIBED_USE controlled uses,
return flag which indicates it has been dereferenced but only in a load. */
-static inline int
+inline int
ipa_get_param_load_dereferenced (class ipa_node_params *info, int i)
{
gcc_assert (ipa_get_controlled_uses (info, i) != IPA_UNDESCRIBED_USE);
/* Set the load_dereferenced flag of a given parameter. */
-static inline void
+inline void
ipa_set_param_load_dereferenced (class ipa_node_params *info, int i, bool val)
{
gcc_checking_assert (info->descriptors);
/* Return the used flag corresponding to the Ith formal parameter of the
function associated with INFO. */
-static inline bool
+inline bool
ipa_is_param_used (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
/* Return the used_by_ipa_predicates flag corresponding to the Ith formal
parameter of the function associated with INFO. */
-static inline bool
+inline bool
ipa_is_param_used_by_ipa_predicates (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
/* Return the used_by_indirect_call flag corresponding to the Ith formal
parameter of the function associated with INFO. */
-static inline bool
+inline bool
ipa_is_param_used_by_indirect_call (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
/* Return the used_by_polymorphic_call flag corresponding to the Ith formal
parameter of the function associated with INFO. */
-static inline bool
+inline bool
ipa_is_param_used_by_polymorphic_call (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
/* Return the number of actual arguments. */
-static inline int
+inline int
ipa_get_cs_argument_count (class ipa_edge_args *args)
{
return vec_safe_length (args->jump_functions);
there is no setter function as jump functions are all set up in
ipa_compute_jump_functions. */
-static inline struct ipa_jump_func *
+inline struct ipa_jump_func *
ipa_get_ith_jump_func (class ipa_edge_args *args, int i)
{
return &(*args->jump_functions)[i];
/* Returns a pointer to the polymorphic call context for the ith argument.
NULL if contexts are not computed. */
-static inline class ipa_polymorphic_call_context *
+inline class ipa_polymorphic_call_context *
ipa_get_ith_polymorhic_call_context (class ipa_edge_args *args, int i)
{
if (!args->polymorphic_call_contexts)
/* This function ensures the array of node param infos is big enough to
accommodate a structure for all nodes and reallocates it if not. */
-static inline void
+inline void
ipa_check_create_node_params (void)
{
if (!ipa_node_params_sum)
of this function is that debug dumping function can check info availability
without causing allocations. */
-static inline bool
+inline bool
ipa_edge_args_info_available_for_edge_p (struct cgraph_edge *edge)
{
return ipa_edge_args_sum->exists (edge);
}
-static inline ipcp_transformation *
+inline ipcp_transformation *
ipcp_get_transformation_summary (cgraph_node *node)
{
if (ipcp_transformation_sum == NULL)
extern ira_emit_data_t ira_allocno_emit_data;
/* Abbreviation for frequent emit data access. */
-static inline rtx
+inline rtx
allocno_emit_reg (ira_allocno_t a)
{
return ALLOCNO_EMIT_DATA (a)->reg;
/* Initialize the iterator I for bit vector VEC containing minimal and
maximal values MIN and MAX. */
-static inline void
+inline void
minmax_set_iter_init (minmax_set_iterator *i, IRA_INT_TYPE *vec, int min,
int max)
{
/* Return TRUE if we have more allocnos to visit, in which case *N is
set to the number of the element to be visited. Otherwise, return
FALSE. */
-static inline bool
+inline bool
minmax_set_iter_cond (minmax_set_iterator *i, int *n)
{
/* Skip words that are zeros. */
}
/* Advance to the next element in the set. */
-static inline void
+inline void
minmax_set_iter_next (minmax_set_iterator *i)
{
i->word >>= 1;
\f
/* Return true if equivalence of pseudo REGNO is not a lvalue. */
-static inline bool
+inline bool
ira_equiv_no_lvalue_p (int regno)
{
if (regno >= ira_reg_equiv_len)
\f
/* Initialize register costs for MODE if necessary. */
-static inline void
+inline void
ira_init_register_move_cost_if_necessary (machine_mode mode)
{
if (ira_register_move_cost[mode] == NULL)
};
/* Initialize the iterator I. */
-static inline void
+inline void
ira_allocno_iter_init (ira_allocno_iterator *i)
{
i->n = 0;
/* Return TRUE if we have more allocnos to visit, in which case *A is
set to the allocno to be visited. Otherwise, return FALSE. */
-static inline bool
+inline bool
ira_allocno_iter_cond (ira_allocno_iterator *i, ira_allocno_t *a)
{
int n;
};
/* Initialize the iterator I. */
-static inline void
+inline void
ira_object_iter_init (ira_object_iterator *i)
{
i->n = 0;
/* Return TRUE if we have more objects to visit, in which case *OBJ is
set to the object to be visited. Otherwise, return FALSE. */
-static inline bool
+inline bool
ira_object_iter_cond (ira_object_iterator *i, ira_object_t *obj)
{
int n;
};
/* Initialize the iterator I. */
-static inline void
+inline void
ira_allocno_object_iter_init (ira_allocno_object_iterator *i)
{
i->n = 0;
/* Return TRUE if we have more objects to visit in allocno A, in which
case *O is set to the object to be visited. Otherwise, return
FALSE. */
-static inline bool
+inline bool
ira_allocno_object_iter_cond (ira_allocno_object_iterator *i, ira_allocno_t a,
ira_object_t *o)
{
};
/* Initialize the iterator I. */
-static inline void
+inline void
ira_pref_iter_init (ira_pref_iterator *i)
{
i->n = 0;
/* Return TRUE if we have more prefs to visit, in which case *PREF is
set to the pref to be visited. Otherwise, return FALSE. */
-static inline bool
+inline bool
ira_pref_iter_cond (ira_pref_iterator *i, ira_pref_t *pref)
{
int n;
};
/* Initialize the iterator I. */
-static inline void
+inline void
ira_copy_iter_init (ira_copy_iterator *i)
{
i->n = 0;
/* Return TRUE if we have more copies to visit, in which case *CP is
set to the copy to be visited. Otherwise, return FALSE. */
-static inline bool
+inline bool
ira_copy_iter_cond (ira_copy_iterator *i, ira_copy_t *cp)
{
int n;
};
/* Initialize the iterator I with ALLOCNO conflicts. */
-static inline void
+inline void
ira_object_conflict_iter_init (ira_object_conflict_iterator *i,
ira_object_t obj)
{
/* Return TRUE if we have more conflicting allocnos to visit, in which
case *A is set to the allocno to be visited. Otherwise, return
FALSE. */
-static inline bool
+inline bool
ira_object_conflict_iter_cond (ira_object_conflict_iterator *i,
ira_object_t *pobj)
{
/* The function returns TRUE if at least one hard register from ones
starting with HARD_REGNO and containing value of MODE are in set
HARD_REGSET. */
-static inline bool
+inline bool
ira_hard_reg_set_intersection_p (int hard_regno, machine_mode mode,
HARD_REG_SET hard_regset)
{
}
/* Return number of hard registers in hard register SET. */
-static inline int
+inline int
hard_reg_set_size (HARD_REG_SET set)
{
int i, size;
/* The function returns TRUE if hard registers starting with
HARD_REGNO and containing value of MODE are fully in set
HARD_REGSET. */
-static inline bool
+inline bool
ira_hard_reg_in_set_p (int hard_regno, machine_mode mode,
HARD_REG_SET hard_regset)
{
/* Allocate cost vector *VEC for hard registers of ACLASS and
initialize the elements by VAL if it is necessary */
-static inline void
+inline void
ira_allocate_and_set_costs (int **vec, reg_class_t aclass, int val)
{
int i, *reg_costs;
/* Allocate cost vector *VEC for hard registers of ACLASS and copy
values of vector SRC into the vector if it is necessary */
-static inline void
+inline void
ira_allocate_and_copy_costs (int **vec, enum reg_class aclass, int *src)
{
int len;
/* Allocate cost vector *VEC for hard registers of ACLASS and add
values of vector SRC into the vector if it is necessary */
-static inline void
+inline void
ira_allocate_and_accumulate_costs (int **vec, enum reg_class aclass, int *src)
{
int i, len;
/* Allocate cost vector *VEC for hard registers of ACLASS and copy
values of vector SRC into the vector or initialize it by VAL (if
SRC is null). */
-static inline void
+inline void
ira_allocate_and_set_or_copy_costs (int **vec, enum reg_class aclass,
int val, int *src)
{
non-local goto code using frame-pointer to address saved stack
pointer value after restoring old frame pointer value. The
function returns TRUE if REGNO is such a static chain pseudo. */
-static inline bool
+inline bool
non_spilled_static_chain_regno_p (int regno)
{
return (cfun->static_chain_decl && crtl->has_nonlocal_goto
}
#endif
-static inline void
+inline void
pass (const char* fmt, ...)
{
va_list ap;
#endif
}
-static inline void
+inline void
xpass (const char* fmt, ...)
{
va_list ap;
#endif
}
-static inline void
+inline void
fail (const char* fmt, ...)
{
va_list ap;
#endif
}
-static inline void
+inline void
xfail (const char* fmt, ...)
{
va_list ap;
#endif
}
-static inline void
+inline void
untested (const char* fmt, ...)
{
va_list ap;
#endif
}
-static inline void
+inline void
unresolved (const char* fmt, ...)
{
va_list ap;
#endif
}
-static inline void
+inline void
note (const char* fmt, ...)
{
va_list ap;
#endif
}
-static inline void
+inline void
totals (void)
{
printf ("\nTotals:\n");
For array and vector types the number of elements also
has to match, as well as the element types themselves. */
-static inline bool
+inline bool
types_kinda_same (recording::type *a, recording::type *b)
{
/* Handle trivial case here, to allow for inlining. */
/* Return the hard register to which the given pseudo REGNO is assigned.
Negative value means that the register got memory or we don't know
allocation yet. */
-static inline int
+inline int
lra_get_regno_hard_regno (int regno)
{
resize_reg_info ();
/* Update insn operands which are duplication of NOP operand. The
insn is represented by its LRA internal representation ID. */
-static inline void
+inline void
lra_update_dup (lra_insn_recog_data_t id, int nop)
{
int i;
operands processing. Generally speaking, we could do this probably
simultaneously with operands processing because a common practice
is to enumerate the operators after their operands. */
-static inline void
+inline void
lra_update_operator_dups (lra_insn_recog_data_t id)
{
int i;
}
/* Return info about INSN. Set up the info if it is not done yet. */
-static inline lra_insn_recog_data_t
+inline lra_insn_recog_data_t
lra_get_insn_recog_data (rtx_insn *insn)
{
lra_insn_recog_data_t data;
}
/* Update offset from pseudos with VAL by INCR. */
-static inline void
+inline void
lra_update_reg_val_offset (int val, poly_int64 incr)
{
int i;
}
/* Return true if register content is equal to VAL with OFFSET. */
-static inline bool
+inline bool
lra_reg_val_equal_p (int regno, int val, poly_int64 offset)
{
if (lra_reg_info[regno].val == val
}
/* Assign value of register FROM to TO. */
-static inline void
+inline void
lra_assign_reg_val (int from, int to)
{
lra_reg_info[to].val = lra_reg_info[from].val;
/* Return the allocno reg class of REGNO. If it is a reload pseudo,
the pseudo should finally get hard register of the allocno
class. */
-static inline enum reg_class
+inline enum reg_class
lra_get_allocno_class (int regno)
{
resize_reg_info ();
extern vec<lto_out_decl_state_ptr> lto_function_decl_states;
/* Return true if LTO tag TAG corresponds to a tree code. */
-static inline bool
+inline bool
lto_tag_is_tree_code_p (enum LTO_tags tag)
{
return tag > LTO_first_tree_tag && (unsigned) tag <= MAX_TREE_CODES;
/* Return true if LTO tag TAG corresponds to a gimple code. */
-static inline bool
+inline bool
lto_tag_is_gimple_code_p (enum LTO_tags tag)
{
return (unsigned) tag >= LTO_first_gimple_tag
/* Return the LTO tag corresponding to gimple code CODE. See enum
LTO_tags for details on the conversion. */
-static inline enum LTO_tags
+inline enum LTO_tags
lto_gimple_code_to_tag (enum gimple_code code)
{
return (enum LTO_tags) ((unsigned) code + LTO_first_gimple_tag);
/* Return the GIMPLE code corresponding to TAG. See enum LTO_tags for
details on the conversion. */
-static inline enum gimple_code
+inline enum gimple_code
lto_tag_to_gimple_code (enum LTO_tags tag)
{
gcc_assert (lto_tag_is_gimple_code_p (tag));
/* Return the LTO tag corresponding to tree code CODE. See enum
LTO_tags for details on the conversion. */
-static inline enum LTO_tags
+inline enum LTO_tags
lto_tree_code_to_tag (enum tree_code code)
{
return (enum LTO_tags) ((unsigned) code + LTO_first_tree_tag);
/* Return the tree code corresponding to TAG. See enum LTO_tags for
details on the conversion. */
-static inline enum tree_code
+inline enum tree_code
lto_tag_to_tree_code (enum LTO_tags tag)
{
gcc_assert (lto_tag_is_tree_code_p (tag));
}
/* Check that tag ACTUAL == EXPECTED. */
-static inline void
+inline void
lto_tag_check (enum LTO_tags actual, enum LTO_tags expected)
{
if (actual != expected)
}
/* Check that tag ACTUAL is in the range [TAG1, TAG2]. */
-static inline void
+inline void
lto_tag_check_range (enum LTO_tags actual, enum LTO_tags tag1,
enum LTO_tags tag2)
{
}
/* Initialize an lto_out_decl_buffer ENCODER. */
-static inline void
+inline void
lto_init_tree_ref_encoder (struct lto_tree_ref_encoder *encoder)
{
encoder->tree_hash_table = new hash_map<tree, unsigned> (251);
/* Destroy an lto_tree_ref_encoder ENCODER by freeing its contents. The
memory used by ENCODER is not freed by this function. */
-static inline void
+inline void
lto_destroy_tree_ref_encoder (struct lto_tree_ref_encoder *encoder)
{
/* Hash table may be delete already. */
}
/* Return the number of trees encoded in ENCODER. */
-static inline unsigned int
+inline unsigned int
lto_tree_ref_encoder_size (struct lto_tree_ref_encoder *encoder)
{
/* Number of entries currently held in the encoder's tree vector.  */
return encoder->trees.length ();
}
/* Return the IDX-th tree in ENCODER. */
-static inline tree
+inline tree
lto_tree_ref_encoder_get_tree (struct lto_tree_ref_encoder *encoder,
unsigned int idx)
{
}
/* Return number of encoded nodes in ENCODER. */
-static inline int
+inline int
lto_symtab_encoder_size (lto_symtab_encoder_t encoder)
{
return encoder->nodes.length ();
/* Look up NODE in encoder. Return NODE's reference if it has been encoded
or LCC_NOT_FOUND if it is not there. */
-static inline int
+inline int
lto_symtab_encoder_lookup (lto_symtab_encoder_t encoder,
symtab_node *node)
{
}
/* Return true if iterator LSE points to nothing. */
-static inline bool
+inline bool
lsei_end_p (lto_symtab_encoder_iterator lsei)
{
/* Past-the-end once the index reaches the encoder's node count.  */
return lsei.index >= (unsigned)lto_symtab_encoder_size (lsei.encoder);
}
/* Advance iterator LSE. */
-static inline void
+inline void
lsei_next (lto_symtab_encoder_iterator *lsei)
{
/* Unconditionally step to the next encoded node.  */
lsei->index++;
}
/* Return the node pointed to by LSI. */
-static inline symtab_node *
+inline symtab_node *
lsei_node (lto_symtab_encoder_iterator lsei)
{
/* Fetch the symtab node at the iterator's current index.  */
return lsei.encoder->nodes[lsei.index].node;
}
/* Return the node pointed to by LSI. */
-static inline struct cgraph_node *
+inline struct cgraph_node *
lsei_cgraph_node (lto_symtab_encoder_iterator lsei)
{
/* Down-cast the current node; presumably NULL if it is not a
   cgraph_node (dyn_cast semantics) -- confirm at callers.  */
return dyn_cast<cgraph_node *> (lsei.encoder->nodes[lsei.index].node);
}
/* Return the node pointed to by LSI. */
-static inline varpool_node *
+inline varpool_node *
lsei_varpool_node (lto_symtab_encoder_iterator lsei)
{
return dyn_cast<varpool_node *> (lsei.encoder->nodes[lsei.index].node);
/* Return the cgraph node corresponding to REF using ENCODER. */
-static inline symtab_node *
+inline symtab_node *
lto_symtab_encoder_deref (lto_symtab_encoder_t encoder, int ref)
{
if (ref == LCC_NOT_FOUND)
}
/* Return an iterator to the first node in LSI. */
-static inline lto_symtab_encoder_iterator
+inline lto_symtab_encoder_iterator
lsei_start (lto_symtab_encoder_t encoder)
{
lto_symtab_encoder_iterator lsei;
}
/* Advance iterator LSE. */
-static inline void
+inline void
lsei_next_in_partition (lto_symtab_encoder_iterator *lsei)
{
lsei_next (lsei);
}
/* Return an iterator to the first node in LSI. */
-static inline lto_symtab_encoder_iterator
+inline lto_symtab_encoder_iterator
lsei_start_in_partition (lto_symtab_encoder_t encoder)
{
lto_symtab_encoder_iterator lsei = lsei_start (encoder);
}
/* Advance iterator LSE. */
-static inline void
+inline void
lsei_next_function_in_partition (lto_symtab_encoder_iterator *lsei)
{
lsei_next (lsei);
}
/* Return an iterator to the first node in LSI. */
-static inline lto_symtab_encoder_iterator
+inline lto_symtab_encoder_iterator
lsei_start_function_in_partition (lto_symtab_encoder_t encoder)
{
lto_symtab_encoder_iterator lsei = lsei_start (encoder);
}
/* Advance iterator LSE. */
-static inline void
+inline void
lsei_next_variable_in_partition (lto_symtab_encoder_iterator *lsei)
{
lsei_next (lsei);
}
/* Return an iterator to the first node in LSI. */
-static inline lto_symtab_encoder_iterator
+inline lto_symtab_encoder_iterator
lsei_start_variable_in_partition (lto_symtab_encoder_t encoder)
{
lto_symtab_encoder_iterator lsei = lsei_start (encoder);
};
/* Return the memory model from a host integer. */
-static inline enum memmodel
+inline enum memmodel
memmodel_from_int (unsigned HOST_WIDE_INT val)
{
/* Mask off any bits outside MEMMODEL_MASK before converting.  */
return (enum memmodel) (val & MEMMODEL_MASK);
}
/* Return the base memory model from a host integer. */
-static inline enum memmodel
+inline enum memmodel
memmodel_base (unsigned HOST_WIDE_INT val)
{
/* Keep only the base-model bits, dropping modifier flags such as
   MEMMODEL_SYNC.  */
return (enum memmodel) (val & MEMMODEL_BASE_MASK);
}
/* Return TRUE if the memory model is RELAXED. */
-static inline bool
+inline bool
is_mm_relaxed (enum memmodel model)
{
/* Compare base-model bits only; modifier bits (e.g. MEMMODEL_SYNC)
   are ignored.  */
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELAXED;
}
/* Return TRUE if the memory model is CONSUME. */
-static inline bool
+inline bool
is_mm_consume (enum memmodel model)
{
/* Compare base-model bits only; modifier bits are ignored.  */
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_CONSUME;
}
/* Return TRUE if the memory model is ACQUIRE. */
-static inline bool
+inline bool
is_mm_acquire (enum memmodel model)
{
/* Compare base-model bits only; modifier bits are ignored.  */
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQUIRE;
}
/* Return TRUE if the memory model is RELEASE. */
-static inline bool
+inline bool
is_mm_release (enum memmodel model)
{
/* Compare base-model bits only; modifier bits are ignored.  */
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELEASE;
}
/* Return TRUE if the memory model is ACQ_REL. */
-static inline bool
+inline bool
is_mm_acq_rel (enum memmodel model)
{
/* Compare base-model bits only; modifier bits are ignored.  */
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQ_REL;
}
/* Return TRUE if the memory model is SEQ_CST. */
-static inline bool
+inline bool
is_mm_seq_cst (enum memmodel model)
{
/* Compare base-model bits only; modifier bits are ignored.  */
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_SEQ_CST;
}
/* Return TRUE if the memory model is a SYNC variant. */
-static inline bool
+inline bool
is_mm_sync (enum memmodel model)
{
return (model & MEMMODEL_SYNC);
static tree objc_add_method (tree, tree, int, bool);
static tree add_instance_variable (tree, objc_ivar_visibility_kind, tree);
static tree build_ivar_reference (tree);
-static tree is_ivar (tree, tree);
/* We only need the following for ObjC; ObjC++ will use C++'s definition
of DERIVED_FROM_P. */
static void interface_hash_init (void);
static tree add_interface (tree, tree);
static void add_category (tree, tree);
-static inline tree lookup_category (tree, tree);
/* Protocols. */
#define objc_is_class_id(TYPE) (OBJC_TYPE_NAME (TYPE) == objc_class_id)
/* Retrieve category interface CAT_NAME (if any) associated with CLASS. */
-static inline tree
+inline tree
lookup_category (tree klass, tree cat_name)
{
tree category = CLASS_CATEGORY_LIST (klass);
}
/* Count only the fields occurring in T. */
-static inline int
+inline int
ivar_list_length (tree t)
{
int count = 0;
return count;
}
-static inline tree
+inline tree
is_ivar (tree decl_chain, tree ident)
{
for ( ; decl_chain; decl_chain = DECL_CHAIN (decl_chain))
value NULL, and objc_map_get() will return NULL in that case.
So a result of NULL means that they key *was* found, and the value
associated with it was NULL. */
-static inline tree
+inline tree
objc_map_get (objc_map_t map, /* struct tree_identifier * */tree key)
{
/* The inline implementation is private and may change without notice. */
You can use any identifier as key, with the exception of NULL.
You can use any tree as value, including NULL. */
-static inline
+inline
void objc_map_put (objc_map_t map, /*struct tree_identifier * */tree key, tree value)
{
/* The inline implementation is private and may change without notice. */
/* Initialize an iterator to iterate over the specified objc_map. You
must use this before starting the iteration, to get a working
iterator. */
-static inline
+inline
void
objc_map_iterator_initialize (objc_map_t map ATTRIBUTE_UNUSED, objc_map_iterator_t *i)
{
been initialized using objc_map_iterator_initialize(). Note that
because this function is modifying the iterator, you need to pass a
pointer to it. */
-static inline
+inline
int
objc_map_iterator_move_to_next (objc_map_t map, objc_map_iterator_t *i)
{
first element), and only if the last call returned
OBJC_MAP_SUCCESS. The behavior is otherwise undefined, probably a
segmentation fault. */
-static inline
+inline
tree
objc_map_iterator_current_key (objc_map_t map, objc_map_iterator_t i)
{
the first element), and only if the last call returned
OBJC_MAP_SUCCESS. The behavior is otherwise undefined, probably a
segmentation fault. */
-static inline
+inline
tree
objc_map_iterator_current_value (objc_map_t map, objc_map_iterator_t i)
{
extern GTY(()) enum omp_requires omp_requires_mask;
-static inline dump_flags_t
+inline dump_flags_t
get_openacc_privatization_dump_flags ()
{
dump_flags_t l_dump_flags = MSG_NOTE;
/* Return insn code for a comparison operator with VMODE
resulting in MASK_MODE, unsigned if UNS is true. */
-static inline enum insn_code
+inline enum insn_code
get_vec_cmp_icode (machine_mode vmode, machine_mode mask_mode, bool uns)
{
optab tab = uns ? vec_cmpu_optab : vec_cmp_optab;
/* Return insn code for a comparison operator with VMODE
resulting in MASK_MODE (only for EQ/NE). */
-static inline enum insn_code
+inline enum insn_code
get_vec_cmp_eq_icode (machine_mode vmode, machine_mode mask_mode)
{
return convert_optab_handler (vec_cmpeq_optab, vmode, mask_mode);
/* Return insn code for a conditional operator with a mask mode
MMODE resulting in a value of mode VMODE. */
-static inline enum insn_code
+inline enum insn_code
get_vcond_mask_icode (machine_mode vmode, machine_mode mmode)
{
return convert_optab_handler (vcond_mask_optab, vmode, mmode);
/* Return insn code for a conditional operator with a comparison in
mode CMODE (only EQ/NE), resulting in a value of mode VMODE. */
-static inline enum insn_code
+inline enum insn_code
get_vcond_eq_icode (machine_mode vmode, machine_mode cmode)
{
return convert_optab_handler (vcondeq_optab, vmode, cmode);
/* Initialize OP with the given fields. Initialize the other fields
to their default values. */
-static inline void
+inline void
create_expand_operand (class expand_operand *op,
enum expand_operand_type type,
rtx value, machine_mode mode,
/* Make OP describe an operand that must use rtx X, even if X is volatile. */
-static inline void
+inline void
create_fixed_operand (class expand_operand *op, rtx x)
{
create_expand_operand (op, EXPAND_FIXED, x, VOIDmode, false);
It is OK for VALUE to be inconsistent with MODE, although it will just
be ignored in that case. */
-static inline void
+inline void
create_output_operand (class expand_operand *op, rtx x,
machine_mode mode)
{
VALUE be copied into a different kind of rtx before being passed
as an operand. */
-static inline void
+inline void
create_input_operand (class expand_operand *op, rtx value,
machine_mode mode)
{
/* Like create_input_operand, except that VALUE must first be converted
to mode MODE. UNSIGNED_P says whether VALUE is unsigned. */
-static inline void
+inline void
create_convert_operand_to (class expand_operand *op, rtx value,
machine_mode mode, bool unsigned_p)
{
conversion (as for convert_modes) and duplicating a scalar to fill
a vector (if VALUE is a scalar but the operand is a vector). */
-static inline void
+inline void
create_convert_operand_from (class expand_operand *op, rtx value,
machine_mode mode, bool unsigned_p)
{
/* Make OP describe an input Pmode address operand. VALUE is the value
of the address, but it may need to be converted to Pmode first. */
-static inline void
+inline void
create_address_operand (class expand_operand *op, rtx value)
{
create_expand_operand (op, EXPAND_ADDRESS, value, Pmode, false);
EVENT - the event identifier
GCC_DATA - event-specific data provided by the compiler */
-static inline int
+inline int
invoke_plugin_callbacks (int event ATTRIBUTE_UNUSED,
void *gcc_data ATTRIBUTE_UNUSED)
{
/* Finishes constructing a NULL-terminated character string representing
the buffered text. */
-static inline const char *
+inline const char *
output_buffer_formatted_text (output_buffer *buff)
{
obstack_1grow (buff->obstack, '\0');
/* Append to the output buffer a string specified by its
STARTing character and LENGTH. */
-static inline void
+inline void
output_buffer_append_r (output_buffer *buff, const char *start, int length)
{
gcc_checking_assert (start);
/* Return a pointer to the last character emitted in the
output_buffer. A NULL pointer means no character available. */
-static inline const char *
+inline const char *
output_buffer_last_position_in_text (const output_buffer *buff)
{
const char *p = NULL;
diagnostic_url_format url_format;
};
-static inline const char *
+inline const char *
pp_get_prefix (const pretty_printer *pp) { return pp->prefix; }
#define pp_space(PP) pp_character (PP, ' ')
extern void pp_end_url (pretty_printer *pp);
/* Switch into verbatim mode and return the old mode. */
-static inline pp_wrapping_mode_t
+inline pp_wrapping_mode_t
pp_set_verbatim_wrapping_ (pretty_printer *pp)
{
pp_wrapping_mode_t oldmode = pp_wrapping_mode (pp);
// Return an irange instance that is a boolean TRUE.
-static inline int_range<1>
+inline int_range<1>
range_true (tree type)
{
unsigned prec = TYPE_PRECISION (type);
// Return an irange instance that is a boolean FALSE.
-static inline int_range<1>
+inline int_range<1>
range_false (tree type)
{
unsigned prec = TYPE_PRECISION (type);
// Return an irange that covers both true and false.
-static inline int_range<1>
+inline int_range<1>
range_true_and_false (tree type)
{
unsigned prec = TYPE_PRECISION (type);
/* Read the next character from the MD file. */
-static inline int
+inline int
read_char (void)
{
return md_reader_ptr->read_char ();
/* Put back CH, which was the last character read from the MD file. */
-static inline void
+inline void
unread_char (int ch)
{
md_reader_ptr->unread_char (ch);
/* Return the class for operand I of alternative ALT, taking matching
constraints into account. */
-static inline enum reg_class
+inline enum reg_class
alternative_class (const operand_alternative *alt, int i)
{
return alt[i].matches >= 0 ? alt[alt[i].matches].cl : alt[i].cl;
extern int recog (rtx, rtx_insn *, int *);
#ifndef GENERATOR_FILE
-static inline int recog_memoized (rtx_insn *insn);
+inline int recog_memoized (rtx_insn *insn);
#endif
extern void add_clobbers (rtx, int);
extern int added_clobbers_hard_reg_p (int);
The automatically-generated function `recog' is normally called
through this one. */
-static inline int
+inline int
recog_memoized (rtx_insn *insn)
{
if (INSN_CODE (insn) < 0)
/* Skip chars until the next ',' or the end of the string. This is
useful to skip alternatives in a constraint string. */
-static inline const char *
+inline const char *
skip_alternative (const char *p)
{
const char *r = p;
extern struct regstat_n_sets_and_refs_t *regstat_n_sets_and_refs;
/* Indexed by n, gives number of times (REG n) is used or set. */
-static inline int
+inline int
REG_N_REFS (int regno)
{
return regstat_n_sets_and_refs[regno].refs;
#define INC_REG_N_REFS(N,V) (regstat_n_sets_and_refs[N].refs += V)
/* Indexed by n, gives number of times (REG n) is set. */
-static inline int
+inline int
REG_N_SETS (int regno)
{
return regstat_n_sets_and_refs[regno].sets;
/* Return an exclusive upper bound on the registers occupied by hard
register (reg:MODE REGNO). */
-static inline unsigned int
+inline unsigned int
end_hard_regno (machine_mode mode, unsigned int regno)
{
return regno + hard_regno_nregs (regno, mode);
/* Add to REGS all the registers required to store a value of mode MODE
in register REGNO. */
-static inline void
+inline void
add_to_hard_reg_set (HARD_REG_SET *regs, machine_mode mode,
unsigned int regno)
{
/* Likewise, but remove the registers. */
-static inline void
+inline void
remove_from_hard_reg_set (HARD_REG_SET *regs, machine_mode mode,
unsigned int regno)
{
/* Return true if REGS contains the whole of (reg:MODE REGNO). */
-static inline bool
+inline bool
in_hard_reg_set_p (const_hard_reg_set regs, machine_mode mode,
unsigned int regno)
{
/* Return true if (reg:MODE REGNO) includes an element of REGS. */
-static inline bool
+inline bool
overlaps_hard_reg_set_p (const_hard_reg_set regs, machine_mode mode,
unsigned int regno)
{
/* Like add_to_hard_reg_set, but use a REGNO/NREGS range instead of
REGNO and MODE. */
-static inline void
+inline void
add_range_to_hard_reg_set (HARD_REG_SET *regs, unsigned int regno,
int nregs)
{
/* Likewise, but remove the registers. */
-static inline void
+inline void
remove_range_from_hard_reg_set (HARD_REG_SET *regs, unsigned int regno,
int nregs)
{
/* Like overlaps_hard_reg_set_p, but use a REGNO/NREGS range instead of
REGNO and MODE. */
-static inline bool
+inline bool
range_overlaps_hard_reg_set_p (const_hard_reg_set set, unsigned regno,
int nregs)
{
/* Like in_hard_reg_set_p, but use a REGNO/NREGS range instead of
REGNO and MODE. */
-static inline bool
+inline bool
range_in_hard_reg_set_p (const_hard_reg_set set, unsigned regno, int nregs)
{
while (nregs-- > 0)
/* Return true if CODE has no subrtxes. */
-static inline bool
+inline bool
leaf_code_p (enum rtx_code code)
{
return rtx_all_subrtx_bounds[code].count == 0;
#define LABEL_REFS(LABEL) XCEXP (LABEL, 3, CODE_LABEL)
/* Get the label that a LABEL_REF references. */
-static inline rtx_insn *
+inline rtx_insn *
label_ref_label (const_rtx ref)
{
return as_a<rtx_insn *> (XCEXP (ref, 0, LABEL_REF));
/* Set the label that LABEL_REF ref refers to. */
-static inline void
+inline void
set_label_ref_label (rtx ref, rtx_insn *label)
{
XCEXP (ref, 0, LABEL_REF) = label;
(RTL_FLAG_CHECK1 ("ORIGINAL_REGNO", (RTX), REG)->u2.original_regno)
/* Force the REGNO macro to only be used on the lhs. */
-static inline unsigned int
+inline unsigned int
rhs_regno (const_rtx x)
{
return REG_CHECK (x)->regno;
}
/* Return the final register in REG X plus one. */
-static inline unsigned int
+inline unsigned int
END_REGNO (const_rtx x)
{
return REGNO (x) + REG_NREGS (x);
/* Change the REGNO and REG_NREGS of REG X to the specified values,
bypassing the df machinery. */
-static inline void
+inline void
set_regno_raw (rtx x, unsigned int regno, unsigned int nregs)
{
reg_info *reg = REG_CHECK (x);
/* Return true if CODE always has VOIDmode. */
-static inline bool
+inline bool
always_void_p (enum rtx_code code)
{
return code == SET;
};
/* Initialize a full_rtx_costs structure C to the maximum cost. */
-static inline void
+inline void
init_costs_to_max (struct full_rtx_costs *c)
{
c->speed = MAX_COST;
}
/* Initialize a full_rtx_costs structure C to zero cost. */
-static inline void
+inline void
init_costs_to_zero (struct full_rtx_costs *c)
{
c->speed = 0;
/* Compare two full_rtx_costs structures A and B, returning true
if A < B when optimizing for speed. */
-static inline bool
+inline bool
costs_lt_p (struct full_rtx_costs *a, struct full_rtx_costs *b,
bool speed)
{
/* Increase both members of the full_rtx_costs structure C by the
cost of N insns. */
-static inline void
+inline void
costs_add_n_insns (struct full_rtx_costs *c, int n)
{
c->speed += COSTS_N_INSNS (n);
/* Return the shape of a SUBREG rtx. */
-static inline subreg_shape
+inline subreg_shape
shape_of_subreg (const_rtx x)
{
return subreg_shape (GET_MODE (SUBREG_REG (x)),
/* Return the cost of SET X. SPEED_P is true if optimizing for speed
rather than size. */
-static inline int
+inline int
set_rtx_cost (rtx x, bool speed_p)
{
return rtx_cost (x, VOIDmode, INSN, 4, speed_p);
/* Like set_rtx_cost, but return both the speed and size costs in C. */
-static inline void
+inline void
get_full_set_rtx_cost (rtx x, struct full_rtx_costs *c)
{
get_full_rtx_cost (x, VOIDmode, INSN, 4, c);
of a register move. SPEED_P is true if optimizing for speed rather
than size. */
-static inline int
+inline int
set_src_cost (rtx x, machine_mode mode, bool speed_p)
{
return rtx_cost (x, mode, SET, 1, speed_p);
/* Like set_src_cost, but return both the speed and size costs in C. */
-static inline void
+inline void
get_full_set_src_cost (rtx x, machine_mode mode, struct full_rtx_costs *c)
{
get_full_rtx_cost (x, mode, SET, 1, c);
#ifndef GENERATOR_FILE
/* Return the attributes of a MEM rtx. */
-static inline const class mem_attrs *
+inline const class mem_attrs *
get_mem_attrs (const_rtx x)
{
class mem_attrs *attrs;
#ifdef GENERATOR_FILE
#define PUT_MODE(RTX, MODE) PUT_MODE_RAW (RTX, MODE)
#else
-static inline void
+inline void
PUT_MODE (rtx x, machine_mode mode)
{
if (REG_P (x))
/* Verify that access at INDEX in bitmap MAP is valid. */
-static inline void
+inline void
bitmap_check_index (const_sbitmap map, int index)
{
gcc_checking_assert (index >= 0);
/* Verify that bitmaps A and B have same size. */
-static inline void
+inline void
bitmap_check_sizes (const_sbitmap a, const_sbitmap b)
{
gcc_checking_assert (a->n_bits == b->n_bits);
}
/* Test if bit number BITNO in the bitmap MAP is set. */
-static inline bool
+inline bool
bitmap_bit_p (const_sbitmap map, int bitno)
{
bitmap_check_index (map, bitno);
/* Set bit number BITNO in the sbitmap MAP.
Return true if the bit changed. */
-static inline bool
+inline bool
bitmap_set_bit (sbitmap map, int bitno)
{
bitmap_check_index (map, bitno);
/* Reset bit number BITNO in the sbitmap MAP.
Return true if the bit changed. */
-static inline bool
+inline bool
bitmap_clear_bit (sbitmap map, int bitno)
{
bitmap_check_index (map, bitno);
/* Initialize the iterator I with sbitmap BMP and the initial index
MIN. */
-static inline void
+inline void
bmp_iter_set_init (sbitmap_iterator *i, const_sbitmap bmp,
unsigned int min, unsigned *bit_no ATTRIBUTE_UNUSED)
{
to the index of the bit to be visited. Otherwise, return
false. */
-static inline bool
+inline bool
bmp_iter_set (sbitmap_iterator *i, unsigned int *n)
{
/* Skip words that are zeros. */
/* Advance to the next bit. */
-static inline void
+inline void
bmp_iter_next (sbitmap_iterator *i, unsigned *bit_no ATTRIBUTE_UNUSED)
{
i->word >>= 1;
extern const struct common_sched_info_def haifa_common_sched_info;
/* Return true if selective scheduling pass is working. */
-static inline bool
+inline bool
sel_sched_p (void)
{
return common_sched_info->sched_pass_id == SCHED_SEL_PASS;
struct _deps_link. */
/* Return initialized iterator. */
-static inline sd_iterator_def
+inline sd_iterator_def
sd_iterator_start (rtx insn, sd_list_types_def types)
{
/* Some dep_link a pointer to which will return NULL. */
}
/* Return the current element. */
-static inline bool
+inline bool
sd_iterator_cond (sd_iterator_def *it_ptr, dep_t *dep_ptr)
{
while (true)
}
/* Advance iterator. */
-static inline void
+inline void
sd_iterator_next (sd_iterator_def *it_ptr)
{
it_ptr->linkp = &DEP_LINK_NEXT (*it_ptr->linkp);
we can't move them in sel-sched-ir.cc. */
extern object_allocator<_list_node> sched_lists_pool;
-static inline _list_t
+inline _list_t
_list_alloc (void)
{
return sched_lists_pool.allocate ();
}
-static inline void
+inline void
_list_add (_list_t *lp)
{
_list_t l = _list_alloc ();
*lp = l;
}
-static inline void
+inline void
_list_remove_nofree (_list_t *lp)
{
_list_t n = *lp;
*lp = _LIST_NEXT (n);
}
-static inline void
+inline void
_list_remove (_list_t *lp)
{
_list_t n = *lp;
sched_lists_pool.remove (n);
}
-static inline void
+inline void
_list_clear (_list_t *l)
{
while (*l)
bool removed_p;
};
-static inline void
+inline void
_list_iter_start (_list_iterator *ip, _list_t *lp, bool can_remove_p)
{
ip->lp = lp;
ip->removed_p = false;
}
-static inline void
+inline void
_list_iter_next (_list_iterator *ip)
{
if (!ip->removed_p)
ip->removed_p = false;
}
-static inline void
+inline void
_list_iter_remove (_list_iterator *ip)
{
gcc_assert (!ip->removed_p && ip->can_remove_p);
ip->removed_p = true;
}
-static inline void
+inline void
_list_iter_remove_nofree (_list_iterator *ip)
{
gcc_assert (!ip->removed_p && ip->can_remove_p);
/* _xlist_t functions. */
-static inline void
+inline void
_xlist_add (_xlist_t *lp, rtx x)
{
_list_add (lp);
#define _xlist_remove(LP) (_list_remove (LP))
#define _xlist_clear(LP) (_list_clear (LP))
-static inline bool
+inline bool
_xlist_is_in_p (_xlist_t l, rtx x)
{
while (l)
}
/* Used through _FOR_EACH. */
-static inline bool
+inline bool
_list_iter_cond_x (_xlist_t l, rtx *xp)
{
if (l)
/* ilist_t functions. */
-static inline void
+inline void
ilist_add (ilist_t *lp, insn_t insn)
{
_list_add (lp);
#define ilist_remove(LP) (_list_remove (LP))
#define ilist_clear(LP) (_list_clear (LP))
-static inline bool
+inline bool
ilist_is_in_p (ilist_t l, insn_t insn)
{
while (l)
}
/* Used through _FOR_EACH. */
-static inline bool
+inline bool
_list_iter_cond_insn (ilist_t l, insn_t *ip)
{
if (l)
#define FOR_EACH_DEF(DEF, I, DEF_LIST) _FOR_EACH (def, (DEF), (I), (DEF_LIST))
-static inline bool
+inline bool
_list_iter_cond_def (def_list_t def_list, def_t *def)
{
if (def_list)
extern bool in_current_region_p (basic_block);
/* True when BB is a header of the inner loop. */
-static inline bool
+inline bool
inner_loop_header_p (basic_block bb)
{
class loop *inner_loop;
}
/* Return exit edges of LOOP, filtering out edges with the same dest bb. */
-static inline vec<edge>
+inline vec<edge>
get_loop_exit_edges_unique_dests (const class loop *loop)
{
vec<edge> edges = vNULL;
traverse all of them and if any of them turns out to be another loop header
(after skipping empty BBs), add its loop exits to the resulting vector
as well. */
-static inline vec<edge>
+inline vec<edge>
get_all_loop_exits (basic_block bb)
{
vec<edge> exits = vNULL;
/* We need to return a succ_iterator to avoid 'uninitialized' warning
during bootstrap. */
-static inline succ_iterator
+inline succ_iterator
_succ_iter_start (insn_t *succp, insn_t insn, int flags)
{
succ_iterator i;
return i;
}
-static inline bool
+inline bool
_succ_iter_cond (succ_iterator *ip, insn_t *succp, insn_t insn,
bool check (edge, succ_iterator *))
{
}
}
-static inline void
+inline void
_succ_iter_next (succ_iterator *ip)
{
gcc_assert (!ip->e2 || ip->e1);
empty blocks. When E2P is not null, the resulting edge is written there.
FLAGS are used to specify whether back edges and out-of-region edges
should be considered. */
-static inline bool
+inline bool
_eligible_successor_edge_p (edge e1, succ_iterator *ip)
{
edge e2 = e1;
#define SUCC_ITER_EDGE(ITER) ((ITER)->e1)
/* Return the next block of BB not running into inconsistencies. */
-static inline basic_block
+inline basic_block
bb_next_bb (basic_block bb)
{
switch (EDGE_COUNT (bb->succs))
/* Get the entry of an sese S. */
-static inline basic_block
+inline basic_block
get_entry_bb (const sese_l &s)
{
return s.entry->dest;
/* Get the exit of an sese S. */
-static inline basic_block
+inline basic_block
get_exit_bb (const sese_l &s)
{
return s.exit->src;
/* The number of parameters in REGION. */
-static inline unsigned
+inline unsigned
sese_nb_params (sese_info_p region)
{
return region->params.length ();
/* Checks whether BB is contained in the region delimited by ENTRY and
EXIT blocks. */
-static inline bool
+inline bool
bb_in_region (const_basic_block bb, const_basic_block entry, const_basic_block exit)
{
return dominated_by_p (CDI_DOMINATORS, bb, entry)
/* Checks whether BB is contained in the region delimited by ENTRY and
EXIT blocks. */
-static inline bool
+inline bool
bb_in_sese_p (basic_block bb, const sese_l &r)
{
return bb_in_region (bb, r.entry->dest, r.exit->dest);
/* Returns true when STMT is defined in REGION. */
-static inline bool
+inline bool
stmt_in_sese_p (gimple *stmt, const sese_l &r)
{
basic_block bb = gimple_bb (stmt);
/* Returns true when NAME is defined in REGION. */
-static inline bool
+inline bool
defined_in_sese_p (tree name, const sese_l &r)
{
return stmt_in_sese_p (SSA_NAME_DEF_STMT (name), r);
/* Returns true when LOOP is in REGION. */
-static inline bool
+inline bool
loop_in_sese_p (class loop *loop, const sese_l &region)
{
return (bb_in_sese_p (loop->header, region)
loop_1 exists, but is not completely contained in the region -> depth 0
loop_2 is completely contained -> depth 1 */
-static inline unsigned int
+inline unsigned int
sese_loop_depth (const sese_l &region, loop_p loop)
{
unsigned int depth = 0;
extern edge get_true_edge_from_guard_bb (basic_block);
extern edge get_false_edge_from_guard_bb (basic_block);
-static inline edge
+inline edge
if_region_entry (ifsese if_region)
{
return if_region->region->region.entry;
}
-static inline edge
+inline edge
if_region_exit (ifsese if_region)
{
return if_region->region->region.exit;
}
-static inline basic_block
+inline basic_block
if_region_get_condition_block (ifsese if_region)
{
return if_region_entry (if_region)->dest;
/* Return the innermost loop that contains the basic block GBB. */
-static inline class loop *
+inline class loop *
gbb_loop (gimple_poly_bb_p gbb)
{
return GBB_BB (gbb)->loop_father;
/* Returns the gimple loop, that corresponds to the loop_iterator_INDEX.
If there is no corresponding gimple loop, we return NULL. */
-static inline loop_p
+inline loop_p
gbb_loop_at_index (gimple_poly_bb_p gbb, sese_l &region, int index)
{
loop_p loop = gbb_loop (gbb);
/* The number of common loops in REGION for GBB1 and GBB2. */
-static inline int
+inline int
nb_common_loops (sese_l &region, gimple_poly_bb_p gbb1, gimple_poly_bb_p gbb2)
{
loop_p l1 = gbb_loop (gbb1);
/* Operation: S = {}
Clear the set of all elements. */
-static inline void
+inline void
sparseset_clear (sparseset s)
{
s->members = 0;
/* Return the number of elements currently in the set. */
-static inline SPARSESET_ELT_TYPE
+inline SPARSESET_ELT_TYPE
sparseset_cardinality (sparseset s)
{
return s->members;
/* Return the maximum number of elements this set can hold. */
-static inline SPARSESET_ELT_TYPE
+inline SPARSESET_ELT_TYPE
sparseset_size (sparseset s)
{
return s->size;
/* Return true if e is a member of the set S, otherwise return false. */
-static inline bool
+inline bool
sparseset_bit_p (sparseset s, SPARSESET_ELT_TYPE e)
{
SPARSESET_ELT_TYPE idx;
/* Low level insertion routine not meant for use outside of sparseset.[ch].
Assumes E is valid and not already a member of the set S. */
-static inline void
+inline void
sparseset_insert_bit (sparseset s, SPARSESET_ELT_TYPE e, SPARSESET_ELT_TYPE idx)
{
s->sparse[e] = idx;
/* Operation: S = S + {e}
Insert E into the set S, if it isn't already a member. */
-static inline void
+inline void
sparseset_set_bit (sparseset s, SPARSESET_ELT_TYPE e)
{
if (!sparseset_bit_p (s, e))
/* Return and remove the last member added to the set S. */
-static inline SPARSESET_ELT_TYPE
+inline SPARSESET_ELT_TYPE
sparseset_pop (sparseset s)
{
SPARSESET_ELT_TYPE mem = s->members;
return s->dense[s->members];
}
-static inline void
+inline void
sparseset_iter_init (sparseset s)
{
s->iter = 0;
s->iterating = true;
}
-static inline bool
+inline bool
sparseset_iter_p (sparseset s)
{
if (s->iterating && s->iter < s->members)
return s->iterating = false;
}
-static inline SPARSESET_ELT_TYPE
+inline SPARSESET_ELT_TYPE
sparseset_iter_elm (sparseset s)
{
return s->dense[s->iter];
}
-static inline void
+inline void
sparseset_iter_next (sparseset s)
{
s->iter += s->iter_inc;
(void) ((DEST) = next_readonly_imm_use (&(ITER))))
/* Forward declare for use in the class below. */
-static inline void end_imm_use_stmt_traverse (imm_use_iterator *);
+inline void end_imm_use_stmt_traverse (imm_use_iterator *);
/* Arrange to automatically call, upon destruction, end_imm_use_stmt_traverse
with a given pointer to imm_use_iterator. */
/* Delink an immediate_uses node from its chain. */
-static inline void
+inline void
delink_imm_use (ssa_use_operand_t *linknode)
{
/* Return if this node is not in a list. */
}
/* Link ssa_imm_use node LINKNODE into the chain for LIST. */
-static inline void
+inline void
link_imm_use_to_list (ssa_use_operand_t *linknode, ssa_use_operand_t *list)
{
/* Link the new node at the head of the list. If we are in the process of
}
/* Link ssa_imm_use node LINKNODE into the chain for DEF. */
-static inline void
+inline void
link_imm_use (ssa_use_operand_t *linknode, tree def)
{
ssa_use_operand_t *root;
}
/* Set the value of a use pointed to by USE to VAL. */
-static inline void
+inline void
set_ssa_use_from_ptr (use_operand_p use, tree val)
{
delink_imm_use (use);
/* Link ssa_imm_use node LINKNODE into the chain for DEF, with use occurring
in STMT. */
-static inline void
+inline void
link_imm_use_stmt (ssa_use_operand_t *linknode, tree def, gimple *stmt)
{
if (stmt)
}
/* Relink a new node in place of an old node in the list. */
-static inline void
+inline void
relink_imm_use (ssa_use_operand_t *node, ssa_use_operand_t *old)
{
/* The node one had better be in the same list. */
/* Relink ssa_imm_use node LINKNODE into the chain for OLD, with use occurring
in STMT. */
-static inline void
+inline void
relink_imm_use_stmt (ssa_use_operand_t *linknode, ssa_use_operand_t *old,
gimple *stmt)
{
/* Return true is IMM has reached the end of the immediate use list. */
-static inline bool
+inline bool
end_readonly_imm_use_p (const imm_use_iterator *imm)
{
return (imm->imm_use == imm->end_p);
}
/* Initialize iterator IMM to process the list for VAR. */
-static inline use_operand_p
+inline use_operand_p
first_readonly_imm_use (imm_use_iterator *imm, tree var)
{
imm->end_p = &(SSA_NAME_IMM_USE_NODE (var));
}
/* Bump IMM to the next use in the list. */
-static inline use_operand_p
+inline use_operand_p
next_readonly_imm_use (imm_use_iterator *imm)
{
use_operand_p old = imm->imm_use;
/* Return true if VAR has no nondebug uses. */
-static inline bool
+inline bool
has_zero_uses (const_tree var)
{
const ssa_use_operand_t *const head = &(SSA_NAME_IMM_USE_NODE (var));
}
/* Return true if VAR has a single nondebug use. */
-static inline bool
+inline bool
has_single_use (const_tree var)
{
const ssa_use_operand_t *const head = &(SSA_NAME_IMM_USE_NODE (var));
/* If VAR has only a single immediate nondebug use, return true, and
set USE_P and STMT to the use pointer and stmt of occurrence. */
-static inline bool
+inline bool
single_imm_use (const_tree var, use_operand_p *use_p, gimple **stmt)
{
const ssa_use_operand_t *const ptr = &(SSA_NAME_IMM_USE_NODE (var));
}
/* Return the number of nondebug immediate uses of VAR. */
-static inline unsigned int
+inline unsigned int
num_imm_uses (const_tree var)
{
const ssa_use_operand_t *const start = &(SSA_NAME_IMM_USE_NODE (var));
SSA operands. */
/* Return true if PTR is finished iterating. */
-static inline bool
+inline bool
op_iter_done (const ssa_op_iter *ptr)
{
return ptr->done;
}
/* Get the next iterator use value for PTR. */
-static inline use_operand_p
+inline use_operand_p
op_iter_next_use (ssa_op_iter *ptr)
{
use_operand_p use_p;
}
/* Get the next iterator def value for PTR. */
-static inline def_operand_p
+inline def_operand_p
op_iter_next_def (ssa_op_iter *ptr)
{
gcc_checking_assert (ptr->iter_type == ssa_op_iter_def);
}
/* Get the next iterator tree value for PTR. */
-static inline tree
+inline tree
op_iter_next_tree (ssa_op_iter *ptr)
{
tree val;
used to prevent compiler warnings about possibly-uninitialized
components. */
-static inline void
+inline void
clear_and_done_ssa_iter (ssa_op_iter *ptr)
{
ptr->i = 0;
}
/* Initialize the iterator PTR to the virtual defs in STMT. */
-static inline void
+inline void
op_iter_init (ssa_op_iter *ptr, gimple *stmt, int flags)
{
/* PHI nodes require a different iterator initialization path. We
/* Initialize iterator PTR to the use operands in STMT based on FLAGS. Return
the first use. */
-static inline use_operand_p
+inline use_operand_p
op_iter_init_use (ssa_op_iter *ptr, gimple *stmt, int flags)
{
gcc_checking_assert ((flags & SSA_OP_ALL_DEFS) == 0
/* Initialize iterator PTR to the def operands in STMT based on FLAGS. Return
the first def. */
-static inline def_operand_p
+inline def_operand_p
op_iter_init_def (ssa_op_iter *ptr, gimple *stmt, int flags)
{
gcc_checking_assert ((flags & SSA_OP_ALL_USES) == 0
/* Initialize iterator PTR to the operands in STMT based on FLAGS. Return
the first operand as a tree. */
-static inline tree
+inline tree
op_iter_init_tree (ssa_op_iter *ptr, gimple *stmt, int flags)
{
op_iter_init (ptr, stmt, flags);
/* If there is a single operand in STMT matching FLAGS, return it. Otherwise
return NULL. */
-static inline tree
+inline tree
single_ssa_tree_operand (gimple *stmt, int flags)
{
tree var;
/* If there is a single operand in STMT matching FLAGS, return it. Otherwise
return NULL. */
-static inline use_operand_p
+inline use_operand_p
single_ssa_use_operand (gimple *stmt, int flags)
{
use_operand_p var;
/* Return the single virtual use operand in STMT if present. Otherwise
return NULL. */
-static inline use_operand_p
+inline use_operand_p
ssa_vuse_operand (gimple *stmt)
{
if (! gimple_vuse (stmt))
/* If there is a single operand in STMT matching FLAGS, return it. Otherwise
return NULL. */
-static inline def_operand_p
+inline def_operand_p
single_ssa_def_operand (gimple *stmt, int flags)
{
def_operand_p var;
/* Return true if there are zero operands in STMT matching the type
given in FLAGS. */
-static inline bool
+inline bool
zero_ssa_operands (gimple *stmt, int flags)
{
ssa_op_iter iter;
/* Return the number of operands matching FLAGS in STMT. */
-static inline int
+inline int
num_ssa_operands (gimple *stmt, int flags)
{
ssa_op_iter iter;
/* If there is a single DEF in the PHI node which matches FLAG, return it.
Otherwise return NULL_DEF_OPERAND_P. */
-static inline tree
+inline tree
single_phi_def (gphi *stmt, int flags)
{
tree def = PHI_RESULT (stmt);
/* Initialize the iterator PTR for uses matching FLAGS in PHI. FLAGS should
be either SSA_OP_USES or SSA_OP_VIRTUAL_USES. */
-static inline use_operand_p
+inline use_operand_p
op_iter_init_phiuse (ssa_op_iter *ptr, gphi *phi, int flags)
{
tree phi_def = gimple_phi_result (phi);
/* Start an iterator for a PHI definition. */
-static inline def_operand_p
+inline def_operand_p
op_iter_init_phidef (ssa_op_iter *ptr, gphi *phi, int flags)
{
tree phi_def = PHI_RESULT (phi);
/* Return true is IMM has reached the end of the immediate use stmt list. */
-static inline bool
+inline bool
end_imm_use_stmt_p (const imm_use_iterator *imm)
{
return (imm->imm_use == imm->end_p);
/* Finish the traversal of an immediate use stmt list IMM by removing the
placeholder node from the list. */
-static inline void
+inline void
end_imm_use_stmt_traverse (imm_use_iterator *imm)
{
delink_imm_use (&(imm->iter_node));
currently delimited by HEAD and LAST_P. The new LAST_P value is
returned. */
-static inline use_operand_p
+inline use_operand_p
move_use_after_head (use_operand_p use_p, use_operand_p head,
use_operand_p last_p)
{
/* This routine will relink all uses with the same stmt as HEAD into the list
immediately following HEAD for iterator IMM. */
-static inline void
+inline void
link_use_stmts_after (use_operand_p head, imm_use_iterator *imm)
{
use_operand_p use_p;
}
/* Initialize IMM to traverse over uses of VAR. Return the first statement. */
-static inline gimple *
+inline gimple *
first_imm_use_stmt (imm_use_iterator *imm, tree var)
{
imm->end_p = &(SSA_NAME_IMM_USE_NODE (var));
/* Bump IMM to the next stmt which has a use of var. */
-static inline gimple *
+inline gimple *
next_imm_use_stmt (imm_use_iterator *imm)
{
imm->imm_use = imm->iter_node.next;
/* This routine will return the first use on the stmt IMM currently refers
to. */
-static inline use_operand_p
+inline use_operand_p
first_imm_use_on_stmt (imm_use_iterator *imm)
{
imm->next_imm_name = imm->imm_use->next;
/* Return TRUE if the last use on the stmt IMM refers to has been visited. */
-static inline bool
+inline bool
end_imm_use_on_stmt_p (const imm_use_iterator *imm)
{
return (imm->imm_use == &(imm->iter_node));
/* Bump to the next use on the stmt IMM refers to, return NULL if done. */
-static inline use_operand_p
+inline use_operand_p
next_imm_use_on_stmt (imm_use_iterator *imm)
{
imm->imm_use = imm->next_imm_name;
}
/* Delink all immediate_use information for STMT. */
-static inline void
+inline void
delink_stmt_imm_use (gimple *stmt)
{
ssa_op_iter iter;
so does GCC 3.4.x (PR17436). */
#define CONST_CAST2(TOTYPE,FROMTYPE,X) ((__extension__(union {FROMTYPE _q; TOTYPE _nq;})(X))._nq)
#elif defined(__GNUC__)
-static inline char *
+inline char *
helper_const_non_const_cast (const char *p)
{
union {
/* Return true if STR string starts with PREFIX. */
-static inline bool
+inline bool
startswith (const char *str, const char *prefix)
{
return strncmp (str, prefix, strlen (prefix)) == 0;
/* Return true if STR string ends with SUFFIX. */
-static inline bool
+inline bool
endswith (const char *str, const char *suffix)
{
size_t str_len = strlen (str);
extern class target_globals *save_target_globals (void);
extern class target_globals *save_target_globals_default_opts (void);
-static inline void
+inline void
restore_target_globals (class target_globals *g)
{
this_target_flag_state = g->flag_state;
runtime value is needed for correctness, since the function only
provides a rough guess. */
-static inline HOST_WIDE_INT
+inline HOST_WIDE_INT
estimated_poly_value (poly_int64 x,
poly_value_estimate_kind kind = POLY_VALUE_LIKELY)
{
#define CUMULATIVE_ARGS_MAGIC ((void *) &targetm.calls)
#endif
-static inline CUMULATIVE_ARGS *
+inline CUMULATIVE_ARGS *
get_cumulative_args (cumulative_args_t arg)
{
#if CHECKING_P
return (CUMULATIVE_ARGS *) arg.p;
}
-static inline cumulative_args_t
+inline cumulative_args_t
pack_cumulative_args (CUMULATIVE_ARGS *arg)
{
cumulative_args_t ret;
};
/* Provided for backward compatibility.  Push timing variable TV onto
   the timer stack if the global timer G_TIMER is active; otherwise do
   nothing.  */
-static inline void
+inline void
timevar_push (timevar_id_t tv)
{
if (g_timer)
g_timer->push (tv);
}
-static inline void
+inline void
timevar_pop (timevar_id_t tv)
{
if (g_timer)
/* After having added an automatically generated element, please
include it in the following function. */
-static inline bool
+inline bool
automatically_generated_chrec_p (const_tree chrec)
{
return (chrec == chrec_dont_know
/* The tree nodes aka. CHRECs. */
-static inline bool
+inline bool
tree_is_chrec (const_tree expr)
{
if (TREE_CODE (expr) == POLYNOMIAL_CHREC
/* Determines whether CHREC is equal to zero. */
-static inline bool
+inline bool
chrec_zerop (const_tree chrec)
{
if (chrec == NULL_TREE)
/* Determines whether CHREC is a loop invariant with respect to LOOP_NUM.
Set the result in RES and return true when the property can be computed. */
-static inline bool
+inline bool
no_evolution_in_loop_p (tree chrec, unsigned loop_num, bool *res)
{
tree scev;
/* Build a polynomial chain of recurrence. */
-static inline tree
+inline tree
build_polynomial_chrec (unsigned loop_num,
tree left,
tree right)
/* Determines whether the expression CHREC is a constant. */
-static inline bool
+inline bool
evolution_function_is_constant_p (const_tree chrec)
{
if (chrec == NULL_TREE)
/* Determine whether CHREC is an affine evolution function in LOOPNUM. */
-static inline bool
+inline bool
evolution_function_is_affine_in_loop (const_tree chrec, int loopnum)
{
if (chrec == NULL_TREE)
/* Determine whether CHREC is an affine evolution function or not. */
-static inline bool
+inline bool
evolution_function_is_affine_p (const_tree chrec)
{
return chrec
/* Determines whether EXPR does not contain chrec expressions. */
-static inline bool
+inline bool
tree_does_not_contain_chrecs (const_tree expr)
{
return !tree_contains_chrecs (expr, NULL);
/* Returns the type of the chrec. */
-static inline tree
+inline tree
chrec_type (const_tree chrec)
{
if (automatically_generated_chrec_p (chrec))
return TREE_TYPE (chrec);
}
-static inline tree
+inline tree
chrec_fold_op (enum tree_code code, tree type, tree op0, tree op1)
{
switch (code)
/* Return true when the base objects of data references A and B are
the same memory object. */
-static inline bool
+inline bool
same_data_refs_base_objects (data_reference_p a, data_reference_p b)
{
return DR_NUM_DIMENSIONS (a) == DR_NUM_DIMENSIONS (b)
memory object with the same access functions. Optionally skip the
last OFFSET dimensions in the data reference. */
-static inline bool
+inline bool
same_data_refs (data_reference_p a, data_reference_p b, int offset = 0)
{
unsigned int i;
LEVEL = 0 means a lexicographic dependence, i.e. a dependence due
to the sequence of statements, not carried by any loop. */
-static inline unsigned
+inline unsigned
dependence_level (lambda_vector dist_vect, int length)
{
int i;
/* Return the dependence level for the DDR relation. */
-static inline unsigned
+inline unsigned
ddr_dependence_level (ddr_p ddr)
{
unsigned vector;
/* Return the index of the variable VAR in the LOOP_NEST array. */
-static inline int
+inline int
index_in_loop_nest (int var, const vec<loop_p> &loop_nest)
{
class loop *loopi;
/* Returns true when the data reference DR has the form "A[i] = ..."
with a stride equal to its unit type size. */
-static inline bool
+inline bool
adjacent_dr_p (struct data_reference *dr)
{
/* If this is a bitfield store bail out. */
/* Compute the greatest common divisor of a VECTOR of SIZE numbers. */
-static inline lambda_int
+inline lambda_int
lambda_vector_gcd (lambda_vector vector, int size)
{
int i;
/* Allocate a new vector of given SIZE. */
-static inline lambda_vector
+inline lambda_vector
lambda_vector_new (int size)
{
/* ??? We shouldn't abuse the GC allocator here. */
/* Clear out vector VEC1 of length SIZE. */
-static inline void
+inline void
lambda_vector_clear (lambda_vector vec1, int size)
{
memset (vec1, 0, size * sizeof (*vec1));
/* Returns true when the vector V is lexicographically positive, in
other words, when the first nonzero element is positive. */
-static inline bool
+inline bool
lambda_vector_lexico_pos (lambda_vector v,
unsigned n)
{
/* Return true if vector VEC1 of length SIZE is the zero vector. */
-static inline bool
+inline bool
lambda_vector_zerop (lambda_vector vec1, int size)
{
int i;
/* Allocate a matrix of M rows x N cols. */
-static inline lambda_matrix
+inline lambda_matrix
lambda_matrix_new (int m, int n, struct obstack *lambda_obstack)
{
lambda_matrix mat;
tree operator* () const { return ptr->stmt; }
};
-static inline tree_stmt_iterator
+inline tree_stmt_iterator
tsi_start (tree t)
{
tree_stmt_iterator i;
return i;
}
-static inline tree_stmt_iterator
+inline tree_stmt_iterator
tsi_last (tree t)
{
tree_stmt_iterator i;
return i;
}
/* Return true if iterator I has run past the end of its statement
   list (its node pointer is null).  */
-static inline bool
+inline bool
tsi_end_p (tree_stmt_iterator i)
{
return i.ptr == NULL;
}
/* Return true if iterator I is on the last statement of its list:
   a valid node with no successor.  */
-static inline bool
+inline bool
tsi_one_before_end_p (tree_stmt_iterator i)
{
return i.ptr != NULL && i.ptr->next == NULL;
}
/* Advance iterator *I to the next statement in the list.  */
-static inline void
+inline void
tsi_next (tree_stmt_iterator *i)
{
++(*i);
}
/* Move iterator *I back to the previous statement in the list.  */
-static inline void
+inline void
tsi_prev (tree_stmt_iterator *i)
{
--(*i);
}
/* Return a pointer to the statement that iterator I currently
   refers to.  */
-static inline tree *
+inline tree *
tsi_stmt_ptr (tree_stmt_iterator i)
{
return &(*i);
}
-static inline tree
+inline tree
tsi_stmt (tree_stmt_iterator i)
{
return *i;
/* Returns the RTX expression representing the storage of the outof-SSA
partition that the SSA name EXP is a member of. */
-static inline rtx
+inline rtx
get_rtx_for_ssa_name (tree exp)
{
int p = partition_find (SA.map->var_partition, SSA_NAME_VERSION (exp));
/* If TER decided to forward the definition of SSA name EXP this function
returns the defining statement, otherwise NULL. */
-static inline gimple *
+inline gimple *
get_gimple_for_ssa_name (tree exp)
{
int v = SSA_NAME_VERSION (exp);
extern tree degenerate_phi_result (gphi *);
extern void set_phi_nodes (basic_block, gimple_seq);
-static inline use_operand_p
+inline use_operand_p
gimple_phi_arg_imm_use_ptr (gimple *gs, int i)
{
return &gimple_phi_arg (gs, i)->imm_use;
/* Return the phi argument which contains the specified use. */
-static inline int
+inline int
phi_arg_index_from_use (use_operand_p use)
{
struct phi_arg_d *element, *root;
/* Returns the basic block preceding LOOP, or the CFG entry block when
the loop is function's body. */
-static inline basic_block
+inline basic_block
block_before_loop (loop_p loop)
{
edge preheader = loop_preheader_edge (loop);
symbolic form. LOOP is the loop in which symbolic names have to
be analyzed and instantiated. */
-static inline tree
+inline tree
instantiate_parameters (class loop *loop, tree chrec)
{
return instantiate_scev (loop_preheader_edge (loop), loop, chrec);
/* Returns the loop of the polynomial chrec CHREC. */
-static inline class loop *
+inline class loop *
get_chrec_loop (const_tree chrec)
{
return get_loop (cfun, CHREC_VARIABLE (chrec));
/* Return true iff TYPE is stdarg va_list type (which early SRA and IPA-SRA
should leave alone). */
-static inline bool
+inline bool
is_va_list_type (tree type)
{
return TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (va_list_type_node);
overlap. SIZE1 and/or SIZE2 can be (unsigned)-1 in which case the
range is open-ended. Otherwise return false. */
-static inline bool
+inline bool
ranges_overlap_p (HOST_WIDE_INT pos1,
unsigned HOST_WIDE_INT size1,
HOST_WIDE_INT pos2,
/* Return number of partitions in MAP. */
-static inline unsigned
+inline unsigned
num_var_partitions (var_map map)
{
return map->num_partitions;
/* Given partition index I from MAP, return the variable which represents that
partition. */
-static inline tree
+inline tree
partition_to_var (var_map map, int i)
{
tree name;
/* Given ssa_name VERSION, if it has a partition in MAP, return the var it
is associated with. Otherwise return NULL. */
-static inline tree
+inline tree
version_to_var (var_map map, int version)
{
int part;
/* Given VAR, return the partition number in MAP which contains it.
NO_PARTITION is returned if it's not in any partition. */
-static inline int
+inline int
var_to_partition (var_map map, tree var)
{
int part;
/* Given VAR, return the variable which represents the entire partition
it is a member of in MAP. NULL is returned if it is not in a partition. */
-static inline tree
+inline tree
var_to_partition_to_var (var_map map, tree var)
{
int part;
/* Return the index into the basevar table for PARTITION's base in MAP. */
-static inline int
+inline int
basevar_index (var_map map, int partition)
{
gcc_checking_assert (partition >= 0
/* Return the number of different base variables in MAP. */
-static inline int
+inline int
num_basevars (var_map map)
{
return map->num_basevars;
/* Return TRUE if P is marked as a global in LIVE. */
-static inline int
+inline int
partition_is_global (tree_live_info_p live, int p)
{
gcc_checking_assert (live->global);
/* Return the bitmap from LIVE representing the live on entry blocks for
partition P. */
-static inline bitmap
+inline bitmap
live_on_entry (tree_live_info_p live, basic_block bb)
{
gcc_checking_assert (live->livein
/* Return the bitmap from LIVE representing the live on exit partitions from
block BB. */
-static inline bitmap
+inline bitmap
live_on_exit (tree_live_info_p live, basic_block bb)
{
gcc_checking_assert (live->liveout
/* Return the partition map which the information in LIVE utilizes. */
-static inline var_map
+inline var_map
live_var_map (tree_live_info_p live)
{
return live->map;
/* Mark partition P as live on entry to basic block BB in LIVE. */
-static inline void
+inline void
make_live_on_entry (tree_live_info_p live, basic_block bb , int p)
{
bitmap_set_bit (&live->livein[bb->index], p);
extern void rewrite_into_loop_closed_ssa (bitmap, unsigned);
extern void verify_loop_closed_ssa (bool, class loop * = NULL);
-static inline void
+inline void
checking_verify_loop_closed_ssa (bool verify_ssa_p, class loop *loop = NULL)
{
if (flag_checking)
/* Returns the loop of the statement STMT. */
-static inline class loop *
+inline class loop *
loop_containing_stmt (gimple *stmt)
{
basic_block bb = gimple_bb (stmt);
extern void unlink_stmt_vdef (gimple *);
/* Return the tree node pointed to by the use operand USE
   (dereferences the use's embedded tree pointer).  */
-static inline tree
+inline tree
get_use_from_ptr (use_operand_p use)
{
return *(use->use);
}
/* Return the tree pointed-to by DEF. */
-static inline tree
+inline tree
get_def_from_ptr (def_operand_p def)
{
return *def;
/* If SIM_P is true, statement S will be simulated again. */
-static inline void
+inline void
prop_set_simulate_again (gimple *s, bool visit_p)
{
gimple_set_visited (s, visit_p);
/* Return true if statement T should be simulated again. */
-static inline bool
+inline bool
prop_simulate_again_p (gimple *s)
{
return gimple_visited_p (s);
/* Return the size of a vn_nary_op_t with LENGTH operands. */
-static inline size_t
+inline size_t
sizeof_vn_nary_op (unsigned int length)
{
return sizeof (struct vn_nary_op_s) + sizeof (tree) * length - sizeof (tree);
/* Hash the type TYPE using bits that distinguishes it in the
types_compatible_p sense. */
-static inline hashval_t
+inline hashval_t
vn_hash_type (tree type)
{
return (INTEGRAL_TYPE_P (type)
/* Hash the constant CONSTANT with distinguishing type incompatible
constants in the types_compatible_p sense. */
-static inline hashval_t
+inline hashval_t
vn_hash_constant_with_type (tree constant)
{
inchash::hash hstate;
/* Compare the constants C1 and C2 with distinguishing type incompatible
constants in the types_compatible_p sense. */
-static inline bool
+inline bool
vn_constant_eq_with_type (tree c1, tree c2)
{
return (expressions_equal_p (c1, c2)
unsigned int get_or_alloc_constant_value_id (tree);
/* Return true if V is a value id for a constant. */
-static inline bool
+inline bool
value_id_constant_p (unsigned int v)
{
return (int)v < 0;
/* Return TRUE iff VAR is marked as maybe-undefined. See
mark_ssa_maybe_undefs. */
-static inline bool
+inline bool
ssa_name_maybe_undef_p (tree var)
{
gcc_checking_assert (TREE_CODE (var) == SSA_NAME);
/* Set (or clear, depending on VALUE) VAR's maybe-undefined mark. */
-static inline void
+inline void
ssa_name_set_maybe_undef (tree var, bool value = true)
{
gcc_checking_assert (TREE_CODE (var) == SSA_NAME);
/* Given an edge_var_map V, return the PHI arg definition. */
-static inline tree
+inline tree
redirect_edge_var_map_def (edge_var_map *v)
{
return v->def;
/* Given an edge_var_map V, return the PHI result. */
-static inline tree
+inline tree
redirect_edge_var_map_result (edge_var_map *v)
{
return v->result;
/* Given an edge_var_map V, return the PHI arg location. */
-static inline location_t
+inline location_t
redirect_edge_var_map_location (edge_var_map *v)
{
return v->locus;
/* Verify SSA invariants, if internal consistency checks are enabled. */
-static inline void
+inline void
checking_verify_ssa (bool check_modified_stmt, bool check_ssa_operands)
{
if (flag_checking)
/* Return an SSA_NAME node for variable VAR defined in statement STMT
in function cfun. */
-static inline tree
+inline tree
make_ssa_name (tree var, gimple *stmt = NULL)
{
return make_ssa_name_fn (cfun, var, stmt);
/* Return an SSA_NAME node using the template SSA name NAME defined in
statement STMT in function cfun. */
-static inline tree
+inline tree
copy_ssa_name (tree var, gimple *stmt = NULL)
{
return copy_ssa_name_fn (cfun, var, stmt);
/* Creates a duplicate of an SSA name NAME to be defined by statement STMT
in function cfun. */
-static inline tree
+inline tree
duplicate_ssa_name (tree var, gimple *stmt)
{
return duplicate_ssa_name_fn (cfun, var, stmt);
/* Release the SSA name NAME used in function cfun. */
-static inline void
+inline void
release_ssa_name (tree name)
{
release_ssa_name_fn (cfun, name);
/* Return an anonymous SSA_NAME node for type TYPE defined in statement STMT
in function cfun. Arrange so that it uses NAME in dumps. */
-static inline tree
+inline tree
make_temp_ssa_name (tree type, gimple *stmt, const char *name)
{
tree ssa_name;
/* Return the tree node at slot IX in CACHE. */
-static inline tree
+inline tree
streamer_tree_cache_get_tree (struct streamer_tree_cache_d *cache, unsigned ix)
{
return cache->nodes[ix];
/* Return the tree hash value at slot IX in CACHE. */
-static inline hashval_t
+inline hashval_t
streamer_tree_cache_get_hash (struct streamer_tree_cache_d *cache, unsigned ix)
{
return cache->hashes[ix];
}
/* Pack machine mode MODE into bitpack BP as an 8-bit enum value,
   first flagging MODE in streamer_mode_table — presumably so that
   only used modes get streamed; confirm against the streamer.  */
-static inline void
+inline void
bp_pack_machine_mode (struct bitpack_d *bp, machine_mode mode)
{
streamer_mode_table[mode] = 1;
bp_pack_enum (bp, machine_mode, 1 << 8, mode);
}
-static inline machine_mode
+inline machine_mode
bp_unpack_machine_mode (struct bitpack_d *bp)
{
return (machine_mode)
/* Release CLUSTERS vector and destruct all dynamically allocated items. */
-static inline void
+inline void
release_clusters (vec<cluster *> &clusters)
{
for (unsigned i = 0; i < clusters.length (); i++)
stack. */
typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;
-static inline loop_vec_info
+inline loop_vec_info
loop_vec_info_for_loop (class loop *loop)
{
return (loop_vec_info) loop->aux;
&& TYPE_PRECISION (TYPE) == 1 \
&& TYPE_UNSIGNED (TYPE)))
-static inline bool
+inline bool
nested_in_vect_loop_p (class loop *loop, stmt_vec_info stmt_info)
{
return (loop->inner
Return the initial value of the variable on entry to the containing
loop. */
-static inline tree
+inline tree
vect_phi_initial_value (gphi *phi)
{
basic_block bb = gimple_bb (phi);
/* Return true if STMT_INFO should produce a vector mask type rather than
a normal nonmask type. */
-static inline bool
+inline bool
vect_use_mask_type_p (stmt_vec_info stmt_info)
{
return stmt_info->mask_precision && stmt_info->mask_precision != ~0U;
/* Return TRUE if a statement represented by STMT_INFO is a part of a
pattern. */
-static inline bool
+inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
return stmt_info->pattern_stmt_p;
/* Return the later statement between STMT1_INFO and STMT2_INFO. */
-static inline stmt_vec_info
+inline stmt_vec_info
get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
{
if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt)
/* Return true if BB is a loop header. */
-static inline bool
+inline bool
is_loop_header_bb_p (basic_block bb)
{
if (bb == (bb->loop_father)->header)
/* Return pow2 (X). */
-static inline int
+inline int
vect_pow2 (int x)
{
int i, res = 1;
/* Alias targetm.vectorize.builtin_vectorization_cost. */
-static inline int
+inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
tree vectype, int misalign)
{
/* Get cost by calling cost target builtin. */
-static inline
+inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
return builtin_vectorization_cost (type_of_cost, NULL, 0);
/* Alias targetm.vectorize.init_cost. */
-static inline vector_costs *
+inline vector_costs *
init_cost (vec_info *vinfo, bool costing_for_scalar)
{
return targetm.vectorize.create_costs (vinfo, costing_for_scalar);
/* Alias targetm.vectorize.add_stmt_cost. */
-static inline unsigned
+inline unsigned
add_stmt_cost (vector_costs *costs, int count,
enum vect_cost_for_stmt kind,
stmt_vec_info stmt_info, slp_tree node,
return cost;
}
-static inline unsigned
+inline unsigned
add_stmt_cost (vector_costs *costs, int count, enum vect_cost_for_stmt kind,
enum vect_cost_model_location where)
{
/* Alias targetm.vectorize.add_stmt_cost. */
-static inline unsigned
+inline unsigned
add_stmt_cost (vector_costs *costs, stmt_info_for_cost *i)
{
return add_stmt_cost (costs, i->count, i->kind, i->stmt_info, i->node,
/* Alias targetm.vectorize.finish_cost. */
-static inline void
+inline void
finish_cost (vector_costs *costs, const vector_costs *scalar_costs,
unsigned *prologue_cost, unsigned *body_cost,
unsigned *epilogue_cost, unsigned *suggested_unroll_factor = NULL)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
/* Only defined once DR_MISALIGNMENT is defined. */
-static inline const poly_uint64
+inline const poly_uint64
dr_target_alignment (dr_vec_info *dr_info)
{
if (STMT_VINFO_GROUPED_ACCESS (dr_info->stmt))
}
#define DR_TARGET_ALIGNMENT(DR) dr_target_alignment (DR)
-static inline void
+inline void
set_dr_target_alignment (dr_vec_info *dr_info, poly_uint64 val)
{
dr_info->target_alignment = val;
/* Return true if data access DR_INFO is aligned to the target's
preferred alignment for VECTYPE (which may be less than a full vector). */
-static inline bool
+inline bool
aligned_access_p (dr_vec_info *dr_info, tree vectype)
{
return (dr_misalignment (dr_info, vectype) == 0);
respect to the target's preferred alignment for VECTYPE, and FALSE
otherwise. */
-static inline bool
+inline bool
known_alignment_for_access_p (dr_vec_info *dr_info, tree vectype)
{
return (dr_misalignment (dr_info, vectype) != DR_MISALIGNMENT_UNKNOWN);
/* Return the minimum alignment in bytes that the vectorized version
of DR_INFO is guaranteed to have. */
-static inline unsigned int
+inline unsigned int
vect_known_alignment_in_bytes (dr_vec_info *dr_info, tree vectype)
{
int misalignment = dr_misalignment (dr_info, vectype);
(which for outer loop vectorization might not be the behavior recorded
in DR_INFO itself). */
-static inline innermost_loop_behavior *
+inline innermost_loop_behavior *
vect_dr_behavior (vec_info *vinfo, dr_vec_info *dr_info)
{
stmt_vec_info stmt_info = dr_info->stmt;
/* Return the vect cost model for LOOP. */
-static inline enum vect_cost_model
+inline enum vect_cost_model
loop_cost_model (loop_p loop)
{
if (loop != NULL
}
/* Return true if the vect cost model is unlimited. */
-static inline bool
+inline bool
unlimited_cost_model (loop_p loop)
{
return loop_cost_model (loop) == VECT_COST_MODEL_UNLIMITED;
if the first iteration should use a partial mask in order to achieve
alignment. */
-static inline bool
+inline bool
vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
{
return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
NUNITS elements. NUNITS should be based on the vectorization factor,
so it is always a known multiple of the number of elements in VECTYPE. */
-static inline unsigned int
+inline unsigned int
vect_get_num_vectors (poly_uint64 nunits, tree vectype)
{
return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
vectorization factor divided by the number of elements in
VECTYPE and is always known at compile time. */
-static inline unsigned int
+inline unsigned int
vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
{
return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype);
/* Update maximum unit count *MAX_NUNITS so that it accounts for
NUNITS. *MAX_NUNITS can be 1 if we haven't yet recorded anything. */
-static inline void
+inline void
vect_update_max_nunits (poly_uint64 *max_nunits, poly_uint64 nunits)
{
/* All unit counts have the form vec_info::vector_size * X for some
the number of units in vector type VECTYPE. *MAX_NUNITS can be 1
if we haven't yet recorded any vector types. */
-static inline void
+inline void
vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
{
vect_update_max_nunits (max_nunits, TYPE_VECTOR_SUBPARTS (vectype));
Pick a reasonable estimate if the vectorization factor isn't
known at compile time. */
-static inline unsigned int
+inline unsigned int
vect_vf_for_cost (loop_vec_info loop_vinfo)
{
return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
Pick a reasonable estimate if the exact number isn't known at
compile time. */
-static inline unsigned int
+inline unsigned int
vect_nunits_for_cost (tree vec_type)
{
return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
/* Return the maximum possible vectorization factor for LOOP_VINFO. */
-static inline unsigned HOST_WIDE_INT
+inline unsigned HOST_WIDE_INT
vect_max_vf (loop_vec_info loop_vinfo)
{
unsigned HOST_WIDE_INT vf;
/* Overload of record_stmt_cost with VECTYPE derived from STMT_INFO. */
-static inline unsigned
+inline unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
int misalign, enum vect_cost_model_location where)
location. */
#define CAN_HAVE_LOCATION_P(NODE) ((NODE) && EXPR_P (NODE))
-static inline source_range
+inline source_range
get_expr_source_range (tree expr)
{
location_t loc = EXPR_LOCATION (expr);
/* Set decl_type of a DECL. Set it to T when SET is true, or reset
it to NONE. */
-static inline void
+inline void
set_function_decl_type (tree decl, function_decl_type t, bool set)
{
if (set)
/* Compute the number of operands in an expression node NODE. For
tcc_vl_exp nodes like CALL_EXPRs, this is stored in the node itself,
otherwise it is looked up from the node's code. */
-static inline int
+inline int
tree_operand_length (const_tree node)
{
if (VL_EXP_CLASS_P (node))
/* Return true if we can construct vector types with the given number
of subparts. */
-static inline bool
+inline bool
valid_vector_subparts_p (poly_uint64 subparts)
{
unsigned HOST_WIDE_INT coeff0 = subparts.coeffs[0];
/* _loc versions of build[1-5]. */
-static inline tree
+inline tree
build1_loc (location_t loc, enum tree_code code, tree type,
tree arg1 CXX_MEM_STAT_INFO)
{
return t;
}
-static inline tree
+inline tree
build2_loc (location_t loc, enum tree_code code, tree type, tree arg0,
tree arg1 CXX_MEM_STAT_INFO)
{
return t;
}
-static inline tree
+inline tree
build3_loc (location_t loc, enum tree_code code, tree type, tree arg0,
tree arg1, tree arg2 CXX_MEM_STAT_INFO)
{
return t;
}
-static inline tree
+inline tree
build4_loc (location_t loc, enum tree_code code, tree type, tree arg0,
tree arg1, tree arg2, tree arg3 CXX_MEM_STAT_INFO)
{
return t;
}
-static inline tree
+inline tree
build5_loc (location_t loc, enum tree_code code, tree type, tree arg0,
tree arg1, tree arg2, tree arg3, tree arg4 CXX_MEM_STAT_INFO)
{
/* Return true if T is an expression that get_inner_reference handles. */
-static inline bool
+inline bool
handled_component_p (const_tree t)
{
switch (TREE_CODE (t))
/* Return true if T is a component with reverse storage order. */
-static inline bool
+inline bool
reverse_storage_order_for_component_p (tree t)
{
/* The storage order only applies to scalar components. */
outer type, a VIEW_CONVERT_EXPR can modify the storage order because
it can change the partition of the aggregate object into scalars. */
-static inline bool
+inline bool
storage_order_barrier_p (const_tree t)
{
if (TREE_CODE (t) != VIEW_CONVERT_EXPR)
/* Initialize the iterator I with arguments from function type FNTYPE. */
-static inline void
+inline void
function_args_iter_init (function_args_iterator *i, const_tree fntype)
{
i->next = TYPE_ARG_TYPES (fntype);
/* Return a pointer that holds the next argument if there are more arguments to
handle, otherwise return NULL. */
-static inline tree *
+inline tree *
function_args_iter_cond_ptr (function_args_iterator *i)
{
return (i->next) ? &TREE_VALUE (i->next) : NULL;
/* Return the next argument if there are more arguments to handle,
   otherwise return NULL_TREE.  Does not advance the iterator.  */
-static inline tree
+inline tree
function_args_iter_cond (function_args_iterator *i)
{
return (i->next) ? TREE_VALUE (i->next) : NULL_TREE;
}
/* Advance to the next argument. */
-static inline void
+inline void
function_args_iter_next (function_args_iterator *i)
{
gcc_assert (i->next != NULL_TREE);
so the function returns true for all but the innermost and outermost
blocks into which an expression has been inlined. */
-static inline bool
+inline bool
inlined_function_outer_scope_p (const_tree block)
{
return LOCATION_LOCUS (BLOCK_SOURCE_LOCATION (block)) != UNKNOWN_LOCATION;
pointer. Assumes all pointers are interchangeable, which is sort
of already assumed by gcc elsewhere IIRC. */
-static inline int
+inline int
struct_ptr_eq (const void *a, const void *b)
{
const void * const * x = (const void * const *) a;
return *x == *y;
}
-static inline hashval_t
+inline hashval_t
struct_ptr_hash (const void *a)
{
const void * const * x = (const void * const *) a;
}
/* Return nonzero if CODE is a tree code that represents a truth value. */
-static inline bool
+inline bool
truth_value_p (enum tree_code code)
{
return (TREE_CODE_CLASS (code) == tcc_comparison
/* Return whether TYPE is a type suitable for an offset for
a POINTER_PLUS_EXPR. */
-static inline bool
+inline bool
ptrofftype_p (tree type)
{
return (INTEGRAL_TYPE_P (type)
/* Return true if the argument is a complete type or an array
of unknown bound (whose type is incomplete but) whose elements
have complete type. */
-static inline bool
+inline bool
complete_or_array_type_p (const_tree type)
{
return COMPLETE_TYPE_P (type)
/* Compat version until all callers are converted. Return hash for
TREE with SEED. */
-static inline hashval_t iterative_hash_expr(const_tree tree, hashval_t seed)
+inline hashval_t iterative_hash_expr(const_tree tree, hashval_t seed)
{
inchash::hash hstate (seed);
inchash::add_expr (tree, hstate);
/* Initialize the abstract argument list iterator object ITER with the
arguments from CALL_EXPR node EXP. */
-static inline void
+inline void
init_call_expr_arg_iterator (tree exp, call_expr_arg_iterator *iter)
{
iter->t = exp;
iter->i = 0;
}
-static inline void
+inline void
init_const_call_expr_arg_iterator (const_tree exp, const_call_expr_arg_iterator *iter)
{
iter->t = exp;
/* Return the next argument from abstract argument list iterator object ITER,
and advance its state. Return NULL_TREE if there are no more arguments. */
-static inline tree
+inline tree
next_call_expr_arg (call_expr_arg_iterator *iter)
{
tree result;
return result;
}
-static inline const_tree
+inline const_tree
next_const_call_expr_arg (const_call_expr_arg_iterator *iter)
{
const_tree result;
past and return the first argument. Useful in for expressions, e.g.
for (arg = first_call_expr_arg (exp, &iter); arg;
arg = next_call_expr_arg (&iter)) */
-static inline tree
+inline tree
first_call_expr_arg (tree exp, call_expr_arg_iterator *iter)
{
init_call_expr_arg_iterator (exp, iter);
return next_call_expr_arg (iter);
}
-static inline const_tree
+inline const_tree
first_const_call_expr_arg (const_tree exp, const_call_expr_arg_iterator *iter)
{
init_const_call_expr_arg_iterator (exp, iter);
/* Test whether there are more arguments in abstract argument list iterator
ITER, without changing its state. */
-static inline bool
+inline bool
more_call_expr_args_p (const call_expr_arg_iterator *iter)
{
return (iter->i < iter->n);
(arg) = next_const_call_expr_arg (&(iter)))
/* Return true if tree node T is a language-specific node. */
-static inline bool
+inline bool
is_lang_specific (const_tree t)
{
return TREE_CODE (t) == LANG_TYPE || TREE_CODE (t) >= NUM_TREE_CODES;
(IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (builtin_decl_explicit (BUILTIN))))
/* Return the tree node for an explicit standard builtin function or NULL. */
-static inline tree
+inline tree
builtin_decl_explicit (enum built_in_function fncode)
{
gcc_checking_assert (BUILTIN_VALID_P (fncode));
}
/* Return the tree node for an implicit builtin function or NULL. */
-static inline tree
+inline tree
builtin_decl_implicit (enum built_in_function fncode)
{
size_t uns_fncode = (size_t)fncode;
/* Set explicit builtin function nodes and whether it is an implicit
function. */
-static inline void
+inline void
set_builtin_decl (enum built_in_function fncode, tree decl, bool implicit_p)
{
size_t ufncode = (size_t)fncode;
/* Set the implicit flag for a builtin function. */
-static inline void
+inline void
set_builtin_decl_implicit_p (enum built_in_function fncode, bool implicit_p)
{
size_t uns_fncode = (size_t)fncode;
/* Set the declared flag for a builtin function. */
-static inline void
+inline void
set_builtin_decl_declared_p (enum built_in_function fncode, bool declared_p)
{
size_t uns_fncode = (size_t)fncode;
/* Return whether the standard builtin function can be used as an explicit
function. */
-static inline bool
+inline bool
builtin_decl_explicit_p (enum built_in_function fncode)
{
gcc_checking_assert (BUILTIN_VALID_P (fncode));
/* Return whether the standard builtin function can be used implicitly. */
-static inline bool
+inline bool
builtin_decl_implicit_p (enum built_in_function fncode)
{
size_t uns_fncode = (size_t)fncode;
/* Return whether the standard builtin function was declared. */
-static inline bool
+inline bool
builtin_decl_declared_p (enum built_in_function fncode)
{
size_t uns_fncode = (size_t)fncode;
Avoid using this, as it's generally better to use attributes rather
than to check for functions by name. */
-static inline bool
+inline bool
maybe_special_function_p (const_tree fndecl)
{
tree name_decl = DECL_NAME (fndecl);
/* Return true if T (assumed to be a DECL) is a global variable.
A variable is considered global if its storage is not automatic. */
-static inline bool
+inline bool
is_global_var (const_tree t)
{
return (TREE_STATIC (t) || DECL_EXTERNAL (t));
maybe aliased if it has its address taken by the local TU
or possibly by another TU and might be modified through a pointer. */
-static inline bool
+inline bool
may_be_aliased (const_tree var)
{
return (TREE_CODE (var) != CONST_DECL
}
/* Return pointer to optimization flags of FNDECL. */
-static inline struct cl_optimization *
+inline struct cl_optimization *
opts_for_fn (const_tree fndecl)
{
tree fn_opts = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
}
/* Return pointer to target flags of FNDECL. */
-static inline cl_target_option *
+inline cl_target_option *
target_opts_for_fn (const_tree fndecl)
{
tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
/* Accumulate a set of overflows into OVERFLOW. */
-static inline void
+inline void
wi::accumulate_overflow (wi::overflow_type &overflow,
wi::overflow_type suboverflow)
{